/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/net.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))

#include "hfi.h"
#include "mad.h"
#include "trace.h"
#include "qp.h"
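/*
 * Note on the macro above (illustrative numbers only, not the compiled
 * values): each pkey block holds OPA_PARTITION_TABLE_BLK_SIZE 16-bit
 * entries, so the macro is simply "how many whole pkey blocks fit in one
 * directed-route SMP payload".  For example, a hypothetical 2016-byte
 * payload with 32-entry blocks would give 2016 / (32 * 2) = 31 blocks.
 */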
/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff
static int reply(struct ib_mad_hdr *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
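/*
 * Usage note: the handlers below report validation failures by setting a
 * status bit and returning through reply(), e.g.
 *	smp->status |= IB_SMP_INVALID_FIELD;
 *	return reply((struct ib_mad_hdr *)smp);
 * so the MAD layer still generates a GetResp carrying the error status.
 */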
static inline void clear_opa_smp_data(struct opa_smp *smp)
{
	void *data = opa_get_smp_data(smp);
	size_t size = opa_get_smp_data_size(smp);

	memset(data, 0, size);
}
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
{
	struct ib_event event;

	event.event = IB_EVENT_PKEY_CHANGE;
	event.device = &dd->verbs_dev.rdi.ibdev;
	event.element.port_num = port;
	ib_dispatch_event(&event);
}
static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct opa_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;
	int pkey_idx;
	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
		return;

	/* o14-2 */
	if (ibp->rvp.trap_timeout && time_before(jiffies,
						 ibp->rvp.trap_timeout))
		return;

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
			__func__, hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}

	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = OPA_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = OPA_SMI_CLASS_VERSION;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->route.lid.data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah)) {
				ret = PTR_ERR(ah);
			} else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else {
			ret = -EINVAL;
		}
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}
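/*
 * Worked example of the trap pacing above (illustrative numbers only):
 * with subnet_timeout == 18, the repeat interval works out to
 * 4096 * 2^18 ns ~= 1.07 * 10^9 ns, i.e. roughly one trap per second
 * until trap_timeout is cleared.
 */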
/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 */
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		    u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;
	u32 _lid1 = lid1;
	u32 _lid2 = lid2;

	memset(&data, 0, sizeof(data));

	if (trap_num == OPA_TRAP_BAD_P_KEY)
		ibp->rvp.pkey_violations++;
	else
		ibp->rvp.qkey_violations++;
	ibp->rvp.n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
	data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
	data.ntc_257_258.key = cpu_to_be32(key);
	data.ntc_257_258.sl = sl << 3;
	data.ntc_257_258.qp1 = cpu_to_be32(qp1);
	data.ntc_257_258.qp2 = cpu_to_be32(qp2);

	send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));
	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_BAD_M_KEY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_256.lid = data.issuer_lid;
	data.ntc_256.method = mad->method;
	data.ntc_256.attr_id = mad->attr_id;
	data.ntc_256.attr_mod = mad->attr_mod;
	data.ntc_256.mkey = mkey;
	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		data.ntc_256.dr_slid = dr_slid;
		data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
			data.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
		}
		data.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.ntc_256.dr_rtn_path, return_path,
		       hop_cnt);
	}

	send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
	struct opa_mad_notice_attr data;
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);

	send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
	data.ntc_145.lid = data.issuer_lid;

	send_trap(ibp, &data, sizeof(data));
}
/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
	struct opa_mad_notice_attr data;
	u32 lid = ppd_from_ibp(ibp)->lid;

	memset(&data, 0, sizeof(data));

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
	data.issuer_lid = cpu_to_be32(lid);
	data.ntc_144.lid = data.issuer_lid;
	data.ntc_144.change_flags =
		cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);

	send_trap(ibp, &data, sizeof(data));
}
static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
				   u8 *data, struct ib_device *ibdev,
				   u8 port, u32 *resp_len)
{
	struct opa_node_description *nd;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	nd = (struct opa_node_description *)data;

	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));

	if (resp_len)
		*resp_len += sizeof(*nd);

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct opa_node_info *ni;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB numbers ports from 1, hw from 0 */

	ni = (struct opa_node_info *)data;

	/* GUID 0 is illegal */
	if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
	ni->base_version = OPA_MGMT_BASE_VERSION;
	ni->class_version = OPA_SMI_CLASS_VERSION;
	ni->node_type = 1; /* channel adapter */
	ni->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	ni->system_image_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node GUID */
	ni->node_guid = cpu_to_be64(dd->pport->guid);
	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	ni->device_id = cpu_to_be16(dd->pcidev->device);
	ni->revision = cpu_to_be32(dd->minrev);
	ni->local_port_num = port;
	ni->vendor_id[0] = dd->oui1;
	ni->vendor_id[1] = dd->oui2;
	ni->vendor_id[2] = dd->oui3;

	if (resp_len)
		*resp_len += sizeof(*ni);

	return reply((struct ib_mad_hdr *)smp);
}
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB numbers ports from 1, hw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);

	nip->base_version = OPA_MGMT_BASE_VERSION;
	nip->class_version = OPA_SMI_CLASS_VERSION;
	nip->node_type = 1; /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_hfi1_sys_image_guid;
	/* Use first-port GUID as node GUID */
	nip->node_guid = cpu_to_be64(dd->pport->guid);
	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->pcidev->device);
	nip->revision = cpu_to_be32(dd->minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = dd->oui1;
	nip->vendor_id[1] = dd->oui2;
	nip->vendor_id[2] = dd->oui3;

	return reply((struct ib_mad_hdr *)smp);
}
static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
}

static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
}

static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
{
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
}
static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
		      int mad_flags, __be64 mkey, __be32 dr_slid,
		      u8 return_path[], u8 hop_cnt)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (mad->method == IB_MGMT_METHOD_GET ||
	     mad->method == IB_MGMT_METHOD_SET ||
	     mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (mad->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
			/* FALLTHROUGH */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			bad_mkey(ibp, mad, mkey, dr_slid, return_path,
				 hop_cnt);
			ret = 1;
		}
	}

	return ret;
}
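/*
 * check_mkey() return convention (as implemented above): 0 means the MAD
 * may be processed (the mkey matched, was zero, or is ignored for this
 * MAD); 1 means an mkey violation was recorded and the caller must drop
 * the MAD without a response.
 */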
/*
 * The SMA caches reads from LCB registers in case the LCB is unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
};
static int write_lcb_cache(u32 off, u64 val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			lcb_cache[i].val = val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}
static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}
void read_ltp_rtt(struct hfi1_devdata *dd)
{
	u64 reg;

	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
	else
		write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}
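/*
 * Cache flow: read_ltp_rtt() above refreshes the cached
 * DC_LCB_STS_ROUND_TRIP_LTP_CNT value from the CSR while the LCB is
 * readable, and __subn_get_opa_portinfo() below consumes it via
 * read_lcb_cache() to fill in replay_depth.wire.
 */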
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	int i;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct opa_port_info *pi = (struct opa_port_info *)data;
	u8 mtu;
	u8 credit_rate;
	u8 is_beaconing_active;
	u32 state;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 buffer_units;
	u64 tmp = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	pi->lid = cpu_to_be32(ppd->lid);

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pi->mkey = ibp->rvp.mkey;

	pi->subnet_prefix = ibp->rvp.gid_prefix;
	pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
	pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
	pi->sa_qp = cpu_to_be32(ppd->sa_qp);

	pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
	pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
	pi->link_width.active = cpu_to_be16(ppd->link_width_active);

	pi->link_width_downgrade.supported =
		cpu_to_be16(ppd->link_width_downgrade_supported);
	pi->link_width_downgrade.enabled =
		cpu_to_be16(ppd->link_width_downgrade_enabled);
	pi->link_width_downgrade.tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	pi->link_width_downgrade.rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);

	pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
	pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
	pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);

	state = driver_lstate(ppd);

	if (start_of_sm_config && (state == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	pi->port_phys_conf = (ppd->port_type & 0xf);

	pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	pi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
	pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
	pi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;

	pi->port_states.portphysstate_portstate =
		(hfi1_ibphys_portstate(ppd) << 4) | state;

	pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;

	memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
	for (i = 0; i < ppd->vls_supported; i++) {
		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
		if ((i % 2) == 0)
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
		else
			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
	}
	/* don't forget VL 15 */
	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
	pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
	pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
	pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
	pi->partenforce_filterraw |=
		(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
	if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
	if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
	pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);

	pi->vl.cap = ppd->vls_supported;
	pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
	pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
	pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);

	pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;

	pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
					 OPA_PORT_LINK_MODE_OPA << 5 |
					 OPA_PORT_LINK_MODE_OPA);

	pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);

	pi->port_mode = cpu_to_be16(
				ppd->is_active_optimize_enabled ?
					OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);

	pi->port_packet_format.supported =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
	pi->port_packet_format.enabled =
		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);

	/* flit_control.interleave is (OPA V1, version .76):
	 * bits		use
	 * ----		---
	 * 2		res
	 * 2		DistanceSupported
	 * 2		DistanceEnabled
	 * 5		MaxNextLevelTxEnabled
	 * 5		MaxNestLevelRxSupported
	 *
	 * HFI supports only "distance mode 1" (see OPA V1, version .76,
	 * section 9.6.2), so set DistanceSupported, DistanceEnabled
	 * to 0x1.
	 */
	pi->flit_control.interleave = cpu_to_be16(0x1400);

	pi->link_down_reason = ppd->local_link_down_reason.sma;
	pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
	pi->port_error_action = cpu_to_be32(ppd->port_error_action);
	pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);

	/* 32.768 usec. response time (guessing) */
	pi->resptimevalue = 3;

	pi->local_port_num = port;

	/* buffer info for FM */
	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);

	pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
	pi->neigh_port_num = ppd->neighbor_port_number;
	pi->port_neigh_mode =
		(ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
		(ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
		(ppd->neighbor_fm_security ?
			OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);

	/* HFIs shall always return VL15 credits to their
	 * neighbor in a timely manner, without any credit return pacing.
	 */
	credit_rate = 0;
	buffer_units  = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
	buffer_units |= (credit_rate << 6) &
				OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
	pi->buffer_units = cpu_to_be32(buffer_units);

	pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);

	/* HFI supports a replay buffer 128 LTPs in size */
	pi->replay_depth.buffer = 0x80;
	/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);

	/*
	 * this counter is 16 bits wide, but the replay_depth.wire
	 * variable is only 8 bits
	 */
	if (tmp > 0xff)
		tmp = 0xff;
	pi->replay_depth.wire = tmp;

	if (resp_len)
		*resp_len += sizeof(struct opa_port_info);

	return reply((struct ib_mad_hdr *)smp);
}
/**
 * get_pkeys - return the PKEY table
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd = dd->pport + port - 1;

	memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));

	return 0;
}
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_req = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	__be16 *p;
	u16 *q;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);
	size_t size;

	if (n_blocks_req == 0) {
		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_req);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);

	if (start_block + n_blocks_req > n_blocks_avail ||
	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_req, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	p = (__be16 *)data;
	q = (u16 *)data;
	/* get the real pkeys if we are requesting the first block */
	if (start_block == 0) {
		get_pkeys(dd, port, q);
		for (i = 0; i < npkeys; i++)
			p[i] = cpu_to_be16(q[i]);
		if (resp_len)
			*resp_len += size;
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	return reply((struct ib_mad_hdr *)smp);
}
enum {
	HFI_TRANSITION_DISALLOWED,
	HFI_TRANSITION_IGNORED,
	HFI_TRANSITION_ALLOWED,
	HFI_TRANSITION_UNDEFINED,
};

/*
 * Use shortened names to improve readability of
 * {logical,physical}_state_transitions
 */
enum {
	__D = HFI_TRANSITION_DISALLOWED,
	__I = HFI_TRANSITION_IGNORED,
	__A = HFI_TRANSITION_ALLOWED,
	__U = HFI_TRANSITION_UNDEFINED,
};
/*
 * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
 * represented in physical_state_transitions.
 */
#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)

/*
 * Within physical_state_transitions, rows represent "old" states,
 * columns "new" states, and physical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 6-4).
 */
static const struct {
	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
} physical_state_transitions = {
	{
		/* 2    3    4    5    6    7    8    9   10   11 */
	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
	}
};
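/*
 * Lookup sketch: the tables are indexed with state values biased to
 * zero, e.g. for physical states
 *	physical_state_transitions.allowed[old - IB_PORTPHYSSTATE_POLLING]
 *					  [new - IB_PORTPHYSSTATE_POLLING]
 * as done in physical_transition_allowed() below; the logical table is
 * indexed the same way, biased by IB_PORT_DOWN.
 */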
/*
 * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
 * in logical_state_transitions.
 */
#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)

/*
 * Within logical_state_transitions rows represent "old" states,
 * columns "new" states, and logical_state_transitions.allowed[old][new]
 * indicates if the transition from old state to new state is legal (see
 * OPAg1v1, Table 9-12).
 */
static const struct {
	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
} logical_state_transitions = {
	{
		/* 1    2    3    4    5 */
	/* 1 */	{ __I, __D, __D, __D, __U },
	/* 2 */	{ __D, __I, __A, __D, __U },
	/* 3 */	{ __D, __D, __I, __A, __U },
	/* 4 */	{ __D, __D, __I, __I, __U },
	/* 5 */	{ __U, __U, __U, __U, __U },
	}
};
static int logical_transition_allowed(int old, int new)
{
	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
		pr_warn("invalid logical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORT_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into logical_state_transitions */
	old -= IB_PORT_DOWN;
	new -= IB_PORT_DOWN;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return logical_state_transitions.allowed[old][new];
}
static int physical_transition_allowed(int old, int new)
{
	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
		pr_warn("invalid physical state(s) (old %d new %d)\n",
			old, new);
		return HFI_TRANSITION_UNDEFINED;
	}

	if (new == IB_PORTPHYSSTATE_NOP)
		return HFI_TRANSITION_ALLOWED; /* always allowed */

	/* adjust states for indexing into physical_state_transitions */
	old -= IB_PORTPHYSSTATE_POLLING;
	new -= IB_PORTPHYSSTATE_POLLING;

	if (old < 0 || new < 0)
		return HFI_TRANSITION_UNDEFINED;
	return physical_state_transitions.allowed[old][new];
}
static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
					  u32 logical_new, u32 physical_new)
{
	u32 physical_old = driver_physical_state(ppd);
	u32 logical_old = driver_logical_state(ppd);
	int ret, logical_allowed, physical_allowed;

	ret = logical_transition_allowed(logical_old, logical_new);
	logical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid logical state transition %s -> %s\n",
			opa_lstate_name(logical_old),
			opa_lstate_name(logical_new));
		return ret;
	}

	ret = physical_transition_allowed(physical_old, physical_new);
	physical_allowed = ret;

	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		pr_warn("invalid physical state transition %s -> %s\n",
			opa_pstate_name(physical_old),
			opa_pstate_name(physical_new));
		return ret;
	}

	if (logical_allowed == HFI_TRANSITION_IGNORED &&
	    physical_allowed == HFI_TRANSITION_IGNORED)
		return HFI_TRANSITION_IGNORED;

	/*
	 * A change request of Physical Port State from
	 * 'Offline' to 'Polling' should be ignored.
	 */
	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
	    (physical_new == IB_PORTPHYSSTATE_POLLING))
		return HFI_TRANSITION_IGNORED;

	/*
	 * Either physical_allowed or logical_allowed is
	 * HFI_TRANSITION_ALLOWED.
	 */
	return HFI_TRANSITION_ALLOWED;
}
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
			   u32 logical_state, u32 phys_state,
			   int suppress_idle_sma)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 link_state;
	int ret;

	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
	if (ret == HFI_TRANSITION_DISALLOWED ||
	    ret == HFI_TRANSITION_UNDEFINED) {
		/* error message emitted above */
		smp->status |= IB_SMP_INVALID_FIELD;
		return 0;
	}

	if (ret == HFI_TRANSITION_IGNORED)
		return 0;

	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
	    !(logical_state == IB_PORT_DOWN ||
	      logical_state == IB_PORT_NOP)) {
		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
			logical_state, phys_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	/*
	 * Logical state changes are summarized in OPAv1g1 spec.,
	 * Table 9-12; physical state changes are summarized in
	 * OPAv1g1 spec., Table 6.4.
	 */
	switch (logical_state) {
	case IB_PORT_NOP:
		if (phys_state == IB_PORTPHYSSTATE_NOP)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (phys_state == IB_PORTPHYSSTATE_NOP) {
			link_state = HLS_DN_DOWNDEF;
		} else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
			link_state = HLS_DN_POLL;
			set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
					     0, OPA_LINKDOWN_REASON_FM_BOUNCE);
		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
			link_state = HLS_DN_DISABLE;
		} else {
			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
				phys_state);
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}

		if ((link_state == HLS_DN_POLL ||
		     link_state == HLS_DN_DOWNDEF)) {
			/*
			 * Going to poll.  No matter what the current state,
			 * always move offline first, then tune and start the
			 * link.  This correctly handles a FM link bounce and
			 * a link enable.  Going offline is a no-op if already
			 * offline.
			 */
			set_link_state(ppd, HLS_DN_OFFLINE);
			tune_serdes(ppd);
		}
		set_link_state(ppd, link_state);
		if (link_state == HLS_DN_DISABLE &&
		    (ppd->offline_disabled_reason >
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
		     ppd->offline_disabled_reason ==
		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
			ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		break;
	case IB_PORT_ARMED:
		ret = set_link_state(ppd, HLS_UP_ARMED);
		if ((ret == 0) && (suppress_idle_sma == 0))
			send_idle_sma(dd, SMA_IDLE_ARM);
		break;
	case IB_PORT_ACTIVE:
		if (ppd->neighbor_normal) {
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret == 0)
				send_idle_sma(dd, SMA_IDLE_ACTIVE);
		} else {
			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
			smp->status |= IB_SMP_INVALID_FIELD;
		}
		break;
	default:
		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
			logical_state);
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	return 0;
}
/**
 * __subn_set_opa_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 */
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct opa_port_info *pi = (struct opa_port_info *)data;
	struct ib_event event;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u8 clientrereg;
	unsigned long flags;
	u32 smlid, opa_lid; /* tmp vars to hold LID values */
	u16 lid;
	u8 ls_old, ls_new, ps_new;
	u8 vls;
	u8 msl;
	u8 crc_enabled;
	u16 lse, lwe;
	u32 mtu;
	u32 num_ports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	int ret, i, invalid = 0, call_set_mtu = 0;
	int call_link_downgrade_policy = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	opa_lid = be32_to_cpu(pi->lid);
	if (opa_lid & 0xFFFF0000) {
		pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}

	lid = (u16)(opa_lid & 0x0000FFFF);

	smlid = be32_to_cpu(pi->sm_lid);
	if (smlid & 0xFFFF0000) {
		pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
		smp->status |= IB_SMP_INVALID_FIELD;
		goto get_only;
	}
	smlid &= 0x0000FFFF;

	clientrereg = (pi->clientrereg_subnettimeout &
		       OPA_PI_MASK_CLIENT_REREGISTER);

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ls_old = driver_lstate(ppd);

	ibp->rvp.mkey = pi->mkey;
	ibp->rvp.gid_prefix = pi->subnet_prefix;
	ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);

	/* Must be a valid unicast LID address. */
	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
	    lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
			lid);
	} else if (ppd->lid != lid ||
		   ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
		if (ppd->lid != lid)
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	msl = pi->smsl & OPA_PI_MASK_SMSL;
	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
		ppd->linkinit_reason =
			(pi->partenforce_filterraw &
			 OPA_PI_MASK_LINKINIT_REASON);
	/* enable/disable SW pkey checking as per FM control */
	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
		ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;

	if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
		ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
	else
		ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;

	/* Must be a valid unicast LID address. */
	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
	    smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				ibp->rvp.sm_ah->attr.dlid = smlid;
			if (msl != ibp->rvp.sm_sl)
				ibp->rvp.sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	if (pi->link_down_reason == 0) {
		ppd->local_link_down_reason.sma = 0;
		ppd->local_link_down_reason.latest = 0;
	}

	if (pi->neigh_link_down_reason == 0) {
		ppd->neigh_link_down_reason.sma = 0;
		ppd->neigh_link_down_reason.latest = 0;
	}

	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
	ppd->sa_qp = be32_to_cpu(pi->sa_qp);

	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
	lwe = be16_to_cpu(pi->link_width.enabled);
	if (lwe) {
		if (lwe == OPA_LINK_WIDTH_RESET ||
		    lwe == OPA_LINK_WIDTH_RESET_OLD)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if ((lwe & ~ppd->link_width_supported) == 0)
			set_link_width_enabled(ppd, lwe);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}
	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
	/* LWD.E is always applied - 0 means "disabled" */
	if (lwe == OPA_LINK_WIDTH_RESET ||
	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
		set_link_width_downgrade_enabled(ppd,
						 ppd->link_width_downgrade_supported);
	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
		/* only set and apply if something changed */
		if (lwe != ppd->link_width_downgrade_enabled) {
			set_link_width_downgrade_enabled(ppd, lwe);
			call_link_downgrade_policy = 1;
		}
	} else {
		smp->status |= IB_SMP_INVALID_FIELD;
	}
	lse = be16_to_cpu(pi->link_speed.enabled);
	if (lse) {
		if (lse & be16_to_cpu(pi->link_speed.supported))
			set_link_speed_enabled(ppd, lse);
		else
			smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot =
		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
			      ibp->rvp.vl_high_limit);

	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	for (i = 0; i < ppd->vls_supported; i++) {
		if ((i % 2) == 0)
			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
					   4) & 0xF);
		else
			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
					  0xF);
		if (mtu == 0xffff) {
			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
				mtu,
				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
			smp->status |= IB_SMP_INVALID_FIELD;
			mtu = hfi1_max_mtu; /* use a valid MTU */
		}
		if (dd->vld[i].mtu != mtu) {
			dd_dev_info(dd,
				    "MTU change on vl %d from %d to %d\n",
				    i, dd->vld[i].mtu, mtu);
			dd->vld[i].mtu = mtu;
			call_set_mtu++;
		}
	}
	/* As per OPAV1 spec: VL15 must support and be configured
	 * for operation with a 2048 or larger MTU.
	 */
	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
	if (mtu < 2048 || mtu == 0xffff)
		mtu = 2048;
	if (dd->vld[15].mtu != mtu) {
		dd_dev_info(dd,
			    "MTU change on vl 15 from %d to %d\n",
			    dd->vld[15].mtu, mtu);
		dd->vld[15].mtu = mtu;
		call_set_mtu++;
	}
	if (call_set_mtu)
		set_mtu(ppd);

	/* Set operational VLs */
	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
	if (vls) {
		if (vls > ppd->vls_supported) {
			pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
				pi->operational_vls);
			smp->status |= IB_SMP_INVALID_FIELD;
		} else {
			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
					    vls) == -EINVAL)
				smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	if (pi->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pi->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pi->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ibp->rvp.subnet_timeout =
		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;

	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
	crc_enabled >>= 4;
	crc_enabled &= 0xf;

	if (crc_enabled != 0)
		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);

	ppd->is_active_optimize_enabled =
			!!(be16_to_cpu(pi->port_mode)
					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);

	ls_new = pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_STATE;
	ps_new = (pi->port_states.portphysstate_portstate &
			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0)
				invalid = 1;
		}
	}

	/* Handle CLIENT_REREGISTER event b/c SM asked us for it */
	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
	if (ret)
		return ret;

	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);

	/* restore re-reg bit per o14-12.2.1 */
	pi->clientrereg_subnettimeout |= clientrereg;

	/*
	 * Apply the new link downgrade policy. This may result in a link
	 * bounce.  Do this after everything else so things are settled.
	 * Possible problem: if setting the port state above fails, then
	 * the policy change is not applied.
	 */
	if (call_link_downgrade_policy)
		apply_link_downgrade_policy(ppd, 0);

	return ret;

get_only:
	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
}
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the hfi1_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
{
	struct hfi1_pportdata *ppd;
	int i;
	int changed = 0;
	int update_includes_mgmt_partition = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	/*
	 * If the update does not include the management pkey, don't do it.
	 */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (pkeys[i] == LIM_MGMT_P_KEY) {
			update_includes_mgmt_partition = 1;
			break;
		}
	}

	if (!update_includes_mgmt_partition)
		return 1;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = ppd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The SM gives us the complete PKey table. We have
		 * to ensure that we put the PKeys in the matching
		 * slots.
		 */
		ppd->pkeys[i] = key;
		changed = 1;
	}

	if (changed) {
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(dd, port);
	}

	return 0;
}
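/*
 * Return convention for set_pkeys() (as used by its caller): 0 on
 * success, 1 when the new table does not contain LIM_MGMT_P_KEY;
 * __subn_set_opa_pkeytable() below turns a non-zero return into
 * IB_SMP_INVALID_FIELD.
 */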
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 n_blocks_sent = OPA_AM_NBLK(am);
	u32 start_block = am & 0x7ff;
	u16 *p = (u16 *)data;
	__be16 *q = (__be16 *)data;
	int i;
	u16 n_blocks_avail;
	unsigned npkeys = hfi1_get_npkeys(dd);

	if (n_blocks_sent == 0) {
		pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
			port, start_block, n_blocks_sent);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;

	if (start_block + n_blocks_sent > n_blocks_avail ||
	    n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
		pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
			start_block, n_blocks_sent, n_blocks_avail,
			OPA_NUM_PKEY_BLOCKS_PER_SMP);
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
		p[i] = be16_to_cpu(q[i]);

	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
}
static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	*val++ = read_csr(dd, SEND_SC2VLT0);
	*val++ = read_csr(dd, SEND_SC2VLT1);
	*val++ = read_csr(dd, SEND_SC2VLT2);
	*val++ = read_csr(dd, SEND_SC2VLT3);
	return 0;
}
#define ILLEGAL_VL 12
/*
 * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
 * for SC15, which must map to VL15). If we don't remap things this
 * way it is possible for VL15 counters to increment when we try to
 * send on a SC which is mapped to an invalid VL.
 */
static void filter_sc2vlt(void *data)
{
	int i;
	u8 *pd = data;

	for (i = 0; i < OPA_MAX_SCS; i++) {
		if (i == 15)
			continue;
		if ((pd[i] & 0x1f) == 0xf)
			pd[i] = ILLEGAL_VL;
	}
}
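/*
 * Example: a map byte of 0x0f (SC mapped to VL15) for any SC other than
 * SC15 is rewritten to ILLEGAL_VL (12) above, so a stray send on that SC
 * cannot bump the VL15 counters.
 */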
static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
{
	u64 *val = data;

	filter_sc2vlt(data);

	write_csr(dd, SEND_SC2VLT0, *val++);
	write_csr(dd, SEND_SC2VLT1, *val++);
	write_csr(dd, SEND_SC2VLT2, *val++);
	write_csr(dd, SEND_SC2VLT3, *val++);
	write_seqlock_irq(&dd->sc2vl_lock);
	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
	write_sequnlock_irq(&dd->sc2vl_lock);
	return 0;
}
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
		*p++ = ibp->sl_to_sc[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;
	u8 sc;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
		sc = *p++;
		if (ibp->sl_to_sc[i] != sc) {
			ibp->sl_to_sc[i] = sc;

			/* Put all stale qps into error state */
			hfi1_error_port_qps(ibp, i);
		}
	}

	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
}
static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
	unsigned i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		*p++ = ibp->sc_to_sl[i];

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
				   struct ib_device *ibdev, u8 port,
				   u32 *resp_len)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	u8 *p = data;
	int i;

	if (am) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
		ibp->sc_to_sl[i] = *p++;

	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
}
static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	size_t size = 4 * sizeof(u64);

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	get_sc2vlt_tables(dd, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
				    struct ib_device *ibdev, u8 port,
				    u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NBLK(am);
	int async_update = OPA_AM_ASYNC(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	void *vp = (void *)data;
	struct hfi1_pportdata *ppd;
	int lstate;

	if (n_blocks != 1 || async_update) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	/*
	 * it's known that async_update is 0 by this point, but include
	 * the explicit check for clarity
	 */
	if (!async_update &&
	    (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	set_sc2vlt_tables(dd, vp);

	return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
}
static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int size;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);

	size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);

	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	u32 n_blocks = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	void *vp = (void *)data;
	int lstate;

	if (n_blocks != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* IB numbers ports from 1, hw from 0 */
	ppd = dd->pport + (port - 1);
	lstate = driver_lstate(ppd);
	if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	fm_set_table(ppd, FM_TBL_SC2VLNT, vp);

	return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
					 resp_len);
}
static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 lstate;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;

	if (nports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	lstate = driver_lstate(ppd);

	if (start_of_sm_config && (lstate == IB_PORT_INIT))
		ppd->is_sm_config_started = 1;

	psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
	psi->port_states.ledenable_offlinereason |=
		ppd->is_sm_config_started << 5;
	psi->port_states.ledenable_offlinereason |=
		ppd->offline_disabled_reason;

	psi->port_states.portphysstate_portstate =
		(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
	psi->link_width_downgrade_tx_active =
		cpu_to_be16(ppd->link_width_downgrade_tx_active);
	psi->link_width_downgrade_rx_active =
		cpu_to_be16(ppd->link_width_downgrade_rx_active);
	if (resp_len)
		*resp_len += sizeof(struct opa_port_state_info);

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port,
			      u32 *resp_len)
{
	u32 nports = OPA_AM_NPORT(am);
	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
	u32 ls_old;
	u8 ls_new, ps_new;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
	int ret, invalid = 0;

	if (nports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ibp = to_iport(ibdev, port);
	ppd = ppd_from_ibp(ibp);

	ls_old = driver_lstate(ppd);

	ls_new = port_states_to_logical_state(&psi->port_states);
	ps_new = port_states_to_phys_state(&psi->port_states);

	if (ls_old == IB_PORT_INIT) {
		if (start_of_sm_config) {
			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
				ppd->is_sm_config_started = 1;
		} else if (ls_new == IB_PORT_ARMED) {
			if (ppd->is_sm_config_started == 0)
				invalid = 1;
		}
	}

	ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
	if (ret)
		return ret;

	if (invalid)
		smp->status |= IB_SMP_INVALID_FIELD;

	return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
}
static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
				     struct ib_device *ibdev, u8 port,
				     u32 *resp_len)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 addr = OPA_AM_CI_ADDR(am);
	u32 len = OPA_AM_CI_LEN(am) + 1;
	int ret;

	if (dd->pport->port_type != PORT_TYPE_QSFP) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)

	/*
	 * check that addr is within spec, and
	 * addr and (addr + len - 1) are on the same "page"
	 */
	if (addr >= 4096 ||
	    (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ret = get_cable_info(dd, port, addr, len, data);

	if (ret == -ENODEV) {
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		return reply((struct ib_mad_hdr *)smp);
	}

	/* The address range for the CableInfo SMA query is wider than the
	 * memory available on the QSFP cable. We want to return a valid
	 * response, albeit zeroed out, for address ranges beyond available
	 * memory but that are within the CableInfo query spec.
	 */
	if (ret < 0 && ret != -ERANGE) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	if (resp_len)
		*resp_len += len;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port, u32 *resp_len)
{
	u32 num_ports = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	struct buffer_control *p = (struct buffer_control *)data;
	int size;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	ppd = dd->pport + (port - 1);
	size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
	trace_bct_get(dd, p);
	if (resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
			      struct ib_device *ibdev, u8 port, u32 *resp_len)
{
	u32 num_ports = OPA_AM_NPORT(am);
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	struct hfi1_pportdata *ppd;
	struct buffer_control *p = (struct buffer_control *)data;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}
	ppd = dd->pport + (port - 1);
	trace_bct_set(dd, p);
	if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
}
static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
				 struct ib_device *ibdev, u8 port,
				 u32 *resp_len)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	u32 num_ports = OPA_AM_NPORT(am);
	u8 section = (am & 0x00ff0000) >> 16;
	u8 *p = data;
	int size = 0;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	switch (section) {
	case OPA_VLARB_LOW_ELEMENTS:
		size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
		break;
	case OPA_VLARB_HIGH_ELEMENTS:
		size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
		break;
	case OPA_VLARB_PREEMPT_ELEMENTS:
		size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
		break;
	case OPA_VLARB_PREEMPT_MATRIX:
		size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
		break;
	default:
		pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
			be32_to_cpu(smp->attr_mod));
		smp->status |= IB_SMP_INVALID_FIELD;
		break;
	}

	if (size > 0 && resp_len)
		*resp_len += size;

	return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
				 struct ib_device *ibdev, u8 port,
				 u32 *resp_len)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	u32 num_ports = OPA_AM_NPORT(am);
	u8 section = (am & 0x00ff0000) >> 16;
	u8 *p = data;

	if (num_ports != 1) {
		smp->status |= IB_SMP_INVALID_FIELD;
		return reply((struct ib_mad_hdr *)smp);
	}

	switch (section) {
	case OPA_VLARB_LOW_ELEMENTS:
		(void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
		break;
	case OPA_VLARB_HIGH_ELEMENTS:
		(void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
		break;
	/*
	 * neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
	 * can be changed from the default values
	 */
	case OPA_VLARB_PREEMPT_ELEMENTS:
		/* FALLTHROUGH */
	case OPA_VLARB_PREEMPT_MATRIX:
		smp->status |= IB_SMP_UNSUP_METH_ATTR;
		break;
	default:
		pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
			be32_to_cpu(smp->attr_mod));
		smp->status |= IB_SMP_INVALID_FIELD;
		break;
	}

	return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
}
struct opa_pma_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[2024];
} __packed;

struct opa_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	__be32 cap_mask2_resp_time;

	u8 redirect_gid[16];
	__be32 redirect_tc_fl;
	__be32 redirect_lid;
	__be32 redirect_sl_qp;
	__be32 redirect_qkey;

	u8 trap_gid[16];
	__be32 trap_tc_fl;
	__be32 trap_lid;
	__be32 trap_hl_qp;
	__be32 trap_qkey;

	__be16 trap_pkey;
	__be16 redirect_pkey;

	u8 trap_sl_rsvd;
	u8 reserved[3];
} __packed;
struct opa_port_status_req {
	__u8 port_num;
	__u8 reserved[3];
	__be32 vl_select_mask;
};

#define VL_MASK_ALL		0x00000000000080ffUL
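/* VL_MASK_ALL selects bits 0-7 plus bit 15, i.e. data VLs 0-7 and VL15 */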
struct opa_port_status_rsp {
	__u8 port_num;
	__u8 reserved[3];
	__be32 vl_select_mask;

	/* Data counters */
	__be64 port_xmit_data;
	__be64 port_rcv_data;
	__be64 port_xmit_pkts;
	__be64 port_rcv_pkts;
	__be64 port_multicast_xmit_pkts;
	__be64 port_multicast_rcv_pkts;
	__be64 port_xmit_wait;
	__be64 sw_port_congestion;
	__be64 port_rcv_fecn;
	__be64 port_rcv_becn;
	__be64 port_xmit_time_cong;
	__be64 port_xmit_wasted_bw;
	__be64 port_xmit_wait_data;
	__be64 port_rcv_bubble;
	__be64 port_mark_fecn;
	/* Error counters */
	__be64 port_rcv_constraint_errors;
	__be64 port_rcv_switch_relay_errors;
	__be64 port_xmit_discards;
	__be64 port_xmit_constraint_errors;
	__be64 port_rcv_remote_physical_errors;
	__be64 local_link_integrity_errors;
	__be64 port_rcv_errors;
	__be64 excessive_buffer_overruns;
	__be64 fm_config_errors;
	__be32 link_error_recovery;
	__be32 link_downed;
	u8 uncorrectable_errors;

	u8 link_quality_indicator; /* 5res, 3bit */
	u8 res2[6];
	struct _vls_pctrs {
		/* per-VL Data counters */
		__be64 port_vl_xmit_data;
		__be64 port_vl_rcv_data;
		__be64 port_vl_xmit_pkts;
		__be64 port_vl_rcv_pkts;
		__be64 port_vl_xmit_wait;
		__be64 sw_port_vl_congestion;
		__be64 port_vl_rcv_fecn;
		__be64 port_vl_rcv_becn;
		__be64 port_xmit_time_cong;
		__be64 port_vl_xmit_wasted_bw;
		__be64 port_vl_xmit_wait_data;
		__be64 port_vl_rcv_bubble;
		__be64 port_vl_mark_fecn;
		__be64 port_vl_xmit_discards;
	} vls[0]; /* real array size defined by # bits set in vl_select_mask */
};
enum counter_selects {
	CS_PORT_XMIT_DATA = (1 << 31),
	CS_PORT_RCV_DATA = (1 << 30),
	CS_PORT_XMIT_PKTS = (1 << 29),
	CS_PORT_RCV_PKTS = (1 << 28),
	CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
	CS_PORT_MCAST_RCV_PKTS = (1 << 26),
	CS_PORT_XMIT_WAIT = (1 << 25),
	CS_SW_PORT_CONGESTION = (1 << 24),
	CS_PORT_RCV_FECN = (1 << 23),
	CS_PORT_RCV_BECN = (1 << 22),
	CS_PORT_XMIT_TIME_CONG = (1 << 21),
	CS_PORT_XMIT_WASTED_BW = (1 << 20),
	CS_PORT_XMIT_WAIT_DATA = (1 << 19),
	CS_PORT_RCV_BUBBLE = (1 << 18),
	CS_PORT_MARK_FECN = (1 << 17),
	CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
	CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
	CS_PORT_XMIT_DISCARDS = (1 << 14),
	CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
	CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
	CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
	CS_PORT_RCV_ERRORS = (1 << 10),
	CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
	CS_FM_CONFIG_ERRORS = (1 << 8),
	CS_LINK_ERROR_RECOVERY = (1 << 7),
	CS_LINK_DOWNED = (1 << 6),
	CS_UNCORRECTABLE_ERRORS = (1 << 5),
};
2107 struct opa_clear_port_status {
2108 __be64 port_select_mask[4];
2109 __be32 counter_select_mask;
2112 struct opa_aggregate {
2114 __be16 err_reqlength; /* 1 bit, 8 res, 7 bit */
2119 #define MSK_LLI 0x000000f0
2120 #define MSK_LLI_SFT 4
2121 #define MSK_LER 0x0000000f
2122 #define MSK_LER_SFT 0
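/*
 * Worked example of the resolution decode used by
 * pma_get_opa_datacounters(): a resolution value of 0x23 yields
 *
 * res_lli = (0x23 & MSK_LLI) >> MSK_LLI_SFT = 2
 * res_ler = (0x23 & MSK_LER) >> MSK_LER_SFT = 3
 *
 * and the matching counters are right-shifted by those amounts (after
 * the ADD_LLI/ADD_LER adjustment) before being summed.
 */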
2126 /* Request contains first three fields, response contains those plus the rest */
2127 struct opa_port_data_counters_msg {
2128 __be64 port_select_mask[4];
2129 __be32 vl_select_mask;
2132 /* Response fields follow */
2133 struct _port_dctrs {
2136 __be32 link_quality_indicator; /* 29res, 3bit */
2139 __be64 port_xmit_data;
2140 __be64 port_rcv_data;
2141 __be64 port_xmit_pkts;
2142 __be64 port_rcv_pkts;
2143 __be64 port_multicast_xmit_pkts;
2144 __be64 port_multicast_rcv_pkts;
2145 __be64 port_xmit_wait;
2146 __be64 sw_port_congestion;
2147 __be64 port_rcv_fecn;
2148 __be64 port_rcv_becn;
2149 __be64 port_xmit_time_cong;
2150 __be64 port_xmit_wasted_bw;
2151 __be64 port_xmit_wait_data;
2152 __be64 port_rcv_bubble;
2153 __be64 port_mark_fecn;
2155 __be64 port_error_counter_summary;
2156 /* Sum of error counts/port */
2159 /* per-VL Data counters */
2160 __be64 port_vl_xmit_data;
2161 __be64 port_vl_rcv_data;
2162 __be64 port_vl_xmit_pkts;
2163 __be64 port_vl_rcv_pkts;
2164 __be64 port_vl_xmit_wait;
2165 __be64 sw_port_vl_congestion;
2166 __be64 port_vl_rcv_fecn;
2167 __be64 port_vl_rcv_becn;
2168 __be64 port_xmit_time_cong;
2169 __be64 port_vl_xmit_wasted_bw;
2170 __be64 port_vl_xmit_wait_data;
2171 __be64 port_vl_rcv_bubble;
2172 __be64 port_vl_mark_fecn;
2174 /* array size defined by #bits set in vl_select_mask */
2175 } port[1]; /* array size defined by #ports in attribute modifier */
2178 struct opa_port_error_counters64_msg {
2180 * Request contains first two fields, response contains those plus the rest
2183 __be64 port_select_mask[4];
2184 __be32 vl_select_mask;
2186 /* Response-only fields follow */
2188 struct _port_ectrs {
2191 __be64 port_rcv_constraint_errors;
2192 __be64 port_rcv_switch_relay_errors;
2193 __be64 port_xmit_discards;
2194 __be64 port_xmit_constraint_errors;
2195 __be64 port_rcv_remote_physical_errors;
2196 __be64 local_link_integrity_errors;
2197 __be64 port_rcv_errors;
2198 __be64 excessive_buffer_overruns;
2199 __be64 fm_config_errors;
2200 __be32 link_error_recovery;
2202 u8 uncorrectable_errors;
2205 __be64 port_vl_xmit_discards;
2207 /* array size defined by #bits set in vl_select_mask */
2208 } port[1]; /* array size defined by #ports in attribute modifier */
2211 struct opa_port_error_info_msg {
2212 __be64 port_select_mask[4];
2213 __be32 error_info_select_mask;
2219 /* PortRcvErrorInfo */
2225 /* EI1to12 format */
2228 u8 remaining_flit_bits12;
2232 u8 remaining_flit_bits;
2236 } __packed port_rcv_ei;
2238 /* ExcessiveBufferOverrunInfo */
2242 } __packed excessive_buffer_overrun_ei;
2244 /* PortXmitConstraintErrorInfo */
2250 } __packed port_xmit_constraint_ei;
2252 /* PortRcvConstraintErrorInfo */
2258 } __packed port_rcv_constraint_ei;
2260 /* PortRcvSwitchRelayErrorInfo */
2265 } __packed port_rcv_switch_relay_ei;
2267 /* UncorrectableErrorInfo */
2271 } __packed uncorrectable_ei;
2273 /* FMConfigErrorInfo */
2277 } __packed fm_config_ei;
2279 } port[1]; /* actual array size defined by #ports in attr modifier */
2282 /* opa_port_error_info_msg error_info_select_mask bit definitions */
2283 enum error_info_selects {
2284 ES_PORT_RCV_ERROR_INFO = (1 << 31),
2285 ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
2286 ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
2287 ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
2288 ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
2289 ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
2290 ES_FM_CONFIG_ERROR_INFO = (1 << 25)
2293 static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2294 struct ib_device *ibdev, u32 *resp_len)
2296 struct opa_class_port_info *p =
2297 (struct opa_class_port_info *)pmp->data;
2299 memset(pmp->data, 0, sizeof(pmp->data));
2301 if (pmp->mad_hdr.attr_mod != 0)
2302 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2304 p->base_version = OPA_MGMT_BASE_VERSION;
2305 p->class_version = OPA_SMI_CLASS_VERSION;
2307 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2309 p->cap_mask2_resp_time = cpu_to_be32(18);
2312 *resp_len += sizeof(*p);
2314 return reply((struct ib_mad_hdr *)pmp);
2317 static void a0_portstatus(struct hfi1_pportdata *ppd,
2318 struct opa_port_status_rsp *rsp)
2320 if (!is_bx(ppd->dd)) {
2322 u64 sum_vl_xmit_wait = 0;
2323 unsigned long vl_all_mask = VL_MASK_ALL;
2325 for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
2326 u64 tmp = sum_vl_xmit_wait +
2327 read_port_cntr(ppd, C_TX_WAIT_VL,
2329 if (tmp < sum_vl_xmit_wait) {
2331 sum_vl_xmit_wait = (u64)~0;
2334 sum_vl_xmit_wait = tmp;
2336 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2337 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
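/*
 * The loop above accumulates with saturation: an unsigned sum that
 * wraps is pinned to ~0 instead. A minimal illustrative helper (the
 * handlers open-code this idiom rather than call it):
 */
static inline u64 sat_add_u64(u64 a, u64 b)
{
u64 t = a + b;

return t < a ? (u64)~0 : t;
}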
2341 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2342 struct ib_device *ibdev,
2343 u8 port, u32 *resp_len)
2345 struct opa_port_status_req *req =
2346 (struct opa_port_status_req *)pmp->data;
2347 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2348 struct opa_port_status_rsp *rsp;
2349 unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
2351 size_t response_data_size;
2352 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2353 u8 port_num = req->port_num;
2354 u8 num_vls = hweight64(vl_select_mask);
2355 struct _vls_pctrs *vlinfo;
2356 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2357 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2361 response_data_size = sizeof(struct opa_port_status_rsp) +
2362 num_vls * sizeof(struct _vls_pctrs);
2363 if (response_data_size > sizeof(pmp->data)) {
2364 pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2365 return reply((struct ib_mad_hdr *)pmp);
2368 if (nports != 1 || (port_num && port_num != port) ||
2369 num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2370 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2371 return reply((struct ib_mad_hdr *)pmp);
2374 memset(pmp->data, 0, sizeof(pmp->data));
2376 rsp = (struct opa_port_status_rsp *)pmp->data;
2378 rsp->port_num = port_num;
2380 rsp->port_num = port;
2382 rsp->port_rcv_constraint_errors =
2383 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2386 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2388 rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
2389 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2391 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2393 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2395 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2397 rsp->port_multicast_xmit_pkts =
2398 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2400 rsp->port_multicast_rcv_pkts =
2401 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2403 rsp->port_xmit_wait =
2404 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2405 rsp->port_rcv_fecn =
2406 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2407 rsp->port_rcv_becn =
2408 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2409 rsp->port_xmit_discards =
2410 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2412 rsp->port_xmit_constraint_errors =
2413 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2415 rsp->port_rcv_remote_physical_errors =
2416 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2418 rsp->local_link_integrity_errors =
2419 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2421 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2422 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2424 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2425 /* overflow/wrapped */
2426 rsp->link_error_recovery = cpu_to_be32(~0);
2428 rsp->link_error_recovery = cpu_to_be32(tmp2);
2430 rsp->port_rcv_errors =
2431 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2432 rsp->excessive_buffer_overruns =
2433 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2434 rsp->fm_config_errors =
2435 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2437 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2440 /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2441 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2442 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2444 vlinfo = &rsp->vls[0];
2446 /* The vl_select_mask has been checked above, and we know
2447 * that it contains only entries which represent valid VLs.
2448 * So in the for_each_set_bit() loop below, we don't need
2449 * any additional checks for vl.
2451 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
2452 memset(vlinfo, 0, sizeof(*vlinfo));
2454 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2455 rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2457 rsp->vls[vfi].port_vl_rcv_pkts =
2458 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2461 rsp->vls[vfi].port_vl_xmit_data =
2462 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2465 rsp->vls[vfi].port_vl_xmit_pkts =
2466 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2469 rsp->vls[vfi].port_vl_xmit_wait =
2470 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2473 rsp->vls[vfi].port_vl_rcv_fecn =
2474 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2477 rsp->vls[vfi].port_vl_rcv_becn =
2478 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2481 rsp->vls[vfi].port_vl_xmit_discards =
2482 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2488 a0_portstatus(ppd, rsp);
2491 *resp_len += response_data_size;
2493 return reply((struct ib_mad_hdr *)pmp);
2496 static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2497 u8 res_lli, u8 res_ler)
2499 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2500 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2501 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2502 u64 error_counter_summary = 0, tmp;
2504 error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2506 /* port_rcv_switch_relay_errors is 0 for HFIs */
2507 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2509 error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2511 error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2513 /* local link integrity must be right-shifted by the lli resolution */
2514 error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
2515 CNTR_INVALID_VL) >> res_lli);
2516 /* link error recovery must be right-shifted by the ler resolution */
2517 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2518 tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2519 error_counter_summary += (tmp >> res_ler);
2520 error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2522 error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2523 error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2525 /* ppd->link_downed is a 32-bit value */
2526 error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2528 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2529 /* this is an 8-bit quantity */
2530 error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2532 return error_counter_summary;
2535 static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
2537 if (!is_bx(ppd->dd)) {
2539 u64 sum_vl_xmit_wait = 0;
2540 unsigned long vl_all_mask = VL_MASK_ALL;
2542 for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
2543 u64 tmp = sum_vl_xmit_wait +
2544 read_port_cntr(ppd, C_TX_WAIT_VL,
2546 if (tmp < sum_vl_xmit_wait) {
2548 sum_vl_xmit_wait = (u64)~0;
2551 sum_vl_xmit_wait = tmp;
2553 if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2554 rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2558 static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
2559 struct _port_dctrs *rsp)
2561 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2563 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2565 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2567 rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2569 rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2571 rsp->port_multicast_xmit_pkts =
2572 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2574 rsp->port_multicast_rcv_pkts =
2575 cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2579 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2580 struct ib_device *ibdev,
2581 u8 port, u32 *resp_len)
2583 struct opa_port_data_counters_msg *req =
2584 (struct opa_port_data_counters_msg *)pmp->data;
2585 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2586 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2587 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2588 struct _port_dctrs *rsp;
2589 struct _vls_dctrs *vlinfo;
2590 size_t response_data_size;
2594 u8 res_lli, res_ler;
2598 unsigned long vl_select_mask;
2601 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2602 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2603 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2604 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2605 res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
2606 res_lli = res_lli ? res_lli + ADD_LLI : 0;
2607 res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
2608 res_ler = res_ler ? res_ler + ADD_LER : 0;
2610 if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
2611 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2612 return reply((struct ib_mad_hdr *)pmp);
2616 response_data_size = sizeof(struct opa_port_data_counters_msg) +
2617 num_vls * sizeof(struct _vls_dctrs);
2619 if (response_data_size > sizeof(pmp->data)) {
2620 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2621 return reply((struct ib_mad_hdr *)pmp);
2625 * The bit set in the mask needs to be consistent with the
2626 * port the request came in on.
2628 port_mask = be64_to_cpu(req->port_select_mask[3]);
2629 port_num = find_first_bit((unsigned long *)&port_mask,
2630 sizeof(port_mask) * 8);
2632 if (port_num != port) {
2633 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2634 return reply((struct ib_mad_hdr *)pmp);
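/*
 * Example: a port_select_mask[3] of 0x4 makes find_first_bit() return
 * 2, so the request is honored only if it arrived on port 2.
 */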
2637 rsp = &req->port[0];
2638 memset(rsp, 0, sizeof(*rsp));
2640 rsp->port_number = port;
2642 * Note that link_quality_indicator is a 32 bit quantity in
2643 * 'datacounters' queries (as opposed to 'portinfo' queries,
2644 * where it's a byte).
2646 hfi1_read_link_quality(dd, &lq);
2647 rsp->link_quality_indicator = cpu_to_be32((u32)lq);
2648 pma_get_opa_port_dctrs(ibdev, rsp);
2650 rsp->port_xmit_wait =
2651 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
2652 rsp->port_rcv_fecn =
2653 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2654 rsp->port_rcv_becn =
2655 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2656 rsp->port_error_counter_summary =
2657 cpu_to_be64(get_error_counter_summary(ibdev, port,
2660 vlinfo = &rsp->vls[0];
2662 /* The vl_select_mask has been checked above, and we know
2663 * that it contains only entries which represent valid VLs.
2664 * So in the for_each_set_bit() loop below, we don't need
2665 * any additional checks for vl.
2667 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
2668 memset(vlinfo, 0, sizeof(*vlinfo));
2670 rsp->vls[vfi].port_vl_xmit_data =
2671 cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2674 rsp->vls[vfi].port_vl_rcv_data =
2675 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
2678 rsp->vls[vfi].port_vl_xmit_pkts =
2679 cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2682 rsp->vls[vfi].port_vl_rcv_pkts =
2683 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2686 rsp->vls[vfi].port_vl_xmit_wait =
2687 cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
2690 rsp->vls[vfi].port_vl_rcv_fecn =
2691 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2693 rsp->vls[vfi].port_vl_rcv_becn =
2694 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2697 /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
2698 /* rsp->port_vl_xmit_wasted_bw ??? */
2699 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
2700 * does this differ from rsp->vls[vfi].port_vl_xmit_wait?
2702 /* rsp->vls[vfi].port_vl_mark_fecn =
2703 * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
2710 a0_datacounters(ppd, rsp);
2713 *resp_len += response_data_size;
2715 return reply((struct ib_mad_hdr *)pmp);
2718 static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
2719 struct ib_device *ibdev, u8 port)
2721 struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
2723 struct _port_dctrs rsp;
2725 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
2726 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2730 memset(&rsp, 0, sizeof(rsp));
2731 pma_get_opa_port_dctrs(ibdev, &rsp);
2733 p->port_xmit_data = rsp.port_xmit_data;
2734 p->port_rcv_data = rsp.port_rcv_data;
2735 p->port_xmit_packets = rsp.port_xmit_pkts;
2736 p->port_rcv_packets = rsp.port_rcv_pkts;
2737 p->port_unicast_xmit_packets = 0;
2738 p->port_unicast_rcv_packets = 0;
2739 p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
2740 p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
2743 return reply((struct ib_mad_hdr *)pmp);
2746 static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
2747 struct _port_ectrs *rsp, u8 port)
2750 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2751 struct hfi1_ibport *ibp = to_iport(ibdev, port);
2752 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2754 tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2755 tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2757 if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2758 /* overflow/wrapped */
2759 rsp->link_error_recovery = cpu_to_be32(~0);
2761 rsp->link_error_recovery = cpu_to_be32(tmp2);
2764 rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2766 rsp->port_rcv_errors =
2767 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2768 rsp->port_rcv_remote_physical_errors =
2769 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2771 rsp->port_rcv_switch_relay_errors = 0;
2772 rsp->port_xmit_discards =
2773 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2775 rsp->port_xmit_constraint_errors =
2776 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2778 rsp->port_rcv_constraint_errors =
2779 cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2781 rsp->local_link_integrity_errors =
2782 cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2784 rsp->excessive_buffer_overruns =
2785 cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2788 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
2789 struct ib_device *ibdev,
2790 u8 port, u32 *resp_len)
2792 size_t response_data_size;
2793 struct _port_ectrs *rsp;
2795 struct opa_port_error_counters64_msg *req;
2796 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2800 struct hfi1_ibport *ibp;
2801 struct hfi1_pportdata *ppd;
2802 struct _vls_ectrs *vlinfo;
2805 unsigned long vl_select_mask;
2808 req = (struct opa_port_error_counters64_msg *)pmp->data;
2810 num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2812 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2813 num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
2815 if (num_ports != 1 || num_ports != num_pslm) {
2816 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2817 return reply((struct ib_mad_hdr *)pmp);
2820 response_data_size = sizeof(struct opa_port_error_counters64_msg) +
2821 num_vls * sizeof(struct _vls_ectrs);
2823 if (response_data_size > sizeof(pmp->data)) {
2824 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2825 return reply((struct ib_mad_hdr *)pmp);
2828 * The bit set in the mask needs to be consistent with the
2829 * port the request came in on.
2831 port_mask = be64_to_cpu(req->port_select_mask[3]);
2832 port_num = find_first_bit((unsigned long *)&port_mask,
2833 sizeof(port_mask) * 8);
2835 if (port_num != port) {
2836 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2837 return reply((struct ib_mad_hdr *)pmp);
2840 rsp = &req->port[0];
2842 ibp = to_iport(ibdev, port_num);
2843 ppd = ppd_from_ibp(ibp);
2845 memset(rsp, 0, sizeof(*rsp));
2846 rsp->port_number = port_num;
2848 pma_get_opa_port_ectrs(ibdev, rsp, port_num);
2850 rsp->port_rcv_remote_physical_errors =
2851 cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2853 rsp->fm_config_errors =
2854 cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2856 tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2858 rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2859 rsp->port_rcv_errors =
2860 cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2861 vlinfo = &rsp->vls[0];
2863 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2864 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
2865 memset(vlinfo, 0, sizeof(*vlinfo));
2866 rsp->vls[vfi].port_vl_xmit_discards =
2867 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2874 *resp_len += response_data_size;
2876 return reply((struct ib_mad_hdr *)pmp);
2879 static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
2880 struct ib_device *ibdev, u8 port)
2882 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
2884 struct _port_ectrs rsp;
2885 u64 temp_link_overrun_errors;
2889 memset(&rsp, 0, sizeof(rsp));
2890 pma_get_opa_port_ectrs(ibdev, &rsp, port);
2892 if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
2893 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2897 p->symbol_error_counter = 0; /* N/A for OPA */
2899 temp_32 = be32_to_cpu(rsp.link_error_recovery);
2900 if (temp_32 > 0xFFUL)
2901 p->link_error_recovery_counter = 0xFF;
2903 p->link_error_recovery_counter = (u8)temp_32;
2905 temp_32 = be32_to_cpu(rsp.link_downed);
2906 if (temp_32 > 0xFFUL)
2907 p->link_downed_counter = 0xFF;
2909 p->link_downed_counter = (u8)temp_32;
2911 temp_64 = be64_to_cpu(rsp.port_rcv_errors);
2912 if (temp_64 > 0xFFFFUL)
2913 p->port_rcv_errors = cpu_to_be16(0xFFFF);
2915 p->port_rcv_errors = cpu_to_be16((u16)temp_64);
2917 temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
2918 if (temp_64 > 0xFFFFUL)
2919 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
2921 p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
2923 temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
2924 p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
2926 temp_64 = be64_to_cpu(rsp.port_xmit_discards);
2927 if (temp_64 > 0xFFFFUL)
2928 p->port_xmit_discards = cpu_to_be16(0xFFFF);
2930 p->port_xmit_discards = cpu_to_be16((u16)temp_64);
2932 temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
2933 if (temp_64 > 0xFFUL)
2934 p->port_xmit_constraint_errors = 0xFF;
2936 p->port_xmit_constraint_errors = (u8)temp_64;
2938 temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
2939 if (temp_64 > 0xFFUL)
2940 p->port_rcv_constraint_errors = 0xFFUL;
2942 p->port_rcv_constraint_errors = (u8)temp_64;
2944 /* LocalLink: 7:4, BufferOverrun: 3:0 */
2945 temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
2946 if (temp_64 > 0xFUL)
2949 temp_link_overrun_errors = temp_64 << 4;
2951 temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
2952 if (temp_64 > 0xFUL)
2954 temp_link_overrun_errors |= temp_64;
2956 p->link_overrun_errors = (u8)temp_link_overrun_errors;
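/*
 * Worked example of the nibble packing above: a LocalLinkIntegrityErrors
 * count of 23 saturates to 0xf and an ExcessiveBufferOverruns count of 5
 * fits as-is, giving link_overrun_errors = (0xf << 4) | 0x5 = 0xf5.
 */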
2958 p->vl15_dropped = 0; /* N/A for OPA */
2961 return reply((struct ib_mad_hdr *)pmp);
2964 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
2965 struct ib_device *ibdev,
2966 u8 port, u32 *resp_len)
2968 size_t response_data_size;
2969 struct _port_ei *rsp;
2970 struct opa_port_error_info_msg *req;
2971 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2978 req = (struct opa_port_error_info_msg *)pmp->data;
2979 rsp = &req->port[0];
2981 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
2982 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
2984 memset(rsp, 0, sizeof(*rsp));
2986 if (num_ports != 1 || num_ports != num_pslm) {
2987 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2988 return reply((struct ib_mad_hdr *)pmp);
2992 response_data_size = sizeof(struct opa_port_error_info_msg);
2994 if (response_data_size > sizeof(pmp->data)) {
2995 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2996 return reply((struct ib_mad_hdr *)pmp);
3000 * The bit set in the mask needs to be consistent with the port
3001 * the request came in on.
3003 port_mask = be64_to_cpu(req->port_select_mask[3]);
3004 port_num = find_first_bit((unsigned long *)&port_mask,
3005 sizeof(port_mask) * 8);
3007 if (port_num != port) {
3008 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3009 return reply((struct ib_mad_hdr *)pmp);
3012 /* PortRcvErrorInfo */
3013 rsp->port_rcv_ei.status_and_code =
3014 dd->err_info_rcvport.status_and_code;
3015 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
3016 &dd->err_info_rcvport.packet_flit1, sizeof(u64));
3017 memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
3018 &dd->err_info_rcvport.packet_flit2, sizeof(u64));
3020 /* ExcessiveBufferOverrunInfo */
3021 reg = read_csr(dd, RCV_ERR_INFO);
3022 if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
3024 * if the RcvExcessBufferOverrun bit is set, save SC of
3025 * first pkt that encountered an excess buffer overrun
3029 tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
3031 rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
3032 /* set the status bit */
3033 rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
3036 rsp->port_xmit_constraint_ei.status =
3037 dd->err_info_xmit_constraint.status;
3038 rsp->port_xmit_constraint_ei.pkey =
3039 cpu_to_be16(dd->err_info_xmit_constraint.pkey);
3040 rsp->port_xmit_constraint_ei.slid =
3041 cpu_to_be32(dd->err_info_xmit_constraint.slid);
3043 rsp->port_rcv_constraint_ei.status =
3044 dd->err_info_rcv_constraint.status;
3045 rsp->port_rcv_constraint_ei.pkey =
3046 cpu_to_be16(dd->err_info_rcv_constraint.pkey);
3047 rsp->port_rcv_constraint_ei.slid =
3048 cpu_to_be32(dd->err_info_rcv_constraint.slid);
3050 /* UncorrectableErrorInfo */
3051 rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
3053 /* FMConfigErrorInfo */
3054 rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
3057 *resp_len += response_data_size;
3059 return reply((struct ib_mad_hdr *)pmp);
3062 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
3063 struct ib_device *ibdev,
3064 u8 port, u32 *resp_len)
3066 struct opa_clear_port_status *req =
3067 (struct opa_clear_port_status *)pmp->data;
3068 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3069 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3070 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3071 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3072 u64 portn = be64_to_cpu(req->port_select_mask[3]);
3073 u32 counter_select = be32_to_cpu(req->counter_select_mask);
3074 unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
3077 if ((nports != 1) || (portn != 1 << port)) {
3078 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3079 return reply((struct ib_mad_hdr *)pmp);
3082 * Only counters returned by pma_get_opa_portstatus() are
3083 * handled, so when pma_get_opa_portstatus() gets a fix,
3084 * the corresponding change should be made here as well.
3087 if (counter_select & CS_PORT_XMIT_DATA)
3088 write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
3090 if (counter_select & CS_PORT_RCV_DATA)
3091 write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
3093 if (counter_select & CS_PORT_XMIT_PKTS)
3094 write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3096 if (counter_select & CS_PORT_RCV_PKTS)
3097 write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
3099 if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
3100 write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3102 if (counter_select & CS_PORT_MCAST_RCV_PKTS)
3103 write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
3105 if (counter_select & CS_PORT_XMIT_WAIT)
3106 write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
3108 /* ignore CS_SW_PORT_CONGESTION for HFIs */
3110 if (counter_select & CS_PORT_RCV_FECN)
3111 write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
3113 if (counter_select & CS_PORT_RCV_BECN)
3114 write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
3116 /* ignore CS_PORT_XMIT_TIME_CONG for HFIs */
3117 /* ignore CS_PORT_XMIT_WASTED_BW for now */
3118 /* ignore CS_PORT_XMIT_WAIT_DATA for now */
3119 if (counter_select & CS_PORT_RCV_BUBBLE)
3120 write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
3122 /* Only applicable for switch */
3123 /* if (counter_select & CS_PORT_MARK_FECN)
3124 * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
3127 if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
3128 write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
3130 /* ignore CS_PORT_RCV_SWITCH_RELAY_ERRORS for HFIs */
3131 if (counter_select & CS_PORT_XMIT_DISCARDS)
3132 write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
3134 if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
3135 write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
3137 if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
3138 write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
3140 if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
3141 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3143 if (counter_select & CS_LINK_ERROR_RECOVERY) {
3144 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3145 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3146 CNTR_INVALID_VL, 0);
3149 if (counter_select & CS_PORT_RCV_ERRORS)
3150 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3152 if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
3153 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3154 dd->rcv_ovfl_cnt = 0;
3157 if (counter_select & CS_FM_CONFIG_ERRORS)
3158 write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
3160 if (counter_select & CS_LINK_DOWNED)
3161 write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
3163 if (counter_select & CS_UNCORRECTABLE_ERRORS)
3164 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
3166 for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
3167 if (counter_select & CS_PORT_XMIT_DATA)
3168 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3170 if (counter_select & CS_PORT_RCV_DATA)
3171 write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3173 if (counter_select & CS_PORT_XMIT_PKTS)
3174 write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3176 if (counter_select & CS_PORT_RCV_PKTS)
3177 write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3179 if (counter_select & CS_PORT_XMIT_WAIT)
3180 write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3182 /* sw_port_vl_congestion is 0 for HFIs */
3183 if (counter_select & CS_PORT_RCV_FECN)
3184 write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3186 if (counter_select & CS_PORT_RCV_BECN)
3187 write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3189 /* port_vl_xmit_time_cong is 0 for HFIs */
3190 /* port_vl_xmit_wasted_bw ??? */
3191 /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3192 if (counter_select & CS_PORT_RCV_BUBBLE)
3193 write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3195 /* if (counter_select & CS_PORT_MARK_FECN)
3196 * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3198 if (counter_select & CS_PORT_XMIT_DISCARDS)
3199 write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3200 idx_from_vl(vl), 0);
3204 *resp_len += sizeof(*req);
3206 return reply((struct ib_mad_hdr *)pmp);
3209 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3210 struct ib_device *ibdev,
3211 u8 port, u32 *resp_len)
3213 struct _port_ei *rsp;
3214 struct opa_port_error_info_msg *req;
3215 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3220 u32 error_info_select;
3222 req = (struct opa_port_error_info_msg *)pmp->data;
3223 rsp = &req->port[0];
3225 num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3226 num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3228 memset(rsp, 0, sizeof(*rsp));
3230 if (num_ports != 1 || num_ports != num_pslm) {
3231 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3232 return reply((struct ib_mad_hdr *)pmp);
3236 * The bit set in the mask needs to be consistent with the port
3237 * the request came in on.
3239 port_mask = be64_to_cpu(req->port_select_mask[3]);
3240 port_num = find_first_bit((unsigned long *)&port_mask,
3241 sizeof(port_mask) * 8);
3243 if (port_num != port) {
3244 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3245 return reply((struct ib_mad_hdr *)pmp);
3248 error_info_select = be32_to_cpu(req->error_info_select_mask);
3250 /* PortRcvErrorInfo */
3251 if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3252 /* turn off status bit */
3253 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3255 /* ExcessiveBufferOverrunInfo */
3256 if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
3258 * status bit is essentially kept in the h/w - bit 5 of
3259 * RCV_ERR_INFO
3261 write_csr(dd, RCV_ERR_INFO,
3262 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3264 if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3265 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3267 if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3268 dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3270 /* UncorrectableErrorInfo */
3271 if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3272 /* turn off status bit */
3273 dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3275 /* FMConfigErrorInfo */
3276 if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3277 /* turn off status bit */
3278 dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3281 *resp_len += sizeof(*req);
3283 return reply((struct ib_mad_hdr *)pmp);
3286 struct opa_congestion_info_attr {
3287 __be16 congestion_info;
3288 u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
3289 u8 congestion_log_length;
3292 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3293 struct ib_device *ibdev, u8 port,
3296 struct opa_congestion_info_attr *p =
3297 (struct opa_congestion_info_attr *)data;
3298 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3299 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3301 p->congestion_info = 0;
3302 p->control_table_cap = ppd->cc_max_table_entries;
3303 p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3306 *resp_len += sizeof(*p);
3308 return reply((struct ib_mad_hdr *)smp);
3311 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3312 u8 *data, struct ib_device *ibdev,
3313 u8 port, u32 *resp_len)
3316 struct opa_congestion_setting_attr *p =
3317 (struct opa_congestion_setting_attr *)data;
3318 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3319 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3320 struct opa_congestion_setting_entry_shadow *entries;
3321 struct cc_state *cc_state;
3325 cc_state = get_cc_state(ppd);
3329 return reply((struct ib_mad_hdr *)smp);
3332 entries = cc_state->cong_setting.entries;
3333 p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3334 p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3335 for (i = 0; i < OPA_MAX_SLS; i++) {
3336 p->entries[i].ccti_increase = entries[i].ccti_increase;
3337 p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3338 p->entries[i].trigger_threshold =
3339 entries[i].trigger_threshold;
3340 p->entries[i].ccti_min = entries[i].ccti_min;
3346 *resp_len += sizeof(*p);
3348 return reply((struct ib_mad_hdr *)smp);
3352 * Apply congestion control information stored in the ppd to the
3355 static void apply_cc_state(struct hfi1_pportdata *ppd)
3357 struct cc_state *old_cc_state, *new_cc_state;
3359 new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3364 * Hold the lock for updating *and* to prevent ppd information
3365 * from changing during the update.
3367 spin_lock(&ppd->cc_state_lock);
3369 old_cc_state = get_cc_state_protected(ppd);
3370 if (!old_cc_state) {
3371 /* never active, or shutting down */
3372 spin_unlock(&ppd->cc_state_lock);
3373 kfree(new_cc_state);
3377 *new_cc_state = *old_cc_state;
3379 new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3380 memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3381 ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3383 new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3384 new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3385 memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3386 OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3388 rcu_assign_pointer(ppd->cc_state, new_cc_state);
3390 spin_unlock(&ppd->cc_state_lock);
3392 kfree_rcu(old_cc_state, rcu);
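/*
 * apply_cc_state() publishes the new state with rcu_assign_pointer()
 * and defers freeing the old one until all readers have finished. A
 * sketch of the matching lockless read side (get_cc_state() wraps
 * this pattern; use() stands in for the caller's work):
 *
 * rcu_read_lock();
 * cc_state = rcu_dereference(ppd->cc_state);
 * if (cc_state)
 * use(cc_state);
 * rcu_read_unlock();
 */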
3395 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3396 struct ib_device *ibdev, u8 port,
3399 struct opa_congestion_setting_attr *p =
3400 (struct opa_congestion_setting_attr *)data;
3401 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3402 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3403 struct opa_congestion_setting_entry_shadow *entries;
3407 * Save details from packet into the ppd. Hold the cc_state_lock so
3408 * our information is consistent with anyone trying to apply the state.
3410 spin_lock(&ppd->cc_state_lock);
3411 ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3413 entries = ppd->congestion_entries;
3414 for (i = 0; i < OPA_MAX_SLS; i++) {
3415 entries[i].ccti_increase = p->entries[i].ccti_increase;
3416 entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3417 entries[i].trigger_threshold =
3418 p->entries[i].trigger_threshold;
3419 entries[i].ccti_min = p->entries[i].ccti_min;
3421 spin_unlock(&ppd->cc_state_lock);
3423 /* now apply the information */
3424 apply_cc_state(ppd);
3426 return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3430 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3431 u8 *data, struct ib_device *ibdev,
3432 u8 port, u32 *resp_len)
3434 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3435 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3436 struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3441 smp->status |= IB_SMP_INVALID_FIELD;
3442 return reply((struct ib_mad_hdr *)smp);
3445 spin_lock_irq(&ppd->cc_log_lock);
3447 cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3448 cong_log->congestion_flags = 0;
3449 cong_log->threshold_event_counter =
3450 cpu_to_be16(ppd->threshold_event_counter);
3451 memcpy(cong_log->threshold_cong_event_map,
3452 ppd->threshold_cong_event_map,
3453 sizeof(cong_log->threshold_cong_event_map));
3454 /* keep timestamp in units of 1.024 usec */
3455 ts = ktime_to_ns(ktime_get()) / 1024;
3456 cong_log->current_time_stamp = cpu_to_be32(ts);
3457 for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3458 struct opa_hfi1_cong_log_event_internal *cce =
3459 &ppd->cc_events[ppd->cc_mad_idx++];
3460 if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3461 ppd->cc_mad_idx = 0;
3463 * Entries which are older than twice the time
3464 * required to wrap the counter are supposed to
3465 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3467 if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
3469 memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3470 memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3472 cong_log->events[i].sl_svc_type_cn_entry =
3473 ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3474 cong_log->events[i].remote_lid_cn_entry =
3475 cpu_to_be32(cce->rlid);
3476 cong_log->events[i].timestamp_cn_entry =
3477 cpu_to_be32(cce->timestamp);
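/*
 * sl_svc_type_cn_entry packs the SL into bits 7:3 and the service type
 * into bits 2:0; e.g. SL 9 with svc_type 2 encodes as
 * (9 << 3) | 2 = 0x4a.
 */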
3481 * Reset threshold_cong_event_map, and threshold_event_counter
3482 * to 0 when log is read.
3484 memset(ppd->threshold_cong_event_map, 0x0,
3485 sizeof(ppd->threshold_cong_event_map));
3486 ppd->threshold_event_counter = 0;
3488 spin_unlock_irq(&ppd->cc_log_lock);
3491 *resp_len += sizeof(struct opa_hfi1_cong_log);
3493 return reply((struct ib_mad_hdr *)smp);
3496 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3497 struct ib_device *ibdev, u8 port,
3500 struct ib_cc_table_attr *cc_table_attr =
3501 (struct ib_cc_table_attr *)data;
3502 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3503 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3504 u32 start_block = OPA_AM_START_BLK(am);
3505 u32 n_blocks = OPA_AM_NBLK(am);
3506 struct ib_cc_table_entry_shadow *entries;
3509 struct cc_state *cc_state;
3511 /* sanity check n_blocks, start_block */
3512 if (n_blocks == 0 ||
3513 start_block + n_blocks > ppd->cc_max_table_entries) {
3514 smp->status |= IB_SMP_INVALID_FIELD;
3515 return reply((struct ib_mad_hdr *)smp);
3520 cc_state = get_cc_state(ppd);
3524 return reply((struct ib_mad_hdr *)smp);
3527 sentry = start_block * IB_CCT_ENTRIES;
3528 eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3530 cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3532 entries = cc_state->cct.entries;
3534 /* return n_blocks, though the last block may not be full */
3535 for (j = 0, i = sentry; i < eentry; j++, i++)
3536 cc_table_attr->ccti_entries[j].entry =
3537 cpu_to_be16(entries[i].entry);
3542 *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3544 return reply((struct ib_mad_hdr *)smp);
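/*
 * Worked example for the block math above, taking IB_CCT_ENTRIES == 64:
 * start_block = 1 and n_blocks = 2 give sentry = 64 and eentry = 192,
 * i.e. table entries 64..191 are returned, and resp_len grows by
 * sizeof(u16) * (64 * 2 + 1) - the extra u16 covering ccti_limit.
 */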
3547 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3548 struct ib_device *ibdev, u8 port,
3551 struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
3552 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3553 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3554 u32 start_block = OPA_AM_START_BLK(am);
3555 u32 n_blocks = OPA_AM_NBLK(am);
3556 struct ib_cc_table_entry_shadow *entries;
3561 /* sanity check n_blocks, start_block */
3562 if (n_blocks == 0 ||
3563 start_block + n_blocks > ppd->cc_max_table_entries) {
3564 smp->status |= IB_SMP_INVALID_FIELD;
3565 return reply((struct ib_mad_hdr *)smp);
3568 sentry = start_block * IB_CCT_ENTRIES;
3569 eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
3570 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
3572 /* sanity check ccti_limit */
3573 ccti_limit = be16_to_cpu(p->ccti_limit);
3574 if (ccti_limit + 1 > eentry) {
3575 smp->status |= IB_SMP_INVALID_FIELD;
3576 return reply((struct ib_mad_hdr *)smp);
3580 * Save details from packet into the ppd. Hold the cc_state_lock so
3581 * our information is consistent with anyone trying to apply the state.
3583 spin_lock(&ppd->cc_state_lock);
3584 ppd->total_cct_entry = ccti_limit + 1;
3585 entries = ppd->ccti_entries;
3586 for (j = 0, i = sentry; i < eentry; j++, i++)
3587 entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
3588 spin_unlock(&ppd->cc_state_lock);
3590 /* now apply the information */
3591 apply_cc_state(ppd);
3593 return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
3596 struct opa_led_info {
3597 __be32 rsvd_led_mask;
3601 #define OPA_LED_SHIFT 31
3602 #define OPA_LED_MASK BIT(OPA_LED_SHIFT)
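/*
 * The beacon state travels in bit 31 of rsvd_led_mask: the get path
 * below encodes it as (is_beaconing_active << OPA_LED_SHIFT) and the
 * set path decodes it with (be32_to_cpu(rsvd_led_mask) & OPA_LED_MASK).
 */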
3604 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3605 struct ib_device *ibdev, u8 port,
3608 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3609 struct hfi1_pportdata *ppd = dd->pport;
3610 struct opa_led_info *p = (struct opa_led_info *)data;
3611 u32 nport = OPA_AM_NPORT(am);
3612 u32 is_beaconing_active;
3615 smp->status |= IB_SMP_INVALID_FIELD;
3616 return reply((struct ib_mad_hdr *)smp);
3620 * This pairs with the memory barrier in hfi1_start_led_override to
3621 * ensure that we read the correct state of LED beaconing represented
3622 * by led_override_timer_active
3625 is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
3626 p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
3629 *resp_len += sizeof(struct opa_led_info);
3631 return reply((struct ib_mad_hdr *)smp);
3634 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
3635 struct ib_device *ibdev, u8 port,
3638 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3639 struct opa_led_info *p = (struct opa_led_info *)data;
3640 u32 nport = OPA_AM_NPORT(am);
3641 int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
3644 smp->status |= IB_SMP_INVALID_FIELD;
3645 return reply((struct ib_mad_hdr *)smp);
3649 hfi1_start_led_override(dd->pport, 2000, 1500);
3651 shutdown_led_override(dd->pport);
3653 return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
3656 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3657 u8 *data, struct ib_device *ibdev, u8 port,
3661 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3664 case IB_SMP_ATTR_NODE_DESC:
3665 ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
3668 case IB_SMP_ATTR_NODE_INFO:
3669 ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
3672 case IB_SMP_ATTR_PORT_INFO:
3673 ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
3676 case IB_SMP_ATTR_PKEY_TABLE:
3677 ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
3680 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3681 ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
3684 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3685 ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
3688 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3689 ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
3692 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3693 ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3696 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3697 ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
3700 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3701 ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
3704 case OPA_ATTRIB_ID_CABLE_INFO:
3705 ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
3708 case IB_SMP_ATTR_VL_ARB_TABLE:
3709 ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
3712 case OPA_ATTRIB_ID_CONGESTION_INFO:
3713 ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
3716 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3717 ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
3720 case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
3721 ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
3724 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3725 ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
3728 case IB_SMP_ATTR_LED_INFO:
3729 ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
3732 case IB_SMP_ATTR_SM_INFO:
3733 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
3734 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3735 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3736 return IB_MAD_RESULT_SUCCESS;
3739 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3740 ret = reply((struct ib_mad_hdr *)smp);
3746 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
3747 u8 *data, struct ib_device *ibdev, u8 port,
3751 struct hfi1_ibport *ibp = to_iport(ibdev, port);
3754 case IB_SMP_ATTR_PORT_INFO:
3755 ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
3758 case IB_SMP_ATTR_PKEY_TABLE:
3759 ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
3762 case OPA_ATTRIB_ID_SL_TO_SC_MAP:
3763 ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
3766 case OPA_ATTRIB_ID_SC_TO_SL_MAP:
3767 ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
3770 case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
3771 ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
3774 case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
3775 ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
3778 case OPA_ATTRIB_ID_PORT_STATE_INFO:
3779 ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
3782 case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
3783 ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
3786 case IB_SMP_ATTR_VL_ARB_TABLE:
3787 ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
3790 case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
3791 ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
3794 case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
3795 ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
3798 case IB_SMP_ATTR_LED_INFO:
3799 ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
3802 case IB_SMP_ATTR_SM_INFO:
3803 if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
3804 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
3805 if (ibp->rvp.port_cap_flags & IB_PORT_SM)
3806 return IB_MAD_RESULT_SUCCESS;
3809 smp->status |= IB_SMP_UNSUP_METH_ATTR;
3810 ret = reply((struct ib_mad_hdr *)smp);
3816 static inline void set_aggr_error(struct opa_aggregate *ag)
3818 ag->err_reqlength |= cpu_to_be16(0x8000);
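/*
 * err_reqlength layout, as consumed by the aggregate walkers below:
 * bit 15 flags an error (set by set_aggr_error() above) and bits 6:0
 * give the payload length in 8-byte units. E.g. a value of 0x0003
 * means a 24-byte payload, so agg_size = sizeof(struct opa_aggregate)
 * + 24.
 */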
3821 static int subn_get_opa_aggregate(struct opa_smp *smp,
3822 struct ib_device *ibdev, u8 port,
3826 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3827 u8 *next_smp = opa_get_smp_data(smp);
3829 if (num_attr < 1 || num_attr > 117) {
3830 smp->status |= IB_SMP_INVALID_FIELD;
3831 return reply((struct ib_mad_hdr *)smp);
3834 for (i = 0; i < num_attr; i++) {
3835 struct opa_aggregate *agg;
3836 size_t agg_data_len;
3840 agg = (struct opa_aggregate *)next_smp;
3841 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3842 agg_size = sizeof(*agg) + agg_data_len;
3843 am = be32_to_cpu(agg->attr_mod);
3845 *resp_len += agg_size;
3847 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3848 smp->status |= IB_SMP_INVALID_FIELD;
3849 return reply((struct ib_mad_hdr *)smp);
3852 /* zero the payload for this segment */
3853 memset(next_smp + sizeof(*agg), 0, agg_data_len);
3855 (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
3857 if (smp->status & ~IB_SMP_DIRECTION) {
3858 set_aggr_error(agg);
3859 return reply((struct ib_mad_hdr *)smp);
3861 next_smp += agg_size;
3864 return reply((struct ib_mad_hdr *)smp);
3867 static int subn_set_opa_aggregate(struct opa_smp *smp,
3868 struct ib_device *ibdev, u8 port,
3872 u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
3873 u8 *next_smp = opa_get_smp_data(smp);
3875 if (num_attr < 1 || num_attr > 117) {
3876 smp->status |= IB_SMP_INVALID_FIELD;
3877 return reply((struct ib_mad_hdr *)smp);
3880 for (i = 0; i < num_attr; i++) {
3881 struct opa_aggregate *agg;
3882 size_t agg_data_len;
3886 agg = (struct opa_aggregate *)next_smp;
3887 agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
3888 agg_size = sizeof(*agg) + agg_data_len;
3889 am = be32_to_cpu(agg->attr_mod);
3891 *resp_len += agg_size;
3893 if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
3894 smp->status |= IB_SMP_INVALID_FIELD;
3895 return reply((struct ib_mad_hdr *)smp);
3898 (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
3900 if (smp->status & ~IB_SMP_DIRECTION) {
3901 set_aggr_error(agg);
3902 return reply((struct ib_mad_hdr *)smp);
3904 next_smp += agg_size;
3907 return reply((struct ib_mad_hdr *)smp);
3911 * OPAv1 specifies that, on the transition to link up, these counters
3912 * are cleared:
3913 * PortRcvErrors [*]
3914 * LinkErrorRecovery
3915 * LocalLinkIntegrityErrors
3916 * ExcessiveBufferOverruns [*]
3918 * [*] Error info associated with these counters is retained, but the
3919 * error info status is reset to 0.
3921 void clear_linkup_counters(struct hfi1_devdata *dd)
3923 /* PortRcvErrors */
3924 write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3925 dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3926 /* LinkErrorRecovery */
3927 write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3928 write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
3929 /* LocalLinkIntegrityErrors */
3930 write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3931 /* ExcessiveBufferOverruns */
3932 write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3933 dd->rcv_ovfl_cnt = 0;
3934 dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3938 * is_local_mad() returns 1 if 'mad' was sent from, and is destined
3939 * to, the local node; 0 otherwise.
3941 static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
3942 const struct ib_wc *in_wc)
3944 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3945 const struct opa_smp *smp = (const struct opa_smp *)mad;
3947 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
3948 return (smp->hop_cnt == 0 &&
3949 smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
3950 smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
3953 return (in_wc->slid == ppd->lid);
3957 * opa_local_smp_check() should only be called on MADs for which
3958 * is_local_mad() returns true. It applies the SMP checks that are
3959 * specific to SMPs which are sent from, and destined to this node.
3960 * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
3961 * otherwise.
3963 * SMPs which arrive from other nodes are instead checked by
3964 * opa_smp_check().
3966 static int opa_local_smp_check(struct hfi1_ibport *ibp,
3967 const struct ib_wc *in_wc)
3969 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3970 u16 slid = in_wc->slid;
3973 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
3976 pkey = ppd->pkeys[in_wc->pkey_index];
3978 * We need to do the "node-local" checks specified in OPAv1,
3979 * rev 0.90, section 9.10.26, which are:
3980 * - pkey is 0x7fff or 0xffff
3981 * - Source QPN == 0 || Destination QPN == 0
3982 * - the MAD header's management class is either
3983 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
3984 * IB_MGMT_CLASS_SUBN_LID_ROUTED
3987 * However, we know (and so don't need to check again) that,
3988 * for local SMPs, the MAD stack passes MADs with:
3990 * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
3991 * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
3992 * our own port's lid
3995 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
3997 ingress_pkey_table_fail(ppd, pkey, slid);
4001 static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
4002 u8 port, const struct opa_mad *in_mad,
4003 struct opa_mad *out_mad,
4006 struct opa_smp *smp = (struct opa_smp *)out_mad;
4007 struct hfi1_ibport *ibp = to_iport(ibdev, port);
4014 data = opa_get_smp_data(smp);
4016 am = be32_to_cpu(smp->attr_mod);
4017 attr_id = smp->attr_id;
4018 if (smp->class_version != OPA_SMI_CLASS_VERSION) {
4019 smp->status |= IB_SMP_UNSUP_VERSION;
4020 ret = reply((struct ib_mad_hdr *)smp);
4023 ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
4024 smp->route.dr.dr_slid, smp->route.dr.return_path,
4027 u32 port_num = be32_to_cpu(smp->attr_mod);
4030 * If this is a get/set portinfo, we already check the
4031 * M_Key if the MAD is for another port and the M_Key
4032 * is OK on the receiving port. This check is needed
4033 * to increment the error counters when the M_Key
4034 * fails to match on *both* ports.
4036 if (attr_id == IB_SMP_ATTR_PORT_INFO &&
4037 (smp->method == IB_MGMT_METHOD_GET ||
4038 smp->method == IB_MGMT_METHOD_SET) &&
4039 port_num && port_num <= ibdev->phys_port_cnt &&
4041 (void)check_mkey(to_iport(ibdev, port_num),
4042 (struct ib_mad_hdr *)smp, 0,
4043 smp->mkey, smp->route.dr.dr_slid,
4044 smp->route.dr.return_path,
4046 ret = IB_MAD_RESULT_FAILURE;
4050 *resp_len = opa_get_smp_header_size(smp);
4052 switch (smp->method) {
4053 case IB_MGMT_METHOD_GET:
4056 clear_opa_smp_data(smp);
4057 ret = subn_get_opa_sma(attr_id, smp, am, data,
4058 ibdev, port, resp_len);
4060 case OPA_ATTRIB_ID_AGGREGATE:
4061 ret = subn_get_opa_aggregate(smp, ibdev, port,
4066 case IB_MGMT_METHOD_SET:
4069 ret = subn_set_opa_sma(attr_id, smp, am, data,
4070 ibdev, port, resp_len);
4072 case OPA_ATTRIB_ID_AGGREGATE:
4073 ret = subn_set_opa_aggregate(smp, ibdev, port,
4078 case IB_MGMT_METHOD_TRAP:
4079 case IB_MGMT_METHOD_REPORT:
4080 case IB_MGMT_METHOD_REPORT_RESP:
4081 case IB_MGMT_METHOD_GET_RESP:
4083 * The ib_mad module will call us to process responses
4084 * before checking for other consumers.
4085 * Just tell the caller to process it normally.
4087 ret = IB_MAD_RESULT_SUCCESS;
4090 smp->status |= IB_SMP_UNSUP_METHOD;
4091 ret = reply((struct ib_mad_hdr *)smp);

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_mad_hdr *)smp);
		return ret;
	}

	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
			 smp->mkey, (__force __be32)smp->dr_slid,
			 smp->return_path, smp->hop_cnt);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void)check_mkey(to_iport(ibdev, port_num),
					 (struct ib_mad_hdr *)smp, 0,
					 smp->mkey,
					 (__force __be32)smp->dr_slid,
					 smp->return_path, smp->hop_cnt);
		ret = IB_MAD_RESULT_FAILURE;
		return ret;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			break;
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)smp);
			break;
		}
		break;
	}

	return ret;
}
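
/*
 * Editorial sketch, not driver code: every "unsupported" leg in these
 * handlers follows one pattern -- OR a status bit into the header and
 * hand the same buffer back through reply() so it goes out as the
 * response. example_refuse() is a hypothetical distillation of that
 * pattern.
 */
static int __maybe_unused example_refuse(struct ib_smp *smp,
					 __be16 status_bit)
{
	/* status_bit: e.g. IB_SMP_UNSUP_METHOD or IB_SMP_UNSUP_METH_ATTR */
	smp->status |= status_bit;
	return reply((struct ib_mad_hdr *)smp);
}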

static int process_perf(struct ib_device *ibdev, u8 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	struct ib_class_port_info *cpi = (struct ib_class_port_info *)
						&pmp->data;
	int ret = IB_MAD_RESULT_FAILURE;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_mad_hdr *)pmp);
		return ret;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_ib_portcounters(pmp, ibdev, port);
			break;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
			break;
		case IB_PMA_CLASS_PORT_INFO:
			cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;
	case IB_MGMT_METHOD_SET:
		if (pmp->mad_hdr.attr_id) {
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
		}
		break;
	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_mad_hdr *)pmp);
		break;
	}

	return ret;
}
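
/*
 * Editorial sketch, not driver code: the Get(ClassPortInfo) leg above
 * advertises IB_PMA_CLASS_CAP_EXT_WIDTH. A hypothetical consumer
 * probing the returned class port info for 64-bit counter support
 * might look like this.
 */
static bool __maybe_unused
example_has_ext_counters(const struct ib_class_port_info *cpi)
{
	/* set when the extended (64-bit) port counters are implemented */
	return (cpi->capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH) != 0;
}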

static int process_perf_opa(struct ib_device *ibdev, u8 port,
			    const struct opa_mad *in_mad,
			    struct opa_mad *out_mad, u32 *resp_len)
{
	struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;

	if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		return reply((struct ib_mad_hdr *)pmp);
	}

	*resp_len = sizeof(pmp->mad_hdr);

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
			break;
		case OPA_PM_ATTRIB_ID_PORT_STATUS:
			ret = pma_get_opa_portstatus(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
			ret = pma_get_opa_datacounters(pmp, ibdev, port,
						       resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
			ret = pma_get_opa_porterrors(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_INFO:
			ret = pma_get_opa_errorinfo(pmp, ibdev, port,
						    resp_len);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;
	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
			ret = pma_set_opa_portstatus(pmp, ibdev, port,
						     resp_len);
			break;
		case OPA_PM_ATTRIB_ID_ERROR_INFO:
			ret = pma_set_opa_errorinfo(pmp, ibdev, port,
						    resp_len);
			break;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_mad_hdr *)pmp);
			break;
		}
		break;
	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_mad_hdr *)pmp);
		break;
	}

	return ret;
}
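
/*
 * Editorial sketch, not driver code: *resp_len is seeded with the bare
 * header size above, and each pma_*_opa_* handler is then expected to
 * grow it by the attribute payload it actually wrote.
 * example_grow_resp_len() is a hypothetical restatement of that
 * contract.
 */
static void __maybe_unused example_grow_resp_len(u32 *resp_len,
						 u32 payload_len)
{
	if (resp_len)
		*resp_len += payload_len;	/* header already counted */
}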

static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
				u8 port, const struct ib_wc *in_wc,
				const struct ib_grh *in_grh,
				const struct opa_mad *in_mad,
				struct opa_mad *out_mad, size_t *out_mad_size,
				u16 *out_mad_pkey_index)
{
	int ret;
	int pkey_idx;
	u32 resp_len = 0;
	struct hfi1_ibport *ibp = to_iport(ibdev, port);

	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
	if (pkey_idx < 0) {
		pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
			hfi1_get_pkey(ibp, 1));
		pkey_idx = 1;
	}
	*out_mad_pkey_index = (u16)pkey_idx;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		if (is_local_mad(ibp, in_mad, in_wc)) {
			ret = opa_local_smp_check(ibp, in_wc);
			if (ret)
				return IB_MAD_RESULT_FAILURE;
		}
		ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
				       out_mad, &resp_len);
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf_opa(ibdev, port, in_mad, out_mad,
				       &resp_len);
		break;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	}

	if (ret & IB_MAD_RESULT_REPLY)
		*out_mad_size = round_up(resp_len, 8);
	else if (ret & IB_MAD_RESULT_SUCCESS)
		*out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);

	return ret;
}
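
/*
 * Editorial sketch with hypothetical numbers: replies are padded to a
 * multiple of 8 bytes before being reported via *out_mad_size.
 */
static size_t __maybe_unused example_reply_size(u32 resp_len)
{
	return round_up(resp_len, 8);	/* e.g. 131 -> 136 */
}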

static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
			       const struct ib_wc *in_wc,
			       const struct ib_grh *in_grh,
			       const struct ib_mad *in_mad,
			       struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		break;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
		break;
	}

	return ret;
}

/**
 * hfi1_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @in_mad_size: size of the incoming MAD buffer
 * @out_mad: any outgoing MAD reply
 * @out_mad_size: on return, the size of any outgoing MAD reply
 * @out_mad_pkey_index: the pkey index to use for the reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
		     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
		     u16 *out_mad_pkey_index)
{
	switch (in_mad->base_version) {
	case OPA_MGMT_BASE_VERSION:
		if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
			dev_err(ibdev->dma_device, "invalid in_mad_size\n");
			return IB_MAD_RESULT_FAILURE;
		}
		return hfi1_process_opa_mad(ibdev, mad_flags, port,
					    in_wc, in_grh,
					    (struct opa_mad *)in_mad,
					    (struct opa_mad *)out_mad,
					    out_mad_size,
					    out_mad_pkey_index);
	case IB_MGMT_BASE_VERSION:
		return hfi1_process_ib_mad(ibdev, mad_flags, port,
					   in_wc, in_grh,
					   (const struct ib_mad *)in_mad,
					   (struct ib_mad *)out_mad);
	default:
		break;
	}

	return IB_MAD_RESULT_FAILURE;
}
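
/*
 * Editorial sketch, not driver code: OPA MADs are larger than IB MADs,
 * which is why the OPA leg above validates in_mad_size before casting.
 * example_mad_size_ok() is a hypothetical caller-side restatement of
 * that size contract (the IB leg above does not currently check it).
 */
static bool __maybe_unused example_mad_size_ok(u8 base_version, size_t size)
{
	if (base_version == OPA_MGMT_BASE_VERSION)
		return size == sizeof(struct opa_mad);
	return size == sizeof(struct ib_mad);
}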