/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "qib.h"
#include "qib_qsfp.h"

/*
 * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
 */

#define QSFP_MAX_RETRY 4
static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
{
        struct qib_devdata *dd = ppd->dd;
        u32 out, mask;
        u8 *buff = bp;
        int ret, cnt, pass = 0;

        ret = mutex_lock_interruptible(&dd->eep_lock);
        if (ret)
                return ret;

        if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
        /*
         * We presume, if we are called at all, that this board has
         * QSFP. This is on the same i2c chain as the legacy parts,
         * but only responds if the module is selected via GPIO pins.
         * Further, there are very long setup and hold requirements
         * on MOD_SEL.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        if (ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                out <<= QSFP_GPIO_PORT2_SHIFT;
        }
        dd->f_gpio_mod(dd, out, mask, mask);
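        /*
         * Note: f_gpio_mod() is the chip-specific GPIO hook.  MOD_SEL_N is
         * included in "mask" but left out of "out", so (given the
         * active-low _N naming) the module-select pin ends up driven low
         * while RST_N and LP_MODE stay high, selecting this QSFP module on
         * the shared two-wire bus.
         */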
        /*
         * Module could take up to 2 msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
        msleep(2);

        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
        if (ret)
                qib_dev_porterr(dd, ppd->port,
                                "QSFP interface Reset for read failed\n");

        /* All QSFP modules are at A0 */
                int wlen = len - cnt;

                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
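                /*
                 * Example of the clipping above, assuming QSFP_PAGESIZE is
                 * the usual 128-byte page of SFF-8436: a 64-byte read
                 * starting at addr 96 gives in_page = 96, so wlen is clipped
                 * to 128 - 96 = 32 and bytes 96..127 go out in one TWSI
                 * transaction, leaving the rest for a later pass.
                 */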
                ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
                /* Some QSFPs fail the first try. Retry as a workaround. */
                if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
                        continue;
                /* qib_twsi_blk_rd() returns 1 on error, else 0 */
        /*
         * Module could take up to 10 usec after transfer before
         * ready to respond to MOD_SEL negation, and there is no way
         * to tell if it is ready, so we must wait.
         */
        udelay(10);

        /* set QSFP MODSEL, RST, LP all high */
        dd->f_gpio_mod(dd, mask, mask, mask);
        /*
         * Module could take up to 2 msec to respond to MOD_SEL
         * going away, and there is no way to tell if it is ready.
         */
        msleep(2);

        qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");

        if (pass >= QSFP_MAX_RETRY && ret)
                qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
        mutex_unlock(&dd->eep_lock);
/*
 * We do not ordinarily write the QSFP, but this is needed to select
 * the page on non-flat QSFPs, and possibly later unusual cases.
 */
static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
                          int len)
{
        struct qib_devdata *dd = ppd->dd;
        u32 out, mask;
        u8 *buff = bp;
        int ret, cnt;

        ret = mutex_lock_interruptible(&dd->eep_lock);
        if (ret)
                return ret;

        if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
        /*
         * We presume, if we are called at all, that this board has
         * QSFP. This is on the same i2c chain as the legacy parts,
         * but only responds if the module is selected via GPIO pins.
         * Further, there are very long setup and hold requirements
         * on MOD_SEL.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        if (ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                out <<= QSFP_GPIO_PORT2_SHIFT;
        }
        dd->f_gpio_mod(dd, out, mask, mask);
        /*
         * Module could take up to 2 msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
        msleep(2);

        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
        if (ret)
                qib_dev_porterr(dd, ppd->port,
                                "QSFP interface Reset for write failed\n");

        /* All QSFP modules are at A0 */
                int wlen = len - cnt;

                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
                ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);

                /* qib_twsi_blk_wr() returns 1 on error, else 0 */
        /*
         * Module could take up to 10 usec after transfer before
         * ready to respond to MOD_SEL negation, and there is no way
         * to tell if it is ready, so we must wait.
         */
        udelay(10);

        /* set QSFP MODSEL, RST, LP all high */
        dd->f_gpio_mod(dd, mask, mask, mask);

        /*
         * Module could take up to 2 msec to respond to MOD_SEL
         * going away, and there is no way to tell if it is ready.
         */
        msleep(2);
        mutex_unlock(&dd->eep_lock);
/*
 * For validation, we want to check the checksums, even of the
 * fields we do not otherwise use. This function reads the bytes from
 * <first> to <next - 1> and returns the 8 LSBs of the sum, or < 0 for errors.
 */
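/*
 * For reference (per SFF-8436; not restated elsewhere in this file): the
 * base checksum byte is expected to hold the low 8 bits of the sum of
 * bytes 128..190, and the extended checksum the low 8 bits of the sum of
 * bytes 192..222.  qsfp_cks() lets the cache-refresh code below fold in
 * the ranges it does not otherwise read into a named field.
 */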
static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
{
        int ret;
        u8 bval;

        while (first < next) {
                ret = qsfp_read(ppd, first, &bval, 1);
int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
{
        int ret;
        int idx;
        int cks = 0;
        u8 peek[4];

        /* ensure sane contents on invalid reads, for cable swaps */
        memset(cp, 0, sizeof(*cp));

        if (!qib_qsfp_mod_present(ppd)) {

        ret = qsfp_read(ppd, 0, peek, 3);

        if ((peek[0] & 0xFE) != 0x0C)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
        if ((peek[2] & 4) == 0) {
                /*
                 * If cable is paged, rather than "flat memory", we need to
                 * set the page to zero, even if it already appears to be zero.
                 */
                u8 poke = 0;

                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                if (ret < 0)
                        qib_dev_porterr(ppd->dd, ppd->port,
                                        "Failed QSFP Page set\n");
        }
        ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
        if ((cp->id & 0xFE) != 0x0C)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);

        ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);

        ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);

        ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);

        ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);

        ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
        for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
                cks += cp->vendor[idx];

        ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);

        ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
        for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
                cks += cp->oui[idx];

        ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
        for (idx = 0; idx < QSFP_PN_LEN; ++idx)
                cks += cp->partnum[idx];

        ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
        for (idx = 0; idx < QSFP_REV_LEN; ++idx)
                cks += cp->rev[idx];

        ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
        for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
                cks += cp->atten[idx];

        ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);

        ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
        if ((cks & 0xFF) != cp->cks1)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
                                cks & 0xFF);

        /* Second checksum covers 192 to (serial, date, lot) */
        ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);

        ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
        for (idx = 0; idx < QSFP_SN_LEN; ++idx)
                cks += cp->serial[idx];

        ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
        for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
                cks += cp->date[idx];

        ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
        for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
                cks += cp->lot[idx];

        ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);

        ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
        if ((cks & 0xFF) != cp->cks2)
                qib_dev_porterr(ppd->dd, ppd->port,
                                "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
                                cks & 0xFF);
const char * const qib_qsfp_devtech[16] = {
        "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
        "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
        "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
        "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224

static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
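/*
 * pwr_codes packs the four power-class strings into one flat array.
 * QSFP_PWR() presumably yields a class index 0-3; the dump code below adds
 * (index * 4) to pwr_codes and prints the first three characters with
 * "%.3s", then appends a literal 'W', so class 2 comes out as "PWR:2.5W".
 */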
int qib_qsfp_mod_present(struct qib_pportdata *ppd)
{
        u32 mask;
        int ret;

        mask = QSFP_GPIO_MOD_PRS_N <<
                (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
        ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);

        return !((ret & mask) >>
                 ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
}
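/*
 * MOD_PRS_N is an active-low "module present" pin, so a zero bit means a
 * cable is plugged in and the leading "!" converts that to the usual
 * 1 = present convention.  The "+ 3" in the shift suggests the present pin
 * sits at bit 3 of each port's GPIO field on these boards, though that
 * detail lives in the QSFP_GPIO_* definitions, not here.
 */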
/*
 * Initialize structures that control access to QSFP. Called once per port
 * on cards that support QSFP.
 */
void qib_qsfp_init(struct qib_qsfp_data *qd,
                   void (*fevent)(struct work_struct *))
{
        u32 mask, highs;
        struct qib_devdata *dd = qd->ppd->dd;

        /* Initialize work struct for later QSFP events */
        INIT_WORK(&qd->work, fevent);

        /*
         * Later, we may want more validation. For now, just set up pins and
         * blip reset. If module is present, call qib_refresh_qsfp_cache()
         * to do further init.
         */
        mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
        highs = mask - QSFP_GPIO_MOD_RST_N;
        if (qd->ppd->hw_pidx) {
                mask <<= QSFP_GPIO_PORT2_SHIFT;
                highs <<= QSFP_GPIO_PORT2_SHIFT;
        }
        dd->f_gpio_mod(dd, highs, mask, mask);
        udelay(20); /* Generous RST dwell */

        dd->f_gpio_mod(dd, mask, mask, mask);
}
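/*
 * The two f_gpio_mod() calls in qib_qsfp_init() blip reset: "highs" is the
 * pin set with MOD_RST_N removed, so the first call drives the (active-low)
 * reset pin low while MOD_SEL_N and LP_MODE stay high, and after the 20 usec
 * dwell the second call releases all three pins high, taking the module out
 * of reset.
 */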
void qib_qsfp_deinit(struct qib_qsfp_data *qd)
{
        /*
         * There is nothing to do here for now.  Our work is scheduled
         * with queue_work(), and flush_workqueue() from remove_one
         * will block until all work set up with queue_work()
         * completes.
         */
}

int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
{
        struct qib_qsfp_cache cd;
        u8 bin_buff[QSFP_DUMP_CHUNK];
        char lenstr[6] = " ";
        int sofar = 0;
        int bidx = 0;
        int iidx;
        int ret;

        ret = qib_refresh_qsfp_cache(ppd, &cd);

        if (QSFP_IS_CU(cd.tech))
                sprintf(lenstr, "%dM ", cd.len);

        sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
                           (QSFP_PWR(cd.pwr) * 4));

        sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
                           qib_qsfp_devtech[cd.tech >> 4]);
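        /*
         * cd.tech holds the SFF-8436 transmitter-technology byte; its high
         * nibble indexes the 16-entry qib_qsfp_devtech[] table above, and
         * QSFP_IS_CU() appears to key off the same byte to decide whether
         * the copper-only length and attenuation fields are meaningful.
         */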
        sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
                           QSFP_VEND_LEN, cd.vendor);

        sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
        sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
                           QSFP_PN_LEN, cd.partnum);
        sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
                           QSFP_REV_LEN, cd.rev);
        if (QSFP_IS_CU(cd.tech))
                sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
                                   QSFP_ATTEN_SDR(cd.atten),
                                   QSFP_ATTEN_DDR(cd.atten));
        sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
                           QSFP_SN_LEN, cd.serial);
        sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
                           QSFP_DATE_LEN, cd.date);
        sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
                           QSFP_LOT_LEN, cd.lot);
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);

                for (iidx = 0; iidx < ret; ++iidx) {
                        sofar += scnprintf(buf + sofar, len - sofar, " %02X",
                                           bin_buff[iidx]);
                }
                sofar += scnprintf(buf + sofar, len - sofar, "\n");
                bidx += QSFP_DUMP_CHUNK;