2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <rdma/mlx5-abi.h>
40 MLX5_CYCLES_SHIFT = 23
44 MLX5_PIN_MODE_IN = 0x0,
45 MLX5_PIN_MODE_OUT = 0x1,
49 MLX5_OUT_PATTERN_PULSE = 0x0,
50 MLX5_OUT_PATTERN_PERIODIC = 0x1,
54 MLX5_EVENT_MODE_DISABLE = 0x0,
55 MLX5_EVENT_MODE_REPETETIVE = 0x1,
56 MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
60 MLX5_MTPPS_FS_ENABLE = BIT(0x0),
61 MLX5_MTPPS_FS_PATTERN = BIT(0x2),
62 MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
63 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
64 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
65 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
68 static u64 read_internal_timer(const struct cyclecounter *cc)
70 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
71 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
74 return mlx5_read_internal_timer(mdev) & cc->mask;
77 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
79 struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
80 struct mlx5_clock *clock = &mdev->clock;
86 sign = smp_load_acquire(&clock_info->sign);
87 smp_store_mb(clock_info->sign,
88 sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
90 clock_info->cycles = clock->tc.cycle_last;
91 clock_info->mult = clock->cycles.mult;
92 clock_info->nsec = clock->tc.nsec;
93 clock_info->frac = clock->tc.frac;
95 smp_store_release(&clock_info->sign,
96 sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
99 static void mlx5_pps_out(struct work_struct *work)
101 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
103 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
105 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
107 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
111 for (i = 0; i < clock->ptp_info.n_pins; i++) {
114 write_lock_irqsave(&clock->lock, flags);
115 tstart = clock->pps_info.start[i];
116 clock->pps_info.start[i] = 0;
117 write_unlock_irqrestore(&clock->lock, flags);
121 MLX5_SET(mtpps_reg, in, pin, i);
122 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
123 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
124 mlx5_set_mtpps(mdev, in, sizeof(in));
128 static void mlx5_timestamp_overflow(struct work_struct *work)
130 struct delayed_work *dwork = to_delayed_work(work);
131 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
135 write_lock_irqsave(&clock->lock, flags);
136 timecounter_read(&clock->tc);
137 mlx5_update_clock_info_page(clock->mdev);
138 write_unlock_irqrestore(&clock->lock, flags);
139 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
142 static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
143 const struct timespec64 *ts)
145 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
147 u64 ns = timespec64_to_ns(ts);
150 write_lock_irqsave(&clock->lock, flags);
151 timecounter_init(&clock->tc, &clock->cycles, ns);
152 mlx5_update_clock_info_page(clock->mdev);
153 write_unlock_irqrestore(&clock->lock, flags);
158 static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
160 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
165 write_lock_irqsave(&clock->lock, flags);
166 ns = timecounter_read(&clock->tc);
167 write_unlock_irqrestore(&clock->lock, flags);
169 *ts = ns_to_timespec64(ns);
174 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
176 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
180 write_lock_irqsave(&clock->lock, flags);
181 timecounter_adjtime(&clock->tc, delta);
182 mlx5_update_clock_info_page(clock->mdev);
183 write_unlock_irqrestore(&clock->lock, flags);
188 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
194 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
202 adj = clock->nominal_c_mult;
204 diff = div_u64(adj, 1000000000ULL);
206 write_lock_irqsave(&clock->lock, flags);
207 timecounter_read(&clock->tc);
208 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
209 clock->nominal_c_mult + diff;
210 mlx5_update_clock_info_page(clock->mdev);
211 write_unlock_irqrestore(&clock->lock, flags);
216 static int mlx5_extts_configure(struct ptp_clock_info *ptp,
217 struct ptp_clock_request *rq,
220 struct mlx5_clock *clock =
221 container_of(ptp, struct mlx5_clock, ptp_info);
222 struct mlx5_core_dev *mdev =
223 container_of(clock, struct mlx5_core_dev, clock);
224 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
225 u32 field_select = 0;
231 if (!MLX5_PPS_CAP(mdev))
234 if (rq->extts.index >= clock->ptp_info.n_pins)
238 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
241 pin_mode = MLX5_PIN_MODE_IN;
242 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
243 field_select = MLX5_MTPPS_FS_PIN_MODE |
244 MLX5_MTPPS_FS_PATTERN |
245 MLX5_MTPPS_FS_ENABLE;
247 pin = rq->extts.index;
248 field_select = MLX5_MTPPS_FS_ENABLE;
251 MLX5_SET(mtpps_reg, in, pin, pin);
252 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
253 MLX5_SET(mtpps_reg, in, pattern, pattern);
254 MLX5_SET(mtpps_reg, in, enable, on);
255 MLX5_SET(mtpps_reg, in, field_select, field_select);
257 err = mlx5_set_mtpps(mdev, in, sizeof(in));
261 return mlx5_set_mtppse(mdev, pin, 0,
262 MLX5_EVENT_MODE_REPETETIVE & on);
265 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
266 struct ptp_clock_request *rq,
269 struct mlx5_clock *clock =
270 container_of(ptp, struct mlx5_clock, ptp_info);
271 struct mlx5_core_dev *mdev =
272 container_of(clock, struct mlx5_core_dev, clock);
273 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
274 u64 nsec_now, nsec_delta, time_stamp = 0;
275 u64 cycles_now, cycles_delta;
276 struct timespec64 ts;
278 u32 field_select = 0;
285 if (!MLX5_PPS_CAP(mdev))
288 if (rq->perout.index >= clock->ptp_info.n_pins)
292 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
297 pin_mode = MLX5_PIN_MODE_OUT;
298 pattern = MLX5_OUT_PATTERN_PERIODIC;
299 ts.tv_sec = rq->perout.period.sec;
300 ts.tv_nsec = rq->perout.period.nsec;
301 ns = timespec64_to_ns(&ts);
303 if ((ns >> 1) != 500000000LL)
306 ts.tv_sec = rq->perout.start.sec;
307 ts.tv_nsec = rq->perout.start.nsec;
308 ns = timespec64_to_ns(&ts);
309 cycles_now = mlx5_read_internal_timer(mdev);
310 write_lock_irqsave(&clock->lock, flags);
311 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
312 nsec_delta = ns - nsec_now;
313 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
315 write_unlock_irqrestore(&clock->lock, flags);
316 time_stamp = cycles_now + cycles_delta;
317 field_select = MLX5_MTPPS_FS_PIN_MODE |
318 MLX5_MTPPS_FS_PATTERN |
319 MLX5_MTPPS_FS_ENABLE |
320 MLX5_MTPPS_FS_TIME_STAMP;
322 pin = rq->perout.index;
323 field_select = MLX5_MTPPS_FS_ENABLE;
326 MLX5_SET(mtpps_reg, in, pin, pin);
327 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
328 MLX5_SET(mtpps_reg, in, pattern, pattern);
329 MLX5_SET(mtpps_reg, in, enable, on);
330 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
331 MLX5_SET(mtpps_reg, in, field_select, field_select);
333 err = mlx5_set_mtpps(mdev, in, sizeof(in));
337 return mlx5_set_mtppse(mdev, pin, 0,
338 MLX5_EVENT_MODE_REPETETIVE & on);
341 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
342 struct ptp_clock_request *rq,
345 struct mlx5_clock *clock =
346 container_of(ptp, struct mlx5_clock, ptp_info);
348 clock->pps_info.enabled = !!on;
352 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
353 struct ptp_clock_request *rq,
357 case PTP_CLK_REQ_EXTTS:
358 return mlx5_extts_configure(ptp, rq, on);
359 case PTP_CLK_REQ_PEROUT:
360 return mlx5_perout_configure(ptp, rq, on);
361 case PTP_CLK_REQ_PPS:
362 return mlx5_pps_configure(ptp, rq, on);
370 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
371 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
374 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
375 enum ptp_pin_function func, unsigned int chan)
377 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
384 return !(clock->pps_info.pin_caps[pin] &
385 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
387 return !(clock->pps_info.pin_caps[pin] &
388 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
/* Template ptp_clock_info copied into clock->ptp_info by mlx5_init_clock;
 * .enable/.verify and the pin counts are filled in later (see
 * mlx5_init_pin_config / mlx5_get_pps_caps).
 * NOTE(review): several initializer lines are missing from this view (at
 * least the closing brace, and likely the .name string and n_* fields) --
 * recover them from the original tree; the .name string must not be guessed.
 */
396 static const struct ptp_clock_info mlx5_ptp_clock_info = {
397 .owner = THIS_MODULE,
/* Maximum frequency adjustment advertised to the PTP core (adjfreq units). */
399 .max_adj = 100000000,
405 .adjfreq = mlx5_ptp_adjfreq,
406 .adjtime = mlx5_ptp_adjtime,
407 .gettime64 = mlx5_ptp_gettime,
408 .settime64 = mlx5_ptp_settime,
413 static int mlx5_init_pin_config(struct mlx5_clock *clock)
417 clock->ptp_info.pin_config =
418 kcalloc(clock->ptp_info.n_pins,
419 sizeof(*clock->ptp_info.pin_config),
421 if (!clock->ptp_info.pin_config)
423 clock->ptp_info.enable = mlx5_ptp_enable;
424 clock->ptp_info.verify = mlx5_ptp_verify;
425 clock->ptp_info.pps = 1;
427 for (i = 0; i < clock->ptp_info.n_pins; i++) {
428 snprintf(clock->ptp_info.pin_config[i].name,
429 sizeof(clock->ptp_info.pin_config[i].name),
431 clock->ptp_info.pin_config[i].index = i;
432 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
433 clock->ptp_info.pin_config[i].chan = i;
439 static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
441 struct mlx5_clock *clock = &mdev->clock;
442 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
444 mlx5_query_mtpps(mdev, out, sizeof(out));
446 clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
447 cap_number_of_pps_pins);
448 clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
449 cap_max_num_of_pps_in_pins);
450 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
451 cap_max_num_of_pps_out_pins);
453 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
454 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
455 clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
456 clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
457 clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
458 clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
459 clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
460 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
463 void mlx5_pps_event(struct mlx5_core_dev *mdev,
464 struct mlx5_eqe *eqe)
466 struct mlx5_clock *clock = &mdev->clock;
467 struct ptp_clock_event ptp_event;
468 struct timespec64 ts;
469 u64 nsec_now, nsec_delta;
470 u64 cycles_now, cycles_delta;
471 int pin = eqe->data.pps.pin;
475 switch (clock->ptp_info.pin_config[pin].func) {
477 ptp_event.index = pin;
478 ptp_event.timestamp =
479 mlx5_timecounter_cyc2time(clock,
480 be64_to_cpu(eqe->data.pps.time_stamp));
481 if (clock->pps_info.enabled) {
482 ptp_event.type = PTP_CLOCK_PPSUSR;
483 ptp_event.pps_times.ts_real =
484 ns_to_timespec64(ptp_event.timestamp);
486 ptp_event.type = PTP_CLOCK_EXTTS;
488 ptp_clock_event(clock->ptp, &ptp_event);
491 mlx5_ptp_gettime(&clock->ptp_info, &ts);
492 cycles_now = mlx5_read_internal_timer(mdev);
495 ns = timespec64_to_ns(&ts);
496 write_lock_irqsave(&clock->lock, flags);
497 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
498 nsec_delta = ns - nsec_now;
499 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
501 clock->pps_info.start[pin] = cycles_now + cycles_delta;
502 schedule_work(&clock->pps_info.out_work);
503 write_unlock_irqrestore(&clock->lock, flags);
506 mlx5_core_err(mdev, " Unhandled event\n");
510 void mlx5_init_clock(struct mlx5_core_dev *mdev)
512 struct mlx5_clock *clock = &mdev->clock;
518 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
520 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
523 rwlock_init(&clock->lock);
524 clock->cycles.read = read_internal_timer;
525 clock->cycles.shift = MLX5_CYCLES_SHIFT;
526 clock->cycles.mult = clocksource_khz2mult(dev_freq,
527 clock->cycles.shift);
528 clock->nominal_c_mult = clock->cycles.mult;
529 clock->cycles.mask = CLOCKSOURCE_MASK(41);
532 timecounter_init(&clock->tc, &clock->cycles,
533 ktime_to_ns(ktime_get_real()));
535 /* Calculate period in seconds to call the overflow watchdog - to make
536 * sure counter is checked at least twice every wrap around.
537 * The period is calculated as the minimum between max HW cycles count
538 * (The clock source mask) and max amount of cycles that can be
539 * multiplied by clock multiplier where the result doesn't exceed
542 overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
543 overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
545 ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
547 do_div(ns, NSEC_PER_SEC / HZ);
548 clock->overflow_period = ns;
550 mdev->clock_info_page = alloc_page(GFP_KERNEL);
551 if (mdev->clock_info_page) {
552 mdev->clock_info = kmap(mdev->clock_info_page);
553 if (!mdev->clock_info) {
554 __free_page(mdev->clock_info_page);
555 mlx5_core_warn(mdev, "failed to map clock page\n");
557 mdev->clock_info->sign = 0;
558 mdev->clock_info->nsec = clock->tc.nsec;
559 mdev->clock_info->cycles = clock->tc.cycle_last;
560 mdev->clock_info->mask = clock->cycles.mask;
561 mdev->clock_info->mult = clock->nominal_c_mult;
562 mdev->clock_info->shift = clock->cycles.shift;
563 mdev->clock_info->frac = clock->tc.frac;
564 mdev->clock_info->overflow_period =
565 clock->overflow_period;
569 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
570 INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
571 if (clock->overflow_period)
572 schedule_delayed_work(&clock->overflow_work, 0);
574 mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
576 /* Configure the PHC */
577 clock->ptp_info = mlx5_ptp_clock_info;
579 /* Initialize 1PPS data structures */
580 if (MLX5_PPS_CAP(mdev))
581 mlx5_get_pps_caps(mdev);
582 if (clock->ptp_info.n_pins)
583 mlx5_init_pin_config(clock);
585 clock->ptp = ptp_clock_register(&clock->ptp_info,
587 if (IS_ERR(clock->ptp)) {
588 mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
589 PTR_ERR(clock->ptp));
594 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
596 struct mlx5_clock *clock = &mdev->clock;
598 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
602 ptp_clock_unregister(clock->ptp);
606 cancel_work_sync(&clock->pps_info.out_work);
607 cancel_delayed_work_sync(&clock->overflow_work);
609 if (mdev->clock_info) {
610 kunmap(mdev->clock_info_page);
611 __free_page(mdev->clock_info_page);
612 mdev->clock_info = NULL;
615 kfree(clock->ptp_info.pin_config);