1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/log2.h>
36 #include <linux/ptp_clock_kernel.h>
37 #include <rdma/mlx5-abi.h>
38 #include "lib/eq.h"
39 #include "en.h"
40 #include "clock.h"
41 #ifdef CONFIG_X86
42 #include <linux/timekeeping.h>
43 #include <linux/cpufeature.h>
44 #endif /* CONFIG_X86 */
45 
/* MTPPS pin direction, programmed into the register's pin_mode field. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
50 
/* Output waveform shape for pins configured in output mode. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
55 
/* Pin event generation mode, used with mlx5_set_mtppse(). */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,	/* sic: "repetitive"; name kept for ABI/grep stability */
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
61 
/* MTPPS field_select bits: each bit marks which register field a write
 * should actually apply, so unrelated fields are left untouched.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD               = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS     = BIT(0xa),
};
72 
/* Valid range (in nanoseconds) for an MTUTC ADJUST_TIME operation.
 * Base range is a signed 16-bit field; devices with the extended-range
 * capability accept +/- 200 us instead.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
79 
mlx5_real_time_mode(struct mlx5_core_dev * mdev)80 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
81 {
82 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
83 }
84 
mlx5_npps_real_time_supported(struct mlx5_core_dev * mdev)85 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
86 {
87 	return (mlx5_real_time_mode(mdev) &&
88 		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
89 		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
90 }
91 
/* True when FW allows modifying the MTUTC real-time clock
 * (ptpcyc2realtime_modify capability).
 */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
96 
/* Compute the cyclecounter shift constant for a device clocked at
 * @dev_freq_khz.
 *
 * Optimal shift constant leads to corrections above just 1 scaled ppm.
 *
 * Two sets of equations are needed to derive the optimal shift
 * constant for the cyclecounter:
 *
 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
 *    ppb = scaled_ppm * 1000 / 2^16
 *
 * Using the two equations together
 *
 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
 *    dev_freq_khz = 2^(shift_constant - 16)
 *
 * then yields
 *
 *    shift_constant = ilog2(dev_freq_khz) + 16
 *
 * The result is capped so that the multiplier cannot overflow a u32.
 */
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	int optimal_shift = ilog2(dev_freq_khz) + 16;
	int overflow_limit = ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz);

	return min(optimal_shift, overflow_limit);
}
121 
mlx5_ptp_getmaxphase(struct ptp_clock_info * ptp)122 static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
123 {
124 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
125 	struct mlx5_core_dev *mdev;
126 
127 	mdev = container_of(clock, struct mlx5_core_dev, clock);
128 
129 	return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
130 		       MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
131 			     MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
132 }
133 
mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev * mdev,s64 delta)134 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
135 {
136 	s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
137 
138 	if (delta < -max || delta > max)
139 		return false;
140 
141 	return true;
142 }
143 
/* Write the MTUTC (UTC time control) register.
 * Returns -EOPNOTSUPP when the device does not expose MTUTC,
 * otherwise the access_reg result.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	/* Final argument 1 selects a register write (not a query). */
	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
154 
155 #ifdef CONFIG_X86
/* Query MTPTM to check whether PTM (PCIe Precision Time Measurement)
 * source time is currently available. Returns false when the register
 * is unsupported or the query fails.
 */
static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
	int err;

	if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
		return false;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
				   0, 0);
	if (err)
		return false;

	/* psta: "PTM source time available" status bit. */
	return !!MLX5_GET(mtptm_reg, out, psta);
}
172 
mlx5_mtctr_syncdevicetime(ktime_t * device_time,struct system_counterval_t * sys_counterval,void * ctx)173 static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
174 				     struct system_counterval_t *sys_counterval,
175 				     void *ctx)
176 {
177 	u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
178 	u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
179 	struct mlx5_core_dev *mdev = ctx;
180 	bool real_time_mode;
181 	u64 host, device;
182 	int err;
183 
184 	real_time_mode = mlx5_real_time_mode(mdev);
185 
186 	MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
187 		 MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
188 	MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
189 		 real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
190 		 MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
191 
192 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
193 				   0, 0);
194 	if (err)
195 		return err;
196 
197 	if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
198 	    !MLX5_GET(mtctr_reg, out, second_clock_valid))
199 		return -EINVAL;
200 
201 	host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
202 	*sys_counterval = (struct system_counterval_t) {
203 			.cycles = host,
204 			.cs_id = CSID_X86_ART,
205 			.use_nsecs = true,
206 	};
207 
208 	device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
209 	if (real_time_mode)
210 		*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
211 	else
212 		*device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
213 
214 	return 0;
215 }
216 
/* PTP .getcrosststamp implementation (x86/PTM only).
 * Returns -EBUSY when PTM source time is not currently available,
 * otherwise delegates the device/system snapshot pairing to
 * get_device_system_crosststamp().
 */
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
				   struct system_device_crosststamp *cts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct system_time_snapshot history_begin = {0};
	struct mlx5_core_dev *mdev;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (!mlx5_is_ptm_source_time_available(mdev))
		return -EBUSY;

	/* Snapshot taken before the device read bounds the interpolation. */
	ktime_get_snapshot(&history_begin);

	return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
					     &history_begin, cts);
}
234 #endif /* CONFIG_X86 */
235 
/* Read the device timer (real-time or internal free-running) from the
 * init segment as a 64-bit value.
 *
 * The 64-bit counter is exposed as two 32-bit MMIO words, so the high
 * word is read, then the low word, then the high word again: if the
 * two high reads differ, the low word wrapped in between and the low
 * word is re-read (it is then consistent with the second high read,
 * timer_h1, which is what gets combined below).
 *
 * @sts, when non-NULL, captures system timestamps around the low-word
 * read for PTP_SYS_OFFSET_EXTENDED support.
 * Returns nanoseconds in real-time mode, raw cycles otherwise.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
261 
read_internal_timer(const struct cyclecounter * cc)262 static u64 read_internal_timer(const struct cyclecounter *cc)
263 {
264 	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
265 	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
266 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
267 						  clock);
268 
269 	return mlx5_read_time(mdev, NULL, false) & cc->mask;
270 }
271 
/* Publish the current timecounter state to the page shared with
 * userspace (mlx5_ib clock info mmap).
 *
 * Uses a seqcount-style protocol: the sign field is bumped to an
 * "updating" value with a full barrier before the fields are written,
 * and released to a new even generation afterwards, so readers can
 * detect and retry torn reads. Callers are expected to hold the clock
 * writer lock so updates themselves do not race.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* Page is optional; nothing to do when it was never allocated. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	/* Mark the page as mid-update before touching any field. */
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	/* Advance to the next even generation, clearing the updating bit. */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
295 
/* Deferred work: program the next PPS-out timestamp for every pin that
 * has one pending.
 *
 * Each pin's pending start time is consumed (read and zeroed) under the
 * clock lock, then written to HW via MTPPS outside the lock; pins with
 * no pending start are skipped.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Atomically claim this pin's pending start time. */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		/* Only the time_stamp field is selected for update. */
		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
324 
/* Periodic work: fold the free-running counter into the timecounter
 * before it can wrap, and refresh the userspace clock-info page.
 *
 * The HW read is skipped while the device is in internal error, but the
 * work always re-arms itself so accounting resumes after recovery.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	/* Don't touch HW while the device is in internal error. */
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	/* Re-arm before half the counter wrap period elapses. */
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
348 
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)349 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
350 				      const struct timespec64 *ts)
351 {
352 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
353 
354 	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
355 	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
356 		return -EINVAL;
357 
358 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
359 	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
360 	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
361 
362 	return mlx5_set_mtutc(mdev, in, sizeof(in));
363 }
364 
mlx5_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)365 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
366 {
367 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
368 	struct mlx5_timer *timer = &clock->timer;
369 	struct mlx5_core_dev *mdev;
370 	unsigned long flags;
371 
372 	mdev = container_of(clock, struct mlx5_core_dev, clock);
373 
374 	if (mlx5_modify_mtutc_allowed(mdev)) {
375 		int err = mlx5_ptp_settime_real_time(mdev, ts);
376 
377 		if (err)
378 			return err;
379 	}
380 
381 	write_seqlock_irqsave(&clock->lock, flags);
382 	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
383 	mlx5_update_clock_info_page(mdev);
384 	write_sequnlock_irqrestore(&clock->lock, flags);
385 
386 	return 0;
387 }
388 
389 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)390 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
391 					      struct ptp_system_timestamp *sts)
392 {
393 	struct timespec64 ts;
394 	u64 time;
395 
396 	time = mlx5_read_time(mdev, sts, true);
397 	ts = ns_to_timespec64(time);
398 	return ts;
399 }
400 
mlx5_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)401 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
402 			     struct ptp_system_timestamp *sts)
403 {
404 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
405 	struct mlx5_timer *timer = &clock->timer;
406 	struct mlx5_core_dev *mdev;
407 	unsigned long flags;
408 	u64 cycles, ns;
409 
410 	mdev = container_of(clock, struct mlx5_core_dev, clock);
411 	if (mlx5_real_time_mode(mdev)) {
412 		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
413 		goto out;
414 	}
415 
416 	write_seqlock_irqsave(&clock->lock, flags);
417 	cycles = mlx5_read_time(mdev, sts, false);
418 	ns = timecounter_cyc2time(&timer->tc, cycles);
419 	write_sequnlock_irqrestore(&clock->lock, flags);
420 	*ts = ns_to_timespec64(ns);
421 out:
422 	return 0;
423 }
424 
/* Adjust the HW real-time clock by @delta nanoseconds.
 * Small deltas use the MTUTC ADJUST_TIME operation; deltas outside the
 * HW adjustment window fall back to read-modify-settime.
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		/* Non-atomic fallback: time read and written separately. */
		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
445 
/* PTP .adjtime: shift the HW real-time clock (when writable) and the
 * SW timecounter by @delta nanoseconds, then republish clock info.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	struct mlx5_timer *timer = &clock->timer;
	unsigned long flags;
	int err;

	if (mlx5_modify_mtutc_allowed(mdev)) {
		err = mlx5_ptp_adjtime_real_time(mdev, delta);
		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
469 
/* PTP .adjphase: apply a phase offset as a one-shot HW time adjustment.
 * The offset is pre-validated by the core against .getmaxphase.
 */
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_ptp_adjtime_real_time(mdev, delta);
}
479 
/* Adjust the HW real-time clock frequency (MTUTC ADJUST_FREQ_UTC).
 * Uses the native scaled-ppm units when the device supports them and
 * the value fits in 32 bits; otherwise converts to ppb.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
	    scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
		/* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
	} else {
		/* Fallback: convert to parts-per-billion for older devices. */
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
499 
mlx5_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)500 static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
501 {
502 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
503 	struct mlx5_timer *timer = &clock->timer;
504 	struct mlx5_core_dev *mdev;
505 	unsigned long flags;
506 	u32 mult;
507 
508 	mdev = container_of(clock, struct mlx5_core_dev, clock);
509 
510 	if (mlx5_modify_mtutc_allowed(mdev)) {
511 		int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
512 
513 		if (err)
514 			return err;
515 	}
516 
517 	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
518 
519 	write_seqlock_irqsave(&clock->lock, flags);
520 	timecounter_read(&timer->tc);
521 	timer->cycles.mult = mult;
522 	mlx5_update_clock_info_page(mdev);
523 	write_sequnlock_irqrestore(&clock->lock, flags);
524 
525 	return 0;
526 }
527 
/* Configure a pin for external timestamping (PTP_CLK_REQ_EXTTS).
 *
 * Validates capabilities and request flags, programs the pin via MTPPS,
 * then arms (or disarms) repetitive event generation via MTPPSE.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* Pattern 1 selects falling edge, 0 rising edge. */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		/* Disable: only touch the enable field. */
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm repetitive events only when enabling (on is 0 or 1 here). */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
589 
/* Translate an absolute time @target_ns into a value of the
 * free-running cycle counter: read the counter, compute the nanosecond
 * distance to the target, and convert it back to cycles using the
 * current mult/shift.
 *
 * NOTE(review): nsec_delta is u64; a target in the past would wrap the
 * subtraction — callers appear to only pass future times; confirm.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Inverse of cyc2ns: cycles = ns * 2^shift / mult. */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
610 
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)611 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
612 {
613 	struct timespec64 ts = {};
614 	s64 target_ns;
615 
616 	ts.tv_sec = sec;
617 	target_ns = timespec64_to_ns(&ts);
618 
619 	return find_target_cycles(mdev, target_ns);
620 }
621 
/* Pack a real-time timestamp in HW layout: seconds in the high 32 bits,
 * nanoseconds in the low 32 bits.
 */
static u64 perout_conf_real_time(s64 sec, u32 nsec)
{
	return (u64)nsec | (u64)sec << 32;
}
626 
/* Validate and convert a 1 Hz periodic-output request.
 * Only a period of one second is supported; the start time is converted
 * to the HW real-time format or to internal-counter cycles.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	/* Require a 1 s period. NOTE(review): the >>1 comparison also lets
	 * NSEC_PER_SEC + 1 pass — presumably tolerated intentionally.
	 */
	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	/* Real-time HW starts on a whole second (nsec forced to 0). */
	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
645 
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
/* Compute the NPPS output pulse duration in nanoseconds.
 * Uses the requested duty cycle when given, otherwise defaults to 50%
 * of the period. The result must fit both the device minimum and the
 * width of the out_pulse_duration_ns register field.
 * Returns 0 and fills *out_pulse_duration_ns, or -EINVAL.
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		/* Explicit on-time requested by the caller. */
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
679 
/* Build the NPPS (N pulses per second) parameters for a real-time
 * periodic-output request: validates the period against the device
 * minimum, computes the pulse duration, and packs period and start
 * time in HW real-time format. Extends *field_select with the NPPS
 * fields. Returns 0 on success or -EINVAL.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
706 
mlx5_perout_verify_flags(struct mlx5_core_dev * mdev,unsigned int flags)707 static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
708 {
709 	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
710 		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
711 }
712 
mlx5_perout_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)713 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
714 				 struct ptp_clock_request *rq,
715 				 int on)
716 {
717 	struct mlx5_clock *clock =
718 			container_of(ptp, struct mlx5_clock, ptp_info);
719 	struct mlx5_core_dev *mdev =
720 			container_of(clock, struct mlx5_core_dev, clock);
721 	bool rt_mode = mlx5_real_time_mode(mdev);
722 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
723 	u32 out_pulse_duration_ns = 0;
724 	u32 field_select = 0;
725 	u64 npps_period = 0;
726 	u64 time_stamp = 0;
727 	u8 pin_mode = 0;
728 	u8 pattern = 0;
729 	int pin = -1;
730 	int err = 0;
731 
732 	if (!MLX5_PPS_CAP(mdev))
733 		return -EOPNOTSUPP;
734 
735 	/* Reject requests with unsupported flags */
736 	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
737 		return -EOPNOTSUPP;
738 
739 	if (rq->perout.index >= clock->ptp_info.n_pins)
740 		return -EINVAL;
741 
742 	field_select = MLX5_MTPPS_FS_ENABLE;
743 	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
744 	if (pin < 0)
745 		return -EBUSY;
746 
747 	if (on) {
748 		bool rt_mode = mlx5_real_time_mode(mdev);
749 
750 		pin_mode = MLX5_PIN_MODE_OUT;
751 		pattern = MLX5_OUT_PATTERN_PERIODIC;
752 
753 		if (rt_mode &&  rq->perout.start.sec > U32_MAX)
754 			return -EINVAL;
755 
756 		field_select |= MLX5_MTPPS_FS_PIN_MODE |
757 				MLX5_MTPPS_FS_PATTERN |
758 				MLX5_MTPPS_FS_TIME_STAMP;
759 
760 		if (mlx5_npps_real_time_supported(mdev))
761 			err = perout_conf_npps_real_time(mdev, rq, &field_select,
762 							 &out_pulse_duration_ns, &npps_period,
763 							 &time_stamp);
764 		else
765 			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
766 		if (err)
767 			return err;
768 	}
769 
770 	MLX5_SET(mtpps_reg, in, pin, pin);
771 	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
772 	MLX5_SET(mtpps_reg, in, pattern, pattern);
773 	MLX5_SET(mtpps_reg, in, enable, on);
774 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
775 	MLX5_SET(mtpps_reg, in, field_select, field_select);
776 	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
777 	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
778 	err = mlx5_set_mtpps(mdev, in, sizeof(in));
779 	if (err)
780 		return err;
781 
782 	if (rt_mode)
783 		return 0;
784 
785 	return mlx5_set_mtppse(mdev, pin, 0,
786 			       MLX5_EVENT_MODE_REPETETIVE & on);
787 }
788 
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)789 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
790 			      struct ptp_clock_request *rq,
791 			      int on)
792 {
793 	struct mlx5_clock *clock =
794 			container_of(ptp, struct mlx5_clock, ptp_info);
795 
796 	clock->pps_info.enabled = !!on;
797 	return 0;
798 }
799 
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)800 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
801 			   struct ptp_clock_request *rq,
802 			   int on)
803 {
804 	switch (rq->type) {
805 	case PTP_CLK_REQ_EXTTS:
806 		return mlx5_extts_configure(ptp, rq, on);
807 	case PTP_CLK_REQ_PEROUT:
808 		return mlx5_perout_configure(ptp, rq, on);
809 	case PTP_CLK_REQ_PPS:
810 		return mlx5_pps_configure(ptp, rq, on);
811 	default:
812 		return -EOPNOTSUPP;
813 	}
814 	return 0;
815 }
816 
/* Per-pin capability bits reported in the MTPPS cap_pin_X_mode fields. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
821 
mlx5_ptp_verify(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)822 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
823 			   enum ptp_pin_function func, unsigned int chan)
824 {
825 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
826 						ptp_info);
827 
828 	switch (func) {
829 	case PTP_PF_NONE:
830 		return 0;
831 	case PTP_PF_EXTTS:
832 		return !(clock->pps_info.pin_caps[pin] &
833 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
834 	case PTP_PF_PEROUT:
835 		return !(clock->pps_info.pin_caps[pin] &
836 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
837 	default:
838 		return -EOPNOTSUPP;
839 	}
840 }
841 
/* Baseline PTP clock capabilities. Pin counts and the enable/verify
 * callbacks start zero/NULL and are filled in by mlx5_init_pin_config()
 * once MTPPS pin capabilities have been queried.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,	/* maximum frequency adjustment, in ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjphase	= mlx5_ptp_adjphase,
	.getmaxphase    = mlx5_ptp_getmaxphase,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
860 
/* Query the MTPPS register for a single pin's configuration.
 * Fills @mtpps with the register contents for @pin.
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	/* Final argument 0 selects a register query (read). */
	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
871 
/* Map a pin's current HW state to a PTP pin function.
 * A pin that cannot be queried, is disabled, or reports an unknown
 * mode maps to PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	if (mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)) ||
	    !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
893 
/* Allocate and populate the PTP pin table from the queried HW state,
 * and install the pin-related callbacks. Best-effort: on allocation
 * failure the clock simply comes up without pin support (pin_config
 * stays NULL, enable/verify stay unset).
 */
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		/* Seed each pin's function from its current HW state. */
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}
920 
/* Query the MTPPS register once and cache all 1PPS capabilities in
 * clock->ptp_info (pin counts advertised to the PTP core) and
 * clock->pps_info (driver-internal limits and per-pin mode caps).
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* These caps are reported as log2 values; convert to linear units. */
	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	/* Per-pin mode capability fields; one register field per pin, so
	 * they cannot be read in a loop via MLX5_GET.
	 */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
951 
ts_next_sec(struct timespec64 * ts)952 static void ts_next_sec(struct timespec64 *ts)
953 {
954 	ts->tv_sec += 1;
955 	ts->tv_nsec = 0;
956 }
957 
perout_conf_next_event_timer(struct mlx5_core_dev * mdev,struct mlx5_clock * clock)958 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
959 					struct mlx5_clock *clock)
960 {
961 	struct timespec64 ts;
962 	s64 target_ns;
963 
964 	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
965 	ts_next_sec(&ts);
966 	target_ns = timespec64_to_ns(&ts);
967 
968 	return find_target_cycles(mdev, target_ns);
969 }
970 
mlx5_pps_event(struct notifier_block * nb,unsigned long type,void * data)971 static int mlx5_pps_event(struct notifier_block *nb,
972 			  unsigned long type, void *data)
973 {
974 	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
975 	struct ptp_clock_event ptp_event;
976 	struct mlx5_eqe *eqe = data;
977 	int pin = eqe->data.pps.pin;
978 	struct mlx5_core_dev *mdev;
979 	unsigned long flags;
980 	u64 ns;
981 
982 	mdev = container_of(clock, struct mlx5_core_dev, clock);
983 
984 	switch (clock->ptp_info.pin_config[pin].func) {
985 	case PTP_PF_EXTTS:
986 		ptp_event.index = pin;
987 		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
988 			mlx5_real_time_cyc2time(clock,
989 						be64_to_cpu(eqe->data.pps.time_stamp)) :
990 			mlx5_timecounter_cyc2time(clock,
991 						  be64_to_cpu(eqe->data.pps.time_stamp));
992 		if (clock->pps_info.enabled) {
993 			ptp_event.type = PTP_CLOCK_PPSUSR;
994 			ptp_event.pps_times.ts_real =
995 					ns_to_timespec64(ptp_event.timestamp);
996 		} else {
997 			ptp_event.type = PTP_CLOCK_EXTTS;
998 		}
999 		/* TODOL clock->ptp can be NULL if ptp_clock_register fails */
1000 		ptp_clock_event(clock->ptp, &ptp_event);
1001 		break;
1002 	case PTP_PF_PEROUT:
1003 		ns = perout_conf_next_event_timer(mdev, clock);
1004 		write_seqlock_irqsave(&clock->lock, flags);
1005 		clock->pps_info.start[pin] = ns;
1006 		write_sequnlock_irqrestore(&clock->lock, flags);
1007 		schedule_work(&clock->pps_info.out_work);
1008 		break;
1009 	default:
1010 		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
1011 			      clock->ptp_info.pin_config[pin].func);
1012 	}
1013 
1014 	return NOTIFY_OK;
1015 }
1016 
/* Initialize the free-running timecounter over the device's internal timer.
 * mult/shift are derived from the device_frequency_khz capability so that
 * cycle-to-nanosecond conversion preserves precision.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Remember the unadjusted multiplier — presumably the baseline for
	 * later frequency adjustments (adjfine); verify against mlx5_ptp_adjfine.
	 */
	timer->nominal_c_mult = timer->cycles.mult;
	/* NOTE(review): mask implies a 41-bit wide hardware counter — confirm
	 * against the device spec.
	 */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	/* Seed the timecounter with the current wall-clock time. */
	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
1034 
/* Compute the overflow-watchdog period and kick off the delayed work that
 * periodically reads the timecounter so the hardware counter never wraps
 * undetected.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	/* Convert the cycle budget to ns, then to jiffies for the work period. */
	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* Export the period via the user-mapped clock info page, if present. */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
1069 
mlx5_init_clock_info(struct mlx5_core_dev * mdev)1070 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
1071 {
1072 	struct mlx5_clock *clock = &mdev->clock;
1073 	struct mlx5_ib_clock_info *info;
1074 	struct mlx5_timer *timer;
1075 
1076 	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
1077 	if (!mdev->clock_info) {
1078 		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
1079 		return;
1080 	}
1081 
1082 	info = mdev->clock_info;
1083 	timer = &clock->timer;
1084 
1085 	info->nsec = timer->tc.nsec;
1086 	info->cycles = timer->tc.cycle_last;
1087 	info->mask = timer->cycles.mask;
1088 	info->mult = timer->nominal_c_mult;
1089 	info->shift = timer->cycles.shift;
1090 	info->frac = timer->tc.frac;
1091 }
1092 
mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev * mdev)1093 static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
1094 {
1095 	struct mlx5_clock *clock = &mdev->clock;
1096 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1097 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1098 	u8 log_max_freq_adjustment = 0;
1099 	int err;
1100 
1101 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1102 				   MLX5_REG_MTUTC, 0, 0);
1103 	if (!err)
1104 		log_max_freq_adjustment =
1105 			MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
1106 
1107 	if (log_max_freq_adjustment)
1108 		clock->ptp_info.max_adj =
1109 			min(S32_MAX, 1 << log_max_freq_adjustment);
1110 }
1111 
/* Set up the PHC: copy the ptp_info template, apply the device-specific
 * frequency-adjustment limit, enable cross-timestamping where supported,
 * then initialize the timecounter, the user clock-info page and the
 * overflow watchdog (in that order — clock_info snapshots timecounter
 * state, and the watchdog reads both).
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Override the default max_adj with the device-reported limit. */
	if (MLX5_CAP_MCAM_REG(mdev, mtutc))
		mlx5_init_timer_max_freq_adjustment(mdev);

#ifdef CONFIG_X86
	/* Cross-timestamping needs both the MTPTM/MTCTR registers and an
	 * ART-capable CPU.
	 */
	if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
	    MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
		clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
#endif /* CONFIG_X86 */

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);

	/* In real-time mode, seed the device clock with wall-clock time. */
	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
1139 
mlx5_init_pps(struct mlx5_core_dev * mdev)1140 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
1141 {
1142 	struct mlx5_clock *clock = &mdev->clock;
1143 
1144 	if (!MLX5_PPS_CAP(mdev))
1145 		return;
1146 
1147 	mlx5_get_pps_caps(mdev);
1148 	mlx5_init_pin_config(clock);
1149 }
1150 
/* Top-level HW clock initialization: timecounter, optional 1PPS, PTP clock
 * registration and PPS event notifier. Aborts (with a warning) on devices
 * reporting no frequency capability; mlx5_cleanup_clock() mirrors that
 * early-out.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Initialize the device clock */
	mlx5_init_timer_clock(mdev);

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	/* Registration failure is non-fatal: continue without a PHC, leaving
	 * clock->ptp NULL so downstream code can detect the absence.
	 */
	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	/* Start receiving PPS events only after everything above is ready. */
	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
1180 
/* Tear down everything mlx5_init_clock() set up, in reverse dependency
 * order: stop event delivery first, then the PTP clock, then in-flight
 * work, and finally free the memory.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Mirrors the early-out in mlx5_init_clock(): nothing was set up. */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	/* Flush work only after unregistering so nothing requeues it. */
	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(clock->ptp_info.pin_config);
}
1204