1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/log2.h>
36 #include <linux/ptp_clock_kernel.h>
37 #include <rdma/mlx5-abi.h>
38 #include "lib/eq.h"
39 #include "en.h"
40 #include "clock.h"
41 #ifdef CONFIG_X86
42 #include <linux/timekeeping.h>
43 #include <linux/cpufeature.h>
44 #endif /* CONFIG_X86 */
45 
46 #define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)
47 
/* MTPPS pin_mode values: direction of a programmable PPS pin. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
52 
/* MTPPS output pattern for a pin configured as MLX5_PIN_MODE_OUT. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
57 
/* MTPPSE event generation modes (spelling of REPETETIVE kept as-is —
 * the constant is referenced elsewhere in this file).
 */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
63 
/* field_select bits of the MTPPS register: each bit marks which field
 * of the register payload the access should apply.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD               = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS     = BIT(0xa),
};
74 
/* Limits for the MTUTC ADJUST_TIME operation (units as used by
 * mlx5_ptp_adjtime()/adjphase(), i.e. nanoseconds of phase offset);
 * the extended range applies when the
 * mtutc_time_adjustment_extended_range capability is set.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
81 
/* Per-device clock state kept outside the (possibly shared) mlx5_clock. */
struct mlx5_clock_dev_state {
	struct mlx5_core_dev *mdev;	/* owning device */
	struct mlx5_devcom_comp_dev *compdev;	/* devcom component handle — presumably for clock sharing across functions; not used in this chunk */
	struct mlx5_nb pps_nb;		/* PPS event notifier block */
	struct work_struct out_work;	/* worker: mlx5_pps_out() programs pending PPS-out timestamps */
};
88 
/* Private container around the public mlx5_clock; recovered with
 * clock_priv()/container_of().
 */
struct mlx5_clock_priv {
	struct mlx5_clock clock;	/* must stay first-class embedded member for container_of */
	struct mlx5_core_dev *mdev;	/* device currently backing the clock */
	struct mutex lock; /* protect mdev and used in PTP callbacks */
	struct mlx5_core_dev *event_mdev;	/* device that armed extts events (set in mlx5_extts_configure) */
};
95 
/* Map the embedded mlx5_clock back to its containing private struct. */
static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
{
	return container_of(clock, struct mlx5_clock_priv, clock);
}
100 
mlx5_clock_lockdep_assert(struct mlx5_clock * clock)101 static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
102 {
103 	if (!clock->shared)
104 		return;
105 
106 	lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
107 }
108 
/* Return the core device currently backing @clock.  Caller must hold
 * the clock lock when the clock is shared (checked via lockdep).
 */
static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
{
	mlx5_clock_lockdep_assert(clock);

	return clock_priv(clock)->mdev;
}
115 
mlx5_clock_lock(struct mlx5_clock * clock)116 static void mlx5_clock_lock(struct mlx5_clock *clock)
117 {
118 	if (!clock->shared)
119 		return;
120 
121 	mutex_lock(&clock_priv(clock)->lock);
122 }
123 
mlx5_clock_unlock(struct mlx5_clock * clock)124 static void mlx5_clock_unlock(struct mlx5_clock *clock)
125 {
126 	if (!clock->shared)
127 		return;
128 
129 	mutex_unlock(&clock_priv(clock)->lock);
130 }
131 
mlx5_real_time_mode(struct mlx5_core_dev * mdev)132 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
133 {
134 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
135 }
136 
mlx5_npps_real_time_supported(struct mlx5_core_dev * mdev)137 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
138 {
139 	return (mlx5_real_time_mode(mdev) &&
140 		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
141 		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
142 }
143 
/* True when firmware allows modifying the PTP-cycles-to-real-time
 * mapping through the MTUTC register.
 */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
148 
/* Query the device's real-time clock identity via the MRTCQ register.
 * On success @identify is filled with MLX5_RT_CLOCK_IDENTITY_SIZE bytes;
 * the access-reg error code is returned otherwise.
 */
static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
				   u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
{
	u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MRTCQ, 0, 0);
	if (err)
		return err;

	memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
	       MLX5_RT_CLOCK_IDENTITY_SIZE);

	return 0;
}
164 
/* Compute the cyclecounter shift for a device running at @dev_freq_khz. */
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
	 *
	 * Two sets of equations are needed to derive the optimal shift
	 * constant for the cyclecounter.
	 *
	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
	 *    ppb = scaled_ppm * 1000 / 2^16
	 *
	 * Using the two equations together
	 *
	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
	 *    dev_freq_khz = 2^(shift_constant - 16)
	 *
	 * then yields
	 *
	 *    shift_constant = ilog2(dev_freq_khz) + 16
	 */

	/* The second term clamps the shift so the scaled multiplier stays
	 * within range — NOTE(review): derivation of the bound not shown
	 * here; confirm against the cyclecounter mult/shift constraints.
	 */
	return min(ilog2(dev_freq_khz) + 16,
		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
189 
mlx5_clock_getmaxphase(struct mlx5_core_dev * mdev)190 static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
191 {
192 	return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
193 		       MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
194 			     MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
195 }
196 
mlx5_ptp_getmaxphase(struct ptp_clock_info * ptp)197 static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
198 {
199 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
200 	struct mlx5_core_dev *mdev;
201 	s32 ret;
202 
203 	mlx5_clock_lock(clock);
204 	mdev = mlx5_clock_mdev_get(clock);
205 	ret = mlx5_clock_getmaxphase(mdev);
206 	mlx5_clock_unlock(clock);
207 
208 	return ret;
209 }
210 
mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev * mdev,s64 delta)211 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
212 {
213 	s64 max = mlx5_clock_getmaxphase(mdev);
214 
215 	if (delta < -max || delta > max)
216 		return false;
217 
218 	return true;
219 }
220 
/* Write the MTUTC register (set/adjust the real-time clock).
 * Returns -EOPNOTSUPP when the register is not exposed by firmware,
 * otherwise the access-reg result.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	/* last argument 1 = write access */
	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
231 
232 #ifdef CONFIG_X86
mlx5_is_ptm_source_time_available(struct mlx5_core_dev * dev)233 static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
234 {
235 	u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
236 	u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
237 	int err;
238 
239 	if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
240 		return false;
241 
242 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
243 				   0, 0);
244 	if (err)
245 		return false;
246 
247 	return !!MLX5_GET(mtptm_reg, out, psta);
248 }
249 
/* Callback for get_device_system_crosststamp(): sample the host ART
 * clock and the device clock together through one MTCTR register read.
 *
 * @device_time: out — device time as ktime
 * @sys_counterval: out — host ART counter value (nanosecond-based)
 * @ctx: the mlx5_core_dev being sampled
 *
 * The second (device) timestamp is requested as the real-time clock
 * when real-time mode is active — packed as <sec:32,nsec:32> and
 * decoded with REAL_TIME_TO_NS — or as the free-running counter, which
 * is converted through the timecounter.  Returns -EINVAL when firmware
 * marks either sample invalid.
 */
static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
				     struct system_counterval_t *sys_counterval,
				     void *ctx)
{
	u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	struct mlx5_core_dev *mdev = ctx;
	bool real_time_mode;
	u64 host, device;
	int err;

	real_time_mode = mlx5_real_time_mode(mdev);

	MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
		 MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
	MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
		 real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
		 MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
				   0, 0);
	if (err)
		return err;

	if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
	    !MLX5_GET(mtctr_reg, out, second_clock_valid))
		return -EINVAL;

	host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
	*sys_counterval = (struct system_counterval_t) {
			.cycles = host,
			.cs_id = CSID_X86_ART,
			.use_nsecs = true,
	};

	device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
	if (real_time_mode)
		*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
	else
		*device_time = mlx5_timecounter_cyc2time(mdev->clock, device);

	return 0;
}
293 
/* ptp_clock_info.getcrosststamp callback (x86/PTM only): produce a
 * device/system cross-timestamp.  Fails with -EBUSY when no PTM source
 * time is currently available; otherwise defers to the core
 * get_device_system_crosststamp() helper with a history snapshot taken
 * just before sampling.
 */
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
				   struct system_device_crosststamp *cts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct system_time_snapshot history_begin = {0};
	struct mlx5_core_dev *mdev;
	int err;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (!mlx5_is_ptm_source_time_available(mdev)) {
		err = -EBUSY;
		goto unlock;
	}

	ktime_get_snapshot(&history_begin);

	err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
					    &history_begin, cts);
unlock:
	mlx5_clock_unlock(clock);
	return err;
}
318 #endif /* CONFIG_X86 */
319 
/* Read the 64-bit device clock from the initialization segment —
 * either the real-time clock or the free-running internal timer.
 *
 * The high word is read before and after the low word; if the two high
 * reads differ, the counter wrapped in between and the low word is
 * re-read so the halves are consistent.  Optional system timestamps
 * (@sts) are captured around the low-word read for
 * PTP_SYS_OFFSET_EXTENDED support.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* Real-time registers hold <sec,nsec>; otherwise splice halves. */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
345 
/* cyclecounter read callback: sample the free-running internal timer,
 * masked to the counter width.  Calls mlx5_clock_mdev_get(), so on a
 * shared clock the caller chain is expected to hold the clock lock.
 */
static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
354 
/* Publish the current timecounter state to the page shared with
 * userspace (mdev->clock_info), using a seqcount-like protocol on
 * 'sign': the MLX5_IB_CLOCK_INFO_KERNEL_UPDATING bit is set (with a
 * full barrier) before the fields are written and the counter is
 * advanced past it on release, so readers can detect a torn read.
 * No-op when no clock-info page is mapped.  Callers in this file hold
 * clock->lock while updating.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
378 
/* Worker (mlx5_clock_dev_state.out_work): program every pending PPS-out
 * start timestamp into the MTPPS register.
 *
 * Each pin's pending timestamp is consumed (read and cleared) under
 * clock->lock, then written to hardware outside the lock since register
 * access may sleep.  The mlx5_set_mtpps() return value is intentionally
 * ignored — this is a best-effort background update.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
								out_work);
	struct mlx5_core_dev *mdev = clock_state->mdev;
	struct mlx5_clock *clock = mdev->clock;
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
405 
/* PTP aux worker (ptp_clock_info.do_aux_work): periodically fold the
 * free-running counter into the timecounter so it never wraps unnoticed,
 * and republish the userspace clock-info page.  Skipped while the
 * device is in internal-error state.  Returns the delay until the next
 * invocation (timer->overflow_period).
 */
static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	timer = &clock->timer;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	mlx5_clock_unlock(clock);
	return timer->overflow_period;
}
430 
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)431 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
432 				      const struct timespec64 *ts)
433 {
434 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
435 
436 	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
437 	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
438 		return -EINVAL;
439 
440 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
441 	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
442 	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
443 
444 	return mlx5_set_mtutc(mdev, in, sizeof(in));
445 }
446 
/* Set the clock to @ts: when firmware allows MTUTC modification the
 * hardware real-time clock is programmed first (failure aborts the
 * whole operation), then the software timecounter is re-initialized
 * and the userspace clock-info page refreshed under clock->lock.
 */
static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
			      const struct timespec64 *ts)
{
	struct mlx5_timer *timer = &clock->timer;
	unsigned long flags;

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_settime_real_time(mdev, ts);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
467 
mlx5_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)468 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
469 {
470 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
471 	struct mlx5_core_dev *mdev;
472 	int err;
473 
474 	mlx5_clock_lock(clock);
475 	mdev = mlx5_clock_mdev_get(clock);
476 	err = mlx5_clock_settime(mdev, clock, ts);
477 	mlx5_clock_unlock(clock);
478 
479 	return err;
480 }
481 
482 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)483 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
484 					      struct ptp_system_timestamp *sts)
485 {
486 	struct timespec64 ts;
487 	u64 time;
488 
489 	time = mlx5_read_time(mdev, sts, true);
490 	ts = ns_to_timespec64(time);
491 	return ts;
492 }
493 
/* ptp_clock_info.gettimex64 callback: in real-time mode read the HW
 * real-time clock directly; otherwise sample the free-running counter
 * and convert through the timecounter.  Always returns 0.
 */
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	u64 cycles, ns;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	cycles = mlx5_read_time(mdev, sts, false);
	ns = mlx5_timecounter_cyc2time(clock, cycles);
	*ts = ns_to_timespec64(ns);
out:
	mlx5_clock_unlock(clock);
	return 0;
}
515 
/* Apply a phase offset @delta (ns) to the hardware real-time clock.
 * Small offsets use the MTUTC ADJUST_TIME operation; offsets outside
 * the hardware window fall back to a read-modify-settime sequence
 * (which is not atomic with respect to the running clock).
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
536 
/* ptp_clock_info.adjtime callback: shift the clock by @delta ns.
 * The hardware real-time clock is adjusted first when allowed (failure
 * aborts), then the software timecounter is shifted under clock->lock
 * and the userspace page refreshed so both views stay in step.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err = 0;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		err = mlx5_ptp_adjtime_real_time(mdev, delta);

		if (err)
			goto unlock;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
564 
/* ptp_clock_info.adjphase callback: a phase nudge is applied to the
 * hardware real-time clock only (no software timecounter change).
 */
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	int ret;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	ret = mlx5_ptp_adjtime_real_time(mdev, delta);
	mlx5_clock_unlock(clock);

	return ret;
}
578 
/* Apply a frequency adjustment to the hardware real-time clock via the
 * MTUTC ADJUST_FREQ_UTC operation.  Uses native scaled-ppm units when
 * the device supports them and the value fits in 32 bits; otherwise
 * converts to parts-per-billion first.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
	    scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
		/* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
598 
/* ptp_clock_info.adjfine callback: slew the clock rate by @scaled_ppm.
 * The hardware real-time clock is adjusted first when allowed (failure
 * aborts), then the cyclecounter multiplier is rescaled from the
 * nominal value under clock->lock.  The timecounter is read before
 * changing mult so the new rate applies only from "now", and the
 * overflow worker is rescheduled at its period.
 */
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err = 0;
	u32 mult;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);

		if (err)
			goto unlock;
	}

	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	ptp_schedule_worker(clock->ptp, timer->overflow_period);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
631 
/* ptp_clock_info.enable handler for external-timestamp (EXTTS)
 * requests: validate the request flags, resolve the pin, program the
 * MTPPS register (pin mode/pattern/enable) and arm repetitive event
 * generation via MTPPSE.  Records which device armed the events in
 * clock_priv->event_mdev so the event path can attribute them.
 *
 * Returns -EOPNOTSUPP for unsupported flags or missing PPS capability,
 * -EINVAL for a bad pin index, -EBUSY if the pin cannot serve EXTTS.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct mlx5_core_dev *mdev;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the captured edge: 1 = falling */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (!MLX5_PPS_CAP(mdev)) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		goto unlock;

	/* REPETETIVE & on == event mode 1 when enabling, 0 when disabling */
	err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
	if (err)
		goto unlock;

	clock->pps_info.pin_armed[pin] = on;
	clock_priv(clock)->event_mdev = mdev;

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
705 
/* Translate an absolute target time (@target_ns, timecounter domain)
 * into a raw cycle value of the free-running counter: sample the
 * counter now, convert to ns, and scale the remaining ns delta back to
 * cycles with the current mult/shift (read under clock->lock so the
 * conversion parameters are consistent).
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* invert the cyclecounter scaling: cycles = ns * 2^shift / mult */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
726 
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)727 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
728 {
729 	struct timespec64 ts = {};
730 	s64 target_ns;
731 
732 	ts.tv_sec = sec;
733 	target_ns = timespec64_to_ns(&ts);
734 
735 	return find_target_cycles(mdev, target_ns);
736 }
737 
perout_conf_real_time(s64 sec,u32 nsec)738 static u64 perout_conf_real_time(s64 sec, u32 nsec)
739 {
740 	return (u64)nsec | (u64)sec << 32;
741 }
742 
/* Configure a classic 1PPS periodic output: only a period of exactly
 * one second is supported.  On success *time_stamp holds the start
 * time, either packed real-time (<sec,0>) or a raw cycle value of the
 * internal timer.
 *
 * Fix: the previous check '(ns >> 1) != 500000000LL' discards the low
 * bit of the period and therefore also accepted 1000000001 ns; compare
 * the full value against NSEC_PER_SEC instead.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if (ns != NSEC_PER_SEC)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
761 
762 #define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
/* Derive the NPPS output pulse width in ns for a periodic-output
 * request: the explicit 'on' time when PTP_PEROUT_DUTY_CYCLE is set,
 * otherwise half the period (50% duty cycle).  Rejects values below
 * the device minimum or above the MTPPS field capacity.
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock->pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
795 
/* Build the NPPS (N-pulses-per-second, real-time mode) parameters for
 * a periodic-output request: validates the period against the device
 * minimum, packs period/start in the real-time timestamp layout,
 * derives the pulse width, and extends *field_select with the NPPS
 * fields.  Returns -EINVAL on any validation failure.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock->pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
822 
mlx5_perout_verify_flags(struct mlx5_core_dev * mdev,unsigned int flags)823 static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
824 {
825 	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
826 		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
827 }
828 
/* ptp_clock_info.enable handler for periodic-output (PEROUT) requests.
 *
 * Resolves the pin, validates capability and flags, then builds and
 * writes one MTPPS access covering pin mode, pattern, enable, start
 * timestamp and (for NPPS-capable devices) period and pulse width.
 * In real-time mode the hardware generates the output on its own, so
 * MTPPSE event arming is skipped; otherwise repetitive events are
 * (dis)armed to drive the software PPS path.
 *
 * Errors: -EINVAL bad pin index or start time beyond the 32-bit
 * real-time seconds field; -EBUSY pin unavailable for PEROUT;
 * -EOPNOTSUPP missing PPS capability or unsupported flags.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 out_pulse_duration_ns = 0;
	struct mlx5_core_dev *mdev;
	u32 field_select = 0;
	u64 npps_period = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	bool rt_mode;
	int pin = -1;
	int err = 0;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	rt_mode = mlx5_real_time_mode(mdev);

	if (!MLX5_PPS_CAP(mdev)) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	/* Reject requests with unsupported flags */
	if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (on) {
		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;

		/* real-time start seconds must fit the 32-bit utc_sec field */
		if (rt_mode &&  rq->perout.start.sec > U32_MAX) {
			err = -EINVAL;
			goto unlock;
		}

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;

		if (mlx5_npps_real_time_supported(mdev))
			err = perout_conf_npps_real_time(mdev, rq, &field_select,
							 &out_pulse_duration_ns, &npps_period,
							 &time_stamp);
		else
			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
		if (err)
			goto unlock;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);
	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		goto unlock;

	if (rt_mode)
		goto unlock;

	err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
914 
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)915 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
916 			      struct ptp_clock_request *rq,
917 			      int on)
918 {
919 	struct mlx5_clock *clock =
920 			container_of(ptp, struct mlx5_clock, ptp_info);
921 
922 	clock->pps_info.enabled = !!on;
923 	return 0;
924 }
925 
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)926 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
927 			   struct ptp_clock_request *rq,
928 			   int on)
929 {
930 	switch (rq->type) {
931 	case PTP_CLK_REQ_EXTTS:
932 		return mlx5_extts_configure(ptp, rq, on);
933 	case PTP_CLK_REQ_PEROUT:
934 		return mlx5_perout_configure(ptp, rq, on);
935 	case PTP_CLK_REQ_PPS:
936 		return mlx5_pps_configure(ptp, rq, on);
937 	default:
938 		return -EOPNOTSUPP;
939 	}
940 	return 0;
941 }
942 
/* MTPPS per-pin capability bits (cap_pin_<x>_mode fields): which 1PPS
 * directions the pin supports.  Checked in mlx5_ptp_verify().
 */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
947 
mlx5_ptp_verify(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)948 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
949 			   enum ptp_pin_function func, unsigned int chan)
950 {
951 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
952 						ptp_info);
953 
954 	switch (func) {
955 	case PTP_PF_NONE:
956 		return 0;
957 	case PTP_PF_EXTTS:
958 		return !(clock->pps_info.pin_caps[pin] &
959 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
960 	case PTP_PF_PEROUT:
961 		return !(clock->pps_info.pin_caps[pin] &
962 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
963 	default:
964 		return -EOPNOTSUPP;
965 	}
966 }
967 
/* Template PTP clock capabilities, copied into each mlx5_clock in
 * mlx5_init_timer_clock().  Pin counts, .pps, .enable and .verify are
 * filled in later by mlx5_get_pps_caps()/mlx5_init_pin_config() when the
 * device supports 1PPS; .getcrosststamp is set on x86 when MTPTM/MTCTR
 * and ART are available.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	/* Default; overridden from MTUTC log_max_freq_adjustment when the
	 * register is supported (see mlx5_init_timer_max_freq_adjustment).
	 */
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjphase	= mlx5_ptp_adjphase,
	.getmaxphase    = mlx5_ptp_getmaxphase,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
	.do_aux_work	= mlx5_timestamp_overflow,
};
987 
/* Query the MTPPS register for a single pin.
 * @mtpps/@mtpps_size: caller-provided output buffer for the register.
 * Return: 0 on success or a negative errno from the access-reg command.
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	/* Select which pin the device should report on. */
	MLX5_SET(mtpps_reg, in, pin, pin);
	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, mtpps_size,
				    MLX5_REG_MTPPS, 0, 0);
}
998 
/* Translate a pin's current HW configuration into a PTP pin function.
 * Query failures and disabled pins both map to PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	if (mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)) ||
	    !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
1018 
mlx5_init_pin_config(struct mlx5_core_dev * mdev)1019 static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
1020 {
1021 	struct mlx5_clock *clock = mdev->clock;
1022 	int i;
1023 
1024 	if (!clock->ptp_info.n_pins)
1025 		return;
1026 
1027 	clock->ptp_info.pin_config =
1028 			kcalloc(clock->ptp_info.n_pins,
1029 				sizeof(*clock->ptp_info.pin_config),
1030 				GFP_KERNEL);
1031 	if (!clock->ptp_info.pin_config)
1032 		return;
1033 	clock->ptp_info.enable = mlx5_ptp_enable;
1034 	clock->ptp_info.verify = mlx5_ptp_verify;
1035 	clock->ptp_info.pps = 1;
1036 
1037 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
1038 		snprintf(clock->ptp_info.pin_config[i].name,
1039 			 sizeof(clock->ptp_info.pin_config[i].name),
1040 			 "mlx5_pps%d", i);
1041 		clock->ptp_info.pin_config[i].index = i;
1042 		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
1043 		clock->ptp_info.pin_config[i].chan = 0;
1044 	}
1045 }
1046 
/* Read MTPPS capability fields and cache the device's 1PPS abilities:
 * pin counts for the PTP core plus per-pin mode capability masks (see
 * MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_*).
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct mlx5_clock *clock = mdev->clock;

	/* NOTE(review): return value not checked; on failure the zeroed
	 * buffer yields zero pins/caps, which disables 1PPS safely.
	 */
	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* Minimum NPPS period / pulse width are reported as log2 values. */
	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	/* Per-pin mode capability bits; 8 pins are described by MTPPS. */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
1077 
ts_next_sec(struct timespec64 * ts)1078 static void ts_next_sec(struct timespec64 *ts)
1079 {
1080 	ts->tv_sec += 1;
1081 	ts->tv_nsec = 0;
1082 }
1083 
perout_conf_next_event_timer(struct mlx5_core_dev * mdev,struct mlx5_clock * clock)1084 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
1085 					struct mlx5_clock *clock)
1086 {
1087 	struct timespec64 ts;
1088 	s64 target_ns;
1089 
1090 	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
1091 	ts_next_sec(&ts);
1092 	target_ns = timespec64_to_ns(&ts);
1093 
1094 	return find_target_cycles(mdev, target_ns);
1095 }
1096 
/* EQ notifier callback for hardware 1PPS events.
 *
 * EXTTS pins: convert the HW timestamp to nanoseconds (real-time or
 * free-running mode) and deliver it as a PPS user event when PPS is
 * enabled, otherwise as an external-timestamp event.
 * PEROUT pins: compute the cycle count of the next second boundary and
 * schedule out_work to re-arm the output pulse.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
							       pps_nb);
	struct mlx5_core_dev *mdev = clock_state->mdev;
	struct mlx5_clock *clock = mdev->clock;
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	unsigned long flags;
	u64 ns;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* PPS out events are not expected on a shared clock. */
		if (clock->shared) {
			mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
			break;
		}
		ns = perout_conf_next_event_timer(mdev, clock);
		/* Publish the next target under the clock seqlock. */
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock_state->out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
1146 
/* Set up the cyclecounter/timecounter over the device's free-running
 * counter and start the software clock at the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Remember the unadjusted multiplier — presumably the baseline for
	 * later frequency adjustment; confirm in mlx5_ptp_adjfine().
	 */
	timer->nominal_c_mult = timer->cycles.mult;
	/* 41-bit mask — assumed to match the HW counter width; TODO
	 * confirm against the device PRM.
	 */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
1164 
/* Compute the interval (in jiffies) at which the overflow watchdog must
 * run so the timecounter is refreshed before the HW counter can wrap
 * undetected, and mirror it into the shared clock-info page if present.
 */
static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies. */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	/* A zero period would never schedule the watchdog; fall back to
	 * once per second and warn.
	 */
	if (!timer->overflow_period) {
		timer->overflow_period = HZ;
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is scheduled once per second\n");
	}

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
1198 
mlx5_init_clock_info(struct mlx5_core_dev * mdev)1199 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
1200 {
1201 	struct mlx5_clock *clock = mdev->clock;
1202 	struct mlx5_ib_clock_info *info;
1203 	struct mlx5_timer *timer;
1204 
1205 	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
1206 	if (!mdev->clock_info) {
1207 		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
1208 		return;
1209 	}
1210 
1211 	info = mdev->clock_info;
1212 	timer = &clock->timer;
1213 
1214 	info->nsec = timer->tc.nsec;
1215 	info->cycles = timer->tc.cycle_last;
1216 	info->mask = timer->cycles.mask;
1217 	info->mult = timer->nominal_c_mult;
1218 	info->shift = timer->cycles.shift;
1219 	info->frac = timer->tc.frac;
1220 }
1221 
mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev * mdev)1222 static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
1223 {
1224 	struct mlx5_clock *clock = mdev->clock;
1225 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1226 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1227 	u8 log_max_freq_adjustment = 0;
1228 	int err;
1229 
1230 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1231 				   MLX5_REG_MTUTC, 0, 0);
1232 	if (!err)
1233 		log_max_freq_adjustment =
1234 			MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
1235 
1236 	if (log_max_freq_adjustment)
1237 		clock->ptp_info.max_adj =
1238 			min(S32_MAX, 1 << log_max_freq_adjustment);
1239 }
1240 
/* Initialize the timer side of the clock: PHC capabilities, the
 * timecounter, the shared clock-info page and the overflow period.  In
 * real-time mode the HW clock is also set to the current wall clock.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Replace the default max_adj with the device-reported limit. */
	if (MLX5_CAP_MCAM_REG(mdev, mtutc))
		mlx5_init_timer_max_freq_adjustment(mdev);

#ifdef CONFIG_X86
	/* Crosstimestamping needs the MTPTM/MTCTR registers and the CPU's
	 * Always Running Timer.
	 */
	if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
	    MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
		clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
#endif /* CONFIG_X86 */

	/* Order matters: the clock-info page and overflow period are
	 * derived from the timecounter parameters set up first.
	 */
	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(mdev);

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_clock_settime(mdev, clock, &ts);
	}
}
1268 
/* Initialize 1PPS support (capabilities and pin table) when the device
 * advertises it; otherwise leave the clock without pins.
 */
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	if (MLX5_PPS_CAP(mdev)) {
		mlx5_get_pps_caps(mdev);
		mlx5_init_pin_config(mdev);
	}
}
1277 
mlx5_init_clock_dev(struct mlx5_core_dev * mdev)1278 static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
1279 {
1280 	struct mlx5_clock *clock = mdev->clock;
1281 
1282 	seqlock_init(&clock->lock);
1283 
1284 	/* Initialize the device clock */
1285 	mlx5_init_timer_clock(mdev);
1286 
1287 	/* Initialize 1PPS data structures */
1288 	mlx5_init_pps(mdev);
1289 
1290 	clock->ptp = ptp_clock_register(&clock->ptp_info,
1291 					clock->shared ? NULL : &mdev->pdev->dev);
1292 	if (IS_ERR(clock->ptp)) {
1293 		mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
1294 			       clock->shared ? "shared clock " : "",
1295 			       PTR_ERR(clock->ptp));
1296 		clock->ptp = NULL;
1297 	}
1298 
1299 	if (clock->ptp)
1300 		ptp_schedule_worker(clock->ptp, 0);
1301 }
1302 
mlx5_destroy_clock_dev(struct mlx5_core_dev * mdev)1303 static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
1304 {
1305 	struct mlx5_clock *clock = mdev->clock;
1306 
1307 	if (clock->ptp) {
1308 		ptp_clock_unregister(clock->ptp);
1309 		clock->ptp = NULL;
1310 	}
1311 
1312 	if (mdev->clock_info) {
1313 		free_page((unsigned long)mdev->clock_info);
1314 		mdev->clock_info = NULL;
1315 	}
1316 
1317 	kfree(clock->ptp_info.pin_config);
1318 }
1319 
/* Destroy the clock device and free its private container, clearing the
 * device's clock pointer.  The clock_priv wrapper embeds the clock, so
 * the device must be torn down before the single kfree().
 */
static void mlx5_clock_free(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);

	mlx5_destroy_clock_dev(mdev);
	mutex_destroy(&cpriv->lock);
	kfree(cpriv);
	mdev->clock = NULL;
}
1329 
/* Allocate a clock (wrapped in mlx5_clock_priv) and initialize the clock
 * device for @mdev.
 * @shared: true when the clock will be shared across functions via
 *          devcom; a shared clock must end up with a registered PTP
 *          device, otherwise the allocation is rolled back.
 * Return: 0 on success or a negative errno.
 */
static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
{
	struct mlx5_clock_priv *cpriv;
	struct mlx5_clock *clock;

	cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
	if (!cpriv)
		return -ENOMEM;

	mutex_init(&cpriv->lock);
	cpriv->mdev = mdev;
	clock = &cpriv->clock;
	clock->shared = shared;
	/* mdev->clock must be set before init, which dereferences it. */
	mdev->clock = clock;
	mlx5_clock_lock(clock);
	mlx5_init_clock_dev(mdev);
	mlx5_clock_unlock(clock);

	if (!clock->shared)
		return 0;

	/* Without a PTP device there is nothing to share. */
	if (!clock->ptp) {
		mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
		mlx5_clock_free(mdev);
		return -EINVAL;
	}

	return 0;
}
1359 
/* Join the devcom component identified by @key and attach to a clock a
 * peer function already allocated, or allocate a new shared clock when
 * this is the first function with this identity.  On failure
 * mdev->clock stays NULL and the component is unregistered, so the
 * caller falls back to a per-function clock.
 */
static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_devcom_comp_dev *pos;

	mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
								    MLX5_DEVCOM_SHARED_CLOCK,
								    key, NULL, mdev);
	if (IS_ERR(mdev->clock_state->compdev))
		return;

	/* Under the component lock, look for a peer that already owns a
	 * clock for this identity.
	 */
	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock) {
			next = peer_dev;
			break;
		}
	}

	if (next) {
		mdev->clock = next->clock;
		/* clock info is shared among all the functions using the same clock */
		mdev->clock_info = next->clock_info;
	} else {
		mlx5_clock_alloc(mdev, true);
	}
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

	/* mlx5_clock_alloc() may have failed; undo the registration. */
	if (!mdev->clock) {
		mlx5_devcom_unregister_component(mdev->clock_state->compdev);
		mdev->clock_state->compdev = NULL;
	}
}
1393 
/* Detach @mdev from its shared clock.  If another function still uses
 * the clock, ownership (cpriv->mdev) is handed over to that peer;
 * otherwise the clock is freed.  Finally the devcom component is
 * unregistered.
 */
static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_devcom_comp_dev *pos;

	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	/* Find a remaining peer (other than us) still using the clock. */
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock && peer_dev != mdev) {
			next = peer_dev;
			break;
		}
	}

	if (next) {
		struct mlx5_clock_priv *cpriv = clock_priv(clock);

		/* Transfer clock ownership if we currently hold it. */
		mlx5_clock_lock(clock);
		if (mdev == cpriv->mdev)
			cpriv->mdev = next;
		mlx5_clock_unlock(clock);
	} else {
		mlx5_clock_free(mdev);
	}

	mdev->clock = NULL;
	mdev->clock_info = NULL;
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

	mlx5_devcom_unregister_component(mdev->clock_state->compdev);
}
1425 
/* Transfer PPS-in event generation between functions sharing a clock.
 * For every armed EXTTS pin: enable repetitive events on @new_mdev
 * (which becomes cpriv->event_mdev) and disable them on @old_mdev.
 * A NULL @new_mdev just clears the event owner; a NULL @old_mdev skips
 * the disable.
 */
static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
					struct mlx5_core_dev *new_mdev,
					struct mlx5_core_dev *old_mdev)
{
	struct ptp_clock_info *ptp_info = &clock->ptp_info;
	struct mlx5_clock_priv *cpriv = clock_priv(clock);
	int i;

	for (i = 0; i < ptp_info->n_pins; i++) {
		/* Only pins configured as EXTTS and currently armed. */
		if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
		    !clock->pps_info.pin_armed[i])
			continue;

		if (new_mdev) {
			mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
			cpriv->event_mdev = new_mdev;
		} else {
			cpriv->event_mdev = NULL;
		}

		if (old_mdev)
			mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
	}
}
1450 
/* Start clock event handling for @mdev: set up out_work, register the
 * PPS EQ notifier, and arm PPS-in event generation.  For a private
 * clock, events are armed directly; for a shared clock, only the owner
 * function (cpriv->mdev) arms them, under the devcom component lock.
 */
void mlx5_clock_load(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_clock_priv *cpriv;

	/* Devices with the null clock have no event state to load. */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
	MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);

	if (!clock->shared) {
		mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
		return;
	}

	cpriv = clock_priv(clock);
	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	mlx5_clock_lock(clock);
	/* Arm only if we own the clock and are not already the armed dev. */
	if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
		mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
	mlx5_clock_unlock(clock);
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
}
1476 
/* Stop clock event handling for @mdev.  A private clock simply disarms
 * its PPS-in events.  A shared clock hands event generation over to a
 * remaining peer (if any) when this function currently generates the
 * events.  In both cases the PPS notifier is unregistered and pending
 * out_work is flushed.
 */
void mlx5_clock_unload(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_devcom_comp_dev *pos;

	/* Devices with the null clock registered nothing to undo. */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	if (!clock->shared) {
		mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
		goto out;
	}

	/* Find a peer that can take over event generation. */
	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock && peer_dev != mdev) {
			next = peer_dev;
			break;
		}
	}

	mlx5_clock_lock(clock);
	if (mdev == clock_priv(clock)->event_mdev)
		mlx5_clock_arm_pps_in_event(clock, next, mdev);
	mlx5_clock_unlock(clock);
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

out:
	mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
	cancel_work_sync(&mdev->clock_state->out_work);
}
1509 
/* Placeholder clock for devices reporting no frequency capability; lets
 * mdev->clock stay non-NULL without any PTP/timer state behind it.
 */
static struct mlx5_clock null_clock;
1511 
/* Allocate and initialize the device clock state.
 * Devices that report no frequency get the dummy null_clock and succeed
 * without HW clock support.  When the real-time clock identity is
 * readable, functions with the same identity share one clock via
 * devcom; otherwise (or when sharing fails) a per-function clock is
 * allocated.
 * Return: 0 on success or a negative errno.
 */
int mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
	struct mlx5_clock_dev_state *clock_state;
	u64 key;
	int err;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mdev->clock = &null_clock;
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return 0;
	}

	clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
	if (!clock_state)
		return -ENOMEM;
	clock_state->mdev = mdev;
	mdev->clock_state = clock_state;

	if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
		if (mlx5_clock_identity_get(mdev, identity)) {
			mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
		} else {
			/* The first 8 identity bytes form the devcom key. */
			memcpy(&key, &identity, sizeof(key));
			mlx5_shared_clock_register(mdev, key);
		}
	}

	/* Fall back to a private clock if sharing was skipped or failed. */
	if (!mdev->clock) {
		err = mlx5_clock_alloc(mdev, false);
		if (err) {
			kfree(clock_state);
			mdev->clock_state = NULL;
			return err;
		}
	}

	return 0;
}
1551 
/* Counterpart of mlx5_init_clock(): release the clock — dropping the
 * shared-clock reference or freeing the private clock — and the
 * per-function clock state.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	/* null_clock devices allocated nothing (see mlx5_init_clock). */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	if (mdev->clock->shared)
		mlx5_shared_clock_unregister(mdev);
	else
		mlx5_clock_free(mdev);
	kfree(mdev->clock_state);
	mdev->clock_state = NULL;
}
1564