1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <rdma/mlx5-abi.h>
37 #include "lib/eq.h"
38 #include "en.h"
39 #include "clock.h"
40 
/* Fixed-point shift used for the cycle counter (clock->cycles.shift)
 * when converting device cycles to nanoseconds.
 */
enum {
	MLX5_CYCLES_SHIFT	= 23
};
44 
/* MTPPS pin_mode values: direction a PPS pin is programmed for. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
49 
/* MTPPS pattern values for pins in output mode. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
54 
/* Event generation modes passed to mlx5_set_mtppse().  The
 * "REPETETIVE" spelling is historical and kept as-is because the name
 * is used elsewhere.
 */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
60 
/* MTPPS field_select bits: select which register fields a write
 * updates.  Bit positions presumably mirror the hardware field_select
 * layout (bits 1 and 6 unused here) — confirm against the PRM.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};
69 
/* Read the device's free-running internal timer from the init segment.
 *
 * The 64-bit counter is exposed as two 32-bit BAR registers, so the
 * high word is sampled before and after the low word; if it changed in
 * between, the low word wrapped and must be re-read.  When @sts is
 * non-NULL, system timestamps are captured around the low-word read
 * (for extended gettime support); @sts may be NULL.
 */
static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
				    struct ptp_system_timestamp *sts)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around: low word rolled over between reads */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(&dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return (u64)timer_l | (u64)timer_h1 << 32;
}
89 
read_internal_timer(const struct cyclecounter * cc)90 static u64 read_internal_timer(const struct cyclecounter *cc)
91 {
92 	struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
93 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
94 						  clock);
95 
96 	return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
97 }
98 
/* Mirror the timecounter state into the page shared with userspace
 * (struct mlx5_ib_clock_info, used for user-space clock reads).
 *
 * The sign word acts as a writer sequence: the KERNEL_UPDATING bit is
 * set (with a full barrier) before the fields are modified, and the
 * counter is released past it afterwards, so readers can detect a torn
 * snapshot and retry.  All callers invoke this with clock->lock held.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	u32 sign;

	/* Page may not have been allocated at init time. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	clock_info->cycles = clock->tc.cycle_last;
	clock_info->mult   = clock->cycles.mult;
	clock_info->nsec   = clock->tc.nsec;
	clock_info->frac   = clock->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
120 
/* Workqueue handler: program pending PPS-out start timestamps into the
 * MTPPS register.  Each pin's queued start cycle (stored in
 * pps_info.start[] by the PPS event handler) is consumed atomically
 * under clock->lock, then written to firmware outside the lock since
 * register access may sleep.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Claim the pending timestamp and clear the slot. */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
149 
mlx5_timestamp_overflow(struct work_struct * work)150 static void mlx5_timestamp_overflow(struct work_struct *work)
151 {
152 	struct delayed_work *dwork = to_delayed_work(work);
153 	struct mlx5_core_dev *mdev;
154 	struct mlx5_clock *clock;
155 	unsigned long flags;
156 
157 	clock = container_of(dwork, struct mlx5_clock, overflow_work);
158 	mdev = container_of(clock, struct mlx5_core_dev, clock);
159 	write_seqlock_irqsave(&clock->lock, flags);
160 	timecounter_read(&clock->tc);
161 	mlx5_update_clock_info_page(mdev);
162 	write_sequnlock_irqrestore(&clock->lock, flags);
163 	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
164 }
165 
mlx5_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)166 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
167 {
168 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
169 	u64 ns = timespec64_to_ns(ts);
170 	struct mlx5_core_dev *mdev;
171 	unsigned long flags;
172 
173 	mdev = container_of(clock, struct mlx5_core_dev, clock);
174 	write_seqlock_irqsave(&clock->lock, flags);
175 	timecounter_init(&clock->tc, &clock->cycles, ns);
176 	mlx5_update_clock_info_page(mdev);
177 	write_sequnlock_irqrestore(&clock->lock, flags);
178 
179 	return 0;
180 }
181 
/* PTP gettimex64 callback: read the hardware timer and convert it to
 * nanoseconds under clock->lock.  When @sts is non-NULL, system
 * timestamps are taken around the hardware read (inside
 * mlx5_read_internal_timer()); @sts may be NULL.
 */
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 cycles, ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_internal_timer(mdev, sts);
	ns = timecounter_cyc2time(&clock->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
200 
/* PTP adjtime callback: shift the timecounter by @delta nanoseconds
 * and publish the change to userspace.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&clock->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
215 
/* PTP adjfreq callback: adjust the clock rate by @delta parts per
 * billion relative to nominal.
 *
 * diff = nominal_c_mult * |delta| / 10^9, then the cycle-counter
 * multiplier becomes nominal_c_mult +/- diff.  The timecounter is read
 * first so cycles accumulated at the old rate are folded in before the
 * rate changes.
 */
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int neg_adj = 0;
	u32 diff;
	u64 adj;


	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	/* 64-bit intermediate so mult * ppb cannot overflow. */
	adj = clock->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
245 
/* Configure a pin for external timestamping (EXTTS).
 *
 * Validates the request flags and the pin assignment, programs the pin
 * as an input via the MTPPS register, then (de)arms event reporting
 * via MTPPSE.  Returns 0 on success or a negative errno.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	/* Pin must already be assigned the EXTTS function. */
	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern 1 selects falling-edge timestamping */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm repetitive event reporting when enabling, disable otherwise. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
307 
/* Configure a pin for periodic output (PEROUT).
 *
 * Only a 1-second period is accepted (the (ns >> 1) check).  The
 * requested start time is converted from nanoseconds to a device cycle
 * count and programmed into MTPPS together with the output mode; event
 * reporting is then (de)armed via MTPPSE.  Returns 0 on success or a
 * negative errno.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u64 nsec_now, nsec_delta, time_stamp = 0;
	u64 cycles_now, cycles_delta;
	struct timespec64 ts;
	unsigned long flags;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	/* Pin must already be assigned the PEROUT function. */
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
			   rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		/* Only a 1-second period is supported. */
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		/* Convert the requested start time (ns) into an absolute
		 * device cycle count: cycles = ns_delta / (mult >> shift).
		 */
		ts.tv_sec = rq->perout.start.sec;
		ts.tv_nsec = rq->perout.start.nsec;
		ns = timespec64_to_ns(&ts);
		cycles_now = mlx5_read_internal_timer(mdev, NULL);
		write_seqlock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		write_sequnlock_irqrestore(&clock->lock, flags);
		time_stamp = cycles_now + cycles_delta;
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE |
			       MLX5_MTPPS_FS_TIME_STAMP;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm repetitive event reporting when enabling, disable otherwise. */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
386 
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)387 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
388 			      struct ptp_clock_request *rq,
389 			      int on)
390 {
391 	struct mlx5_clock *clock =
392 			container_of(ptp, struct mlx5_clock, ptp_info);
393 
394 	clock->pps_info.enabled = !!on;
395 	return 0;
396 }
397 
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)398 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
399 			   struct ptp_clock_request *rq,
400 			   int on)
401 {
402 	switch (rq->type) {
403 	case PTP_CLK_REQ_EXTTS:
404 		return mlx5_extts_configure(ptp, rq, on);
405 	case PTP_CLK_REQ_PEROUT:
406 		return mlx5_perout_configure(ptp, rq, on);
407 	case PTP_CLK_REQ_PPS:
408 		return mlx5_pps_configure(ptp, rq, on);
409 	default:
410 		return -EOPNOTSUPP;
411 	}
412 	return 0;
413 }
414 
/* Per-pin capability bits as reported in the MTPPS cap_pin_<x>_mode
 * fields: whether the pin supports PPS input and/or output.
 */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
419 
/* PTP verify callback: report whether @pin supports @func, based on
 * the per-pin capability bits cached from MTPPS.  Returns 0 when
 * supported, nonzero (1) when the capability bit is missing, and
 * -EOPNOTSUPP for functions this driver never handles.
 */
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}
439 
/* Template ptp_clock_info, copied into clock->ptp_info at init.  Pin
 * counts, pps, enable and verify are left zero/NULL here; they are
 * filled in at runtime only when the device reports 1PPS pins (see
 * mlx5_init_pin_config()).
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 100000000,	/* max frequency adjustment, ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
456 
/* Query the MTPPS register for a single pin, placing the raw register
 * contents in @mtpps.  Returns 0 on success or a negative errno from
 * the register access.
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);
	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
467 
/* Translate a pin's current firmware programming into the matching PTP
 * pin function.  A disabled pin, an unknown mode, or a failed query all
 * map to PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	if (mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)) ||
	    !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
489 
/* Allocate and populate the ptp_pin_desc table for all reported pins,
 * seeding each pin's function from its current firmware programming.
 * Also installs the enable/verify callbacks and advertises PPS, which
 * are only meaningful once pins exist.
 *
 * Returns 0 on success, -ENOMEM if the table allocation fails.
 */
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return -ENOMEM;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		/* Reflect what firmware already has programmed on the pin. */
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}

	return 0;
}
515 
/* Read the MTPPS capability fields into the clock state: total pin
 * count, how many pins support input/output, and each pin's mode
 * capability mask (MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_*).  The
 * query's return value is not checked; on failure "out" stays zeroed,
 * leaving all capabilities at zero.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* Hardware reports at most 8 pins; pin_caps is sized accordingly. */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
539 
mlx5_pps_event(struct notifier_block * nb,unsigned long type,void * data)540 static int mlx5_pps_event(struct notifier_block *nb,
541 			  unsigned long type, void *data)
542 {
543 	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
544 	struct ptp_clock_event ptp_event;
545 	u64 cycles_now, cycles_delta;
546 	u64 nsec_now, nsec_delta, ns;
547 	struct mlx5_eqe *eqe = data;
548 	int pin = eqe->data.pps.pin;
549 	struct mlx5_core_dev *mdev;
550 	struct timespec64 ts;
551 	unsigned long flags;
552 
553 	mdev = container_of(clock, struct mlx5_core_dev, clock);
554 
555 	switch (clock->ptp_info.pin_config[pin].func) {
556 	case PTP_PF_EXTTS:
557 		ptp_event.index = pin;
558 		ptp_event.timestamp =
559 			mlx5_timecounter_cyc2time(clock,
560 						  be64_to_cpu(eqe->data.pps.time_stamp));
561 		if (clock->pps_info.enabled) {
562 			ptp_event.type = PTP_CLOCK_PPSUSR;
563 			ptp_event.pps_times.ts_real =
564 					ns_to_timespec64(ptp_event.timestamp);
565 		} else {
566 			ptp_event.type = PTP_CLOCK_EXTTS;
567 		}
568 		/* TODOL clock->ptp can be NULL if ptp_clock_register failes */
569 		ptp_clock_event(clock->ptp, &ptp_event);
570 		break;
571 	case PTP_PF_PEROUT:
572 		mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
573 		cycles_now = mlx5_read_internal_timer(mdev, NULL);
574 		ts.tv_sec += 1;
575 		ts.tv_nsec = 0;
576 		ns = timespec64_to_ns(&ts);
577 		write_seqlock_irqsave(&clock->lock, flags);
578 		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
579 		nsec_delta = ns - nsec_now;
580 		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
581 					 clock->cycles.mult);
582 		clock->pps_info.start[pin] = cycles_now + cycles_delta;
583 		write_sequnlock_irqrestore(&clock->lock, flags);
584 		schedule_work(&clock->pps_info.out_work);
585 		break;
586 	default:
587 		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
588 			      clock->ptp_info.pin_config[pin].func);
589 	}
590 
591 	return NOTIFY_OK;
592 }
593 
/* One-time initialization of the device clock: set up a cyclecounter/
 * timecounter over the free-running hardware timer, share the clock
 * parameters with userspace, start the overflow worker, and register
 * the PTP clock (including 1PPS pins when supported).
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 overflow_cycles;
	u64 ns;
	u64 frac = 0;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	if (!dev_freq) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}
	seqlock_init(&clock->lock);
	clock->cycles.read = read_internal_timer;
	clock->cycles.shift = MLX5_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(dev_freq,
						  clock->cycles.shift);
	/* Remember the unadjusted mult as the baseline for adjfreq. */
	clock->nominal_c_mult = clock->cycles.mult;
	/* Counter is treated as 41 bits wide for wrap handling. */
	clock->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies for the delayed work interval. */
	do_div(ns, NSEC_PER_SEC / HZ);
	clock->overflow_period = ns;

	/* Best-effort: userspace clock reads still work without this page. */
	mdev->clock_info =
		(struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (mdev->clock_info) {
		mdev->clock_info->nsec = clock->tc.nsec;
		mdev->clock_info->cycles = clock->tc.cycle_last;
		mdev->clock_info->mask = clock->cycles.mask;
		mdev->clock_info->mult = clock->nominal_c_mult;
		mdev->clock_info->shift = clock->cycles.shift;
		mdev->clock_info->frac = clock->tc.frac;
		mdev->clock_info->overflow_period = clock->overflow_period;
	}

	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
	INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
	if (clock->overflow_period)
		schedule_delayed_work(&clock->overflow_work, 0);
	else
		mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	if (MLX5_PPS_CAP(mdev))
		mlx5_get_pps_caps(mdev);
	if (clock->ptp_info.n_pins)
		/* NOTE(review): -ENOMEM from mlx5_init_pin_config() is
		 * ignored here; the clock still registers without pins.
		 */
		mlx5_init_pin_config(clock);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
672 
/* Tear down everything mlx5_init_clock() set up: notifier, PTP clock,
 * workers, userspace page, and pin table.  Bails out early when the
 * clock was never initialized (no device frequency capability).
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	/* Stop event delivery before tearing down its consumers. */
	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
696