xref: /linux/drivers/net/ethernet/ti/icssg/icss_iep.c (revision 63467137ecc0ff6f804d53903ad87a2f0397a18b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
4  *
5  * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/err.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/timekeeping.h>
18 #include <linux/interrupt.h>
19 #include <linux/of_irq.h>
20 #include <linux/workqueue.h>
21 
22 #include "icss_iep.h"
23 
24 #define IEP_MAX_DEF_INC		0xf
25 #define IEP_MAX_COMPEN_INC		0xfff
26 #define IEP_MAX_COMPEN_COUNT	0xffffff
27 
28 #define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
29 #define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
30 #define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
31 #define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
32 #define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8
33 
34 #define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)
35 
36 #define IEP_CMP_CFG_SHADOW_EN		BIT(17)
37 #define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
38 #define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))
39 
40 #define IEP_CMP_STATUS(cmp)		(1 << (cmp))
41 
42 #define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
43 #define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))
44 
45 #define IEP_MIN_CMP	0
46 #define IEP_MAX_CMP	15
47 
48 #define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
49 #define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
50 #define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)
51 
52 #define LATCH_INDEX(ts_index)			((ts_index) + 6)
53 #define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
54 #define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
55 
56 /**
57  * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
58  * @iep: Pointer to structure representing IEP.
59  *
60  * Return: upper 32 bit IEP counter
61  */
icss_iep_get_count_hi(struct icss_iep * iep)62 int icss_iep_get_count_hi(struct icss_iep *iep)
63 {
64 	u32 val = 0;
65 
66 	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
67 		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
68 
69 	return val;
70 }
71 EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
72 
73 /**
74  * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
75  * @iep: Pointer to structure representing IEP.
76  *
77  * Return: lower 32 bit IEP counter
78  */
icss_iep_get_count_low(struct icss_iep * iep)79 int icss_iep_get_count_low(struct icss_iep *iep)
80 {
81 	u32 val = 0;
82 
83 	if (iep)
84 		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
85 
86 	return val;
87 }
88 EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
89 
90 /**
91  * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
92  * @iep: Pointer to structure representing IEP.
93  *
94  * Return: PTP clock index, -1 if not registered
95  */
icss_iep_get_ptp_clock_idx(struct icss_iep * iep)96 int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
97 {
98 	if (!iep || !iep->ptp_clock)
99 		return -1;
100 	return ptp_clock_index(iep->ptp_clock);
101 }
102 EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
103 
/* Load the IEP counter with @ns; on 64-bit capable hardware the high
 * word is written first, the low word last.
 */
static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	const u32 *offs = iep->plat_data->reg_offs;

	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG0]);
}
111 
112 static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
113 
/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	/* Clock-ops (firmware-assisted) instances set the time themselves */
	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	/* Quiesce sync output while the counter is rewritten */
	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	if (iep->pps_enabled || iep->perout_enabled) {
		/* Re-arm CMP1 at the next period boundary of the new time,
		 * then re-enable SYNC0 generation.
		 */
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
}
140 
/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	/* Clock-ops (firmware-assisted) instances read the time themselves */
	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	/* ts_hi stays 0 on hardware without a 64-bit counter */
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	return (u64)ts_lo | (u64)ts_hi << 32;
}
174 
icss_iep_enable(struct icss_iep * iep)175 static void icss_iep_enable(struct icss_iep *iep)
176 {
177 	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
178 			   IEP_GLOBAL_CFG_CNT_ENABLE,
179 			   IEP_GLOBAL_CFG_CNT_ENABLE);
180 }
181 
icss_iep_disable(struct icss_iep * iep)182 static void icss_iep_disable(struct icss_iep *iep)
183 {
184 	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
185 			   IEP_GLOBAL_CFG_CNT_ENABLE,
186 			   0);
187 }
188 
/* Put the IEP into CMP shadow mode with CMP0 wrapping the counter every
 * iep->cycle_time_ns. The counter is stopped, all compare state is
 * cleared, CMP0 auto-reset is armed, and the counter is restarted at 0.
 */
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	/* CMP0 fires on the tick *before* the wrap, hence minus one increment */
	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status (write-one-to-clear) */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status and disable every compare unit.
	 * NOTE(review): the loop runs cmp < IEP_MAX_CMP (0..14), so CMP15
	 * is never touched here — confirm that is intended.
	 */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}
240 
/* Program the per-tick default increment field in the global config */
static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	u32 field = (u32)def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT;

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK, field);
}
247 
/* Program the compensation increment field, clamped to its 12-bit width */
static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	struct device *iep_dev = regmap_get_device(iep->map);

	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(iep_dev, "%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}
262 
/* Program the (fast) compensation counter, clamped to its 24-bit width */
static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *iep_dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		dev_err(iep_dev, "%s: too high compensation count %d\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}
276 
/* Program the slow-compensation cycle count (no range check: register
 * takes the full 32-bit value).
 */
static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}
282 
283 /* PTP PHC operations */
icss_iep_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)284 static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
285 {
286 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
287 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
288 	u32 cyc_count;
289 	u16 cmp_inc;
290 
291 	mutex_lock(&iep->ptp_clk_mutex);
292 
293 	/* ppb is amount of frequency we want to adjust in 1GHz (billion)
294 	 * e.g. 100ppb means we need to speed up clock by 100Hz
295 	 * i.e. at end of 1 second (1 billion ns) clock time, we should be
296 	 * counting 100 more ns.
297 	 * We use IEP slow compensation to achieve continuous freq. adjustment.
298 	 * There are 2 parts. Cycle time and adjustment per cycle.
299 	 * Simplest case would be 1 sec Cycle time. Then adjustment
300 	 * pre cycle would be (def_inc + ppb) value.
301 	 * Cycle time will have to be chosen based on how worse the ppb is.
302 	 * e.g. smaller the ppb, cycle time has to be large.
303 	 * The minimum adjustment we can do is +-1ns per cycle so let's
304 	 * reduce the cycle time to get 1ns per cycle adjustment.
305 	 *	1ppb = 1sec cycle time & 1ns adjust
306 	 *	1000ppb = 1/1000 cycle time & 1ns adjust per cycle
307 	 */
308 
309 	if (iep->cycle_time_ns)
310 		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
311 	else
312 		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */
313 
314 	if (ppb < 0) {
315 		iep->slow_cmp_inc = -iep->slow_cmp_inc;
316 		ppb = -ppb;
317 	}
318 
319 	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
320 	cyc_count /= ppb;		/* cycle time per ppb */
321 
322 	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
323 	if (!iep->cycle_time_ns)
324 		cyc_count /= iep->clk_tick_time;
325 	iep->slow_cmp_count = cyc_count;
326 
327 	/* iep->clk_tick_time is def_inc */
328 	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
329 	icss_iep_set_compensation_inc(iep, cmp_inc);
330 	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
331 
332 	mutex_unlock(&iep->ptp_clk_mutex);
333 
334 	return 0;
335 }
336 
/* Shift the PHC by @delta ns: delegate to clock-ops when present,
 * otherwise do a read-modify-write of the counter under the PTP mutex.
 */
static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		s64 now = icss_iep_gettime(iep, NULL);

		icss_iep_settime(iep, now + delta);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}
354 
icss_iep_ptp_gettimeex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)355 static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
356 				  struct timespec64 *ts,
357 				  struct ptp_system_timestamp *sts)
358 {
359 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
360 	u64 ns;
361 
362 	mutex_lock(&iep->ptp_clk_mutex);
363 	ns = icss_iep_gettime(iep, sts);
364 	*ts = ns_to_timespec64(ns);
365 	mutex_unlock(&iep->ptp_clk_mutex);
366 
367 	return 0;
368 }
369 
icss_iep_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)370 static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
371 				const struct timespec64 *ts)
372 {
373 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
374 	u64 ns;
375 
376 	mutex_lock(&iep->ptp_clk_mutex);
377 	ns = timespec64_to_ns(ts);
378 	icss_iep_settime(iep, ns);
379 	mutex_unlock(&iep->ptp_clk_mutex);
380 
381 	return 0;
382 }
383 
/* Program CMP1 to the first iep->period boundary at or after @start_ns
 * (and not earlier than the current counter value).
 */
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
	/* do_div() leaves the quotient in start_ns, returns the remainder */
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}
405 
/* Program CMP1 + SYNC0 for periodic output generation.
 * @on == 0 tears everything down; @req must be valid only when @on != 0.
 *
 * Return: 0 on success, negative error from the clock-ops otherwise.
 */
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	struct timespec64 ts;
	u64 ns_start;
	u64 ns_width;
	int ret;
	u64 cmp;

	if (!on) {
		/* Disable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), 0);

		/* clear CMP regs */
		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

		/* Disable sync */
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);

		return 0;
	}

	/* Calculate width of the signal for PPS/PEROUT handling */
	ts.tv_sec = req->on.sec;
	ts.tv_nsec = req->on.nsec;
	ns_width = timespec64_to_ns(&ts);

	if (req->flags & PTP_PEROUT_PHASE) {
		ts.tv_sec = req->phase.sec;
		ts.tv_nsec = req->phase.nsec;
		ns_start = timespec64_to_ns(&ts);
	} else {
		ns_start = 0;
	}

	if (iep->ops && iep->ops->perout_enable) {
		/* Firmware-assisted mode: the clock-ops computes the CMP value */
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		/* Configure CMP */
		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
		/* Configure SYNC, based on req on width */
		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
			     div_u64(ns_width, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
			     div_u64(ns_start, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
		/* Enable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
	} else {
		u64 start_ns;

		/* NOTE(review): start_ns is seeded from req->period, not
		 * req->start; update_to_next_boundary() then aligns it to
		 * the next period boundary of current time — confirm the
		 * requested start time is intentionally ignored here.
		 */
		iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
				req->period.nsec;
		start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
				+ req->period.nsec;
		icss_iep_update_to_next_boundary(iep, start_ns);

		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
			     div_u64(ns_width, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
			     div_u64(ns_start, iep->def_inc));
		/* Enable Sync in single shot mode  */
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
			     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
		/* Enable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
	}

	return 0;
}
486 
/* PTP perout request entry point. Validates flags, applies a default 1 ms
 * pulse width, and serializes against PPS via the PTP mutex.
 * @req may be NULL when @on == 0 (the disable path never dereferences it).
 */
static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	int ret = 0;

	/* Disable requests skip flag validation and defaulting */
	if (!on)
		goto disable;

	/* Reject requests with unsupported flags */
	if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
			  PTP_PEROUT_PHASE))
		return -EOPNOTSUPP;

	/* Set default "on" time (1ms) for the signal if not passed by the app */
	if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
		req->on.sec = 0;
		req->on.nsec = NSEC_PER_MSEC;
	}

disable:
	mutex_lock(&iep->ptp_clk_mutex);

	/* perout and PPS are mutually exclusive users of CMP1/SYNC0 */
	if (iep->pps_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->perout_enabled == !!on)
		goto exit;

	ret = icss_iep_perout_enable_hw(iep, req, on);
	if (!ret)
		iep->perout_enabled = !!on;

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
526 
/* Deferred half of the CMP1 interrupt: advance CMP1 by one period and
 * report a PPS event to the PTP core. Runs in process context so it can
 * take the PTP mutex.
 */
static void icss_iep_cap_cmp_work(struct work_struct *work)
{
	struct icss_iep *iep = container_of(work, struct icss_iep, work);
	const u32 *reg_offs = iep->plat_data->reg_offs;
	struct ptp_clock_event pevent;
	unsigned int val;
	u64 ns, ns_next;

	mutex_lock(&iep->ptp_clk_mutex);

	/* Read back the CMP1 value that just fired */
	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
		val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
		ns |= (u64)val << 32;
	}
	/* set next event */
	ns_next = ns + iep->period;
	writel(lower_32_bits(ns_next),
	       iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns_next),
		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);

	/* Report the fired boundary as a PPS user event */
	pevent.pps_times.ts_real = ns_to_timespec64(ns);
	pevent.type = PTP_CLOCK_PPSUSR;
	pevent.index = 0;
	ptp_clock_event(iep->ptp_clock, &pevent);
	dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);

	mutex_unlock(&iep->ptp_clk_mutex);
}
558 
icss_iep_cap_cmp_irq(int irq,void * dev_id)559 static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
560 {
561 	struct icss_iep *iep = (struct icss_iep *)dev_id;
562 	const u32 *reg_offs = iep->plat_data->reg_offs;
563 	unsigned int val;
564 
565 	val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
566 	/* The driver only enables CMP1 */
567 	if (val & BIT(1)) {
568 		/* Clear the event */
569 		writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
570 		if (iep->pps_enabled || iep->perout_enabled)
571 			schedule_work(&iep->work);
572 		return IRQ_HANDLED;
573 	}
574 
575 	return IRQ_NONE;
576 }
577 
/* Enable/disable a 1 Hz PPS signal by synthesizing a perout request with
 * a 1 s period starting 2 s from now. Mutually exclusive with perout.
 */
static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
	struct ptp_clock_request rq;
	struct timespec64 ts;
	int ret = 0;
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->perout_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->pps_enabled == !!on)
		goto exit;

	rq.perout.index = 0;
	if (on) {
		/* Build a 1 Hz request aligned to the next-but-one second */
		ns = icss_iep_gettime(iep, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.flags = 0;
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
		rq.perout.on.sec = 0;
		rq.perout.on.nsec = NSEC_PER_MSEC;
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
	} else {
		/* Disable path: rq is not read by the hw helper when !on */
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
		if (iep->cap_cmp_irq)
			cancel_work_sync(&iep->work);
	}

	if (!ret)
		iep->pps_enabled = !!on;

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
621 
/* Enable/disable external timestamp capture on latch @index, either via
 * the clock-ops or by toggling the capture-config bits directly.
 */
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 cfg, cap_bits;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	/* Nothing to do when the latch is already in the requested state */
	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &cfg);
	cap_bits = IEP_CAP_CFG_CAP_ASYNC_EN(index) |
		   IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		cfg |= cap_bits;
		iep->latch_enable |= BIT(index);
	} else {
		cfg &= ~cap_bits;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, cfg);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
653 
icss_iep_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)654 static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
655 			       struct ptp_clock_request *rq, int on)
656 {
657 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
658 
659 	switch (rq->type) {
660 	case PTP_CLK_REQ_PEROUT:
661 		return icss_iep_perout_enable(iep, &rq->perout, on);
662 	case PTP_CLK_REQ_PPS:
663 		return icss_iep_pps_enable(iep, on);
664 	case PTP_CLK_REQ_EXTTS:
665 		return icss_iep_extts_enable(iep, rq->extts.index, on);
666 	default:
667 		break;
668 	}
669 
670 	return -EOPNOTSUPP;
671 }
672 
/* Template ptp_clock_info: copied into each IEP instance in probe and
 * then tailored (n_per_out, pps, n_ext_ts) in icss_iep_init().
 */
static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};
683 
/**
 * icss_iep_get_idx() - Acquire the IEP referenced by @np's "ti,iep" list
 * @np: client device node carrying the "ti,iep" phandle list
 * @idx: index into the phandle list
 *
 * Takes a reference on the IEP's platform device; released by
 * icss_iep_put(). Only one client may hold an IEP at a time.
 *
 * Return: IEP handle, or ERR_PTR(-ENODEV / -EPROBE_DEFER / -EBUSY).
 */
struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
{
	struct platform_device *pdev;
	struct device_node *iep_np;
	struct icss_iep *iep;
	int ret;

	iep_np = of_parse_phandle(np, "ti,iep", idx);
	if (!iep_np)
		return ERR_PTR(-ENODEV);

	if (!of_device_is_available(iep_np)) {
		of_node_put(iep_np);
		return ERR_PTR(-ENODEV);
	}

	/* of_find_device_by_node() takes a device reference; on success it
	 * is kept until icss_iep_put(), on failure dropped below.
	 */
	pdev = of_find_device_by_node(iep_np);
	of_node_put(iep_np);

	if (!pdev)
		/* probably IEP not yet probed */
		return ERR_PTR(-EPROBE_DEFER);

	iep = platform_get_drvdata(pdev);
	if (!iep) {
		ret = -EPROBE_DEFER;
		goto err_put_pdev;
	}

	/* device_lock serializes ownership claims between clients */
	device_lock(iep->dev);
	if (iep->client_np) {
		device_unlock(iep->dev);
		dev_err(iep->dev, "IEP is already acquired by %s",
			iep->client_np->name);
		ret = -EBUSY;
		goto err_put_pdev;
	}
	iep->client_np = np;
	device_unlock(iep->dev);

	return iep;

err_put_pdev:
	put_device(&pdev->dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);
732 
/**
 * icss_iep_get() - Acquire the first IEP referenced by @np
 * @np: client device node carrying the "ti,iep" phandle list
 *
 * Convenience wrapper around icss_iep_get_idx() with index 0.
 *
 * Return: IEP handle or ERR_PTR(), see icss_iep_get_idx().
 */
struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
738 
/**
 * icss_iep_put() - Release an IEP acquired with icss_iep_get[_idx]()
 * @iep: Pointer to structure representing IEP.
 *
 * Clears the ownership claim under the device lock and drops the device
 * reference taken at acquisition time.
 */
void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
747 
/**
 * icss_iep_init_fw() - Start the IEP for firmware use
 * @iep: Pointer to structure representing IEP.
 *
 * Programs nominal increments with no compensation, starts the counter in
 * free-running 64-bit mode at 0, with no PTP clock registration.
 */
void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);
767 
/**
 * icss_iep_exit_fw() - Stop an IEP that was started with icss_iep_init_fw()
 * @iep: Pointer to structure representing IEP.
 */
void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
773 
/**
 * icss_iep_init() - Initialize the IEP as a PTP hardware clock
 * @iep: Pointer to structure representing IEP.
 * @clkops: optional firmware-assisted clock operations (may be NULL)
 * @clockops_data: opaque cookie passed to @clkops callbacks
 * @cycle_time_ns: CMP0 wrap period; 0 selects free-running mode
 *
 * Programs nominal increments, advertises perout/PPS/extts capabilities
 * depending on hardware flags, clkops and IRQ availability, starts the
 * counter at the current wall-clock time and registers the PTP clock.
 *
 * Return: 0 on success, negative error from ptp_clock_register() otherwise.
 */
int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	/* perout/PPS need both the 64-bit counter and slow compensation */
	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	} else if (iep->cap_cmp_irq) {
		/* PPS only, driven by the cap_cmp interrupt */
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);
822 
/**
 * icss_iep_exit() - Tear down a PHC-mode IEP
 * @iep: Pointer to structure representing IEP.
 *
 * Return: always 0.
 */
int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	/* NOTE(review): PPS/perout hardware is torn down after the counter
	 * is already stopped and the clock unregistered — confirm this
	 * ordering is intended.
	 */
	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
839 
icss_iep_probe(struct platform_device * pdev)840 static int icss_iep_probe(struct platform_device *pdev)
841 {
842 	struct device *dev = &pdev->dev;
843 	struct icss_iep *iep;
844 	struct clk *iep_clk;
845 	int ret, irq;
846 
847 	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
848 	if (!iep)
849 		return -ENOMEM;
850 
851 	iep->dev = dev;
852 	iep->base = devm_platform_ioremap_resource(pdev, 0);
853 	if (IS_ERR(iep->base))
854 		return -ENODEV;
855 
856 	irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
857 	if (irq == -EPROBE_DEFER)
858 		return irq;
859 
860 	if (irq > 0) {
861 		ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
862 				       IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
863 		if (ret) {
864 			dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
865 				 ret);
866 		} else {
867 			iep->cap_cmp_irq = irq;
868 			INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
869 		}
870 	}
871 
872 	iep_clk = devm_clk_get(dev, NULL);
873 	if (IS_ERR(iep_clk))
874 		return PTR_ERR(iep_clk);
875 
876 	iep->refclk_freq = clk_get_rate(iep_clk);
877 
878 	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
879 	if (iep->def_inc > IEP_MAX_DEF_INC) {
880 		dev_err(dev, "Failed to set def_inc %d.  IEP_clock is too slow to be supported\n",
881 			iep->def_inc);
882 		return -EINVAL;
883 	}
884 
885 	iep->plat_data = device_get_match_data(dev);
886 	if (!iep->plat_data)
887 		return -EINVAL;
888 
889 	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
890 	if (IS_ERR(iep->map)) {
891 		dev_err(dev, "Failed to create regmap for IEP %ld\n",
892 			PTR_ERR(iep->map));
893 		return PTR_ERR(iep->map);
894 	}
895 
896 	iep->ptp_info = icss_iep_ptp_info;
897 	mutex_init(&iep->ptp_clk_mutex);
898 	dev_set_drvdata(dev, iep);
899 	icss_iep_disable(iep);
900 
901 	return 0;
902 }
903 
am654_icss_iep_valid_reg(struct device * dev,unsigned int reg)904 static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
905 {
906 	switch (reg) {
907 	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
908 		return true;
909 	default:
910 		return false;
911 	}
912 
913 	return false;
914 }
915 
icss_iep_regmap_write(void * context,unsigned int reg,unsigned int val)916 static int icss_iep_regmap_write(void *context, unsigned int reg,
917 				 unsigned int val)
918 {
919 	struct icss_iep *iep = context;
920 
921 	writel(val, iep->base + iep->plat_data->reg_offs[reg]);
922 
923 	return 0;
924 }
925 
icss_iep_regmap_read(void * context,unsigned int reg,unsigned int * val)926 static int icss_iep_regmap_read(void *context, unsigned int reg,
927 				unsigned int *val)
928 {
929 	struct icss_iep *iep = context;
930 
931 	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);
932 
933 	return 0;
934 }
935 
/* Indirect regmap over the IEP window: "registers" are indices into
 * plat_data->reg_offs, not byte offsets; accessors do the translation.
 */
static const struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};
945 
/* AM654 IEP: byte offsets of each register, indexed by the ICSS_IEP_*_REG
 * enum used throughout the driver.
 */
static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};
984 
/* Device-tree match table; .data selects the per-SoC register layout */
static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
993 
/* No .remove: all resources are devm-managed and clients must release
 * the IEP (icss_iep_put/icss_iep_exit) before the device goes away.
 */
static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1007