// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Microchip Technology

#include "microchip_rds_ptp.h"
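
/* Helpers to access the PTP registers through the PHY MMD; the register
 * offset is taken relative to either the per-port or the clock base address.
 */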
static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
				 u32 offset, enum mchp_rds_ptp_base base)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_read_mmd(phydev, PTP_MMD(clock), addr);
}

static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
				  u32 offset, enum mchp_rds_ptp_base base,
				  u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
}

static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
				   u32 offset, enum mchp_rds_ptp_base base,
				   u16 mask, u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
}

static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
				     u32 offset, enum mchp_rds_ptp_base base,
				     u16 val)
{
	struct phy_device *phydev = clock->phydev;
	u32 addr;

	addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
			  BASE_CLK(clock)));

	return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
}
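
/* Map the requested perout "on" time to the index of the closest supported
 * hardware pulse width.
 */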
static int mchp_get_pulsewidth(struct phy_device *phydev,
			       struct ptp_perout_request *perout_request,
			       int *pulse_width)
{
	struct timespec64 ts_period;
	s64 ts_on_nsec, period_nsec;
	struct timespec64 ts_on;
	static const s64 sup_on_nsecs[] = {
		100,		/* 100ns */
		500,		/* 500ns */
		1000,		/* 1us */
		5000,		/* 5us */
		10000,		/* 10us */
		50000,		/* 50us */
		100000,		/* 100us */
		500000,		/* 500us */
		1000000,	/* 1ms */
		5000000,	/* 5ms */
		10000000,	/* 10ms */
		50000000,	/* 50ms */
		100000000,	/* 100ms */
		200000000,	/* 200ms */
	};

	ts_period.tv_sec = perout_request->period.sec;
	ts_period.tv_nsec = perout_request->period.nsec;

	ts_on.tv_sec = perout_request->on.sec;
	ts_on.tv_nsec = perout_request->on.nsec;
	ts_on_nsec = timespec64_to_ns(&ts_on);
	period_nsec = timespec64_to_ns(&ts_period);

	if (period_nsec < 200) {
		phydev_warn(phydev, "perout period too small, minimum is 200ns\n");
		return -EOPNOTSUPP;
	}

	/* Default to the widest supported pulse so that *pulse_width is
	 * always initialized, even when the requested on time exceeds 200ms.
	 */
	*pulse_width = ARRAY_SIZE(sup_on_nsecs) - 1;

	for (int i = 0; i < ARRAY_SIZE(sup_on_nsecs); i++) {
		if (ts_on_nsec <= sup_on_nsecs[i]) {
			*pulse_width = i;
			break;
		}
	}

	phydev_info(phydev, "pulse width is %d\n", *pulse_width);
	return 0;
}

static int mchp_general_event_config(struct mchp_rds_ptp_clock *clock,
				     int pulse_width)
{
	int general_config;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config &= ~MCHP_RDS_PTP_GEN_CFG_LTC_EVT_MASK;
	general_config |= MCHP_RDS_PTP_GEN_CFG_LTC_EVT_SET(pulse_width);
	general_config &= ~MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	general_config |= MCHP_RDS_PTP_GEN_CFG_POLARITY;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				      MCHP_RDS_PTP_CLOCK, general_config);
}
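
/* Program the clock target reload value (the perout period) in seconds
 * and nanoseconds.
 */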
static int mchp_set_clock_reload(struct mchp_rds_ptp_clock *clock,
				 s64 period_sec, u32 period_nsec)
{
	int rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(period_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock,
				    MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(period_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock,
				      MCHP_RDS_PTP_CLK_TRGT_RELOAD_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(period_nsec) & 0x3fff);
}
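
/* Program the clock target, i.e. the time of the next perout event, in
 * seconds and nanoseconds.
 */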
static int mchp_set_clock_target(struct mchp_rds_ptp_clock *clock,
				 s64 start_sec, u32 start_nsec)
{
	int rc;

	/* Set the start time */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(start_sec));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(start_nsec));
	if (rc < 0)
		return rc;

	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CLK_TRGT_NS_HI,
				      MCHP_RDS_PTP_CLOCK,
				      upper_16_bits(start_nsec) & 0x3fff);
}
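
/* Disable the periodic output: push the clock target far into the future
 * and release the event so that it can be reconfigured later.
 */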
static int mchp_rds_ptp_perout_off(struct mchp_rds_ptp_clock *clock)
{
	int general_config;
	int rc;

	/* Set the target far in the future, effectively disabling it */
	rc = mchp_set_clock_target(clock, 0xFFFFFFFF, 0);
	if (rc < 0)
		return rc;

	general_config = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
					       MCHP_RDS_PTP_CLOCK);
	if (general_config < 0)
		return general_config;

	general_config |= MCHP_RDS_PTP_GEN_CFG_RELOAD_ADD;
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_GEN_CFG,
				    MCHP_RDS_PTP_CLOCK, general_config);
	if (rc < 0)
		return rc;

	clock->mchp_rds_ptp_event = -1;

	return 0;
}

static bool mchp_get_event(struct mchp_rds_ptp_clock *clock, int pin)
{
	if (clock->mchp_rds_ptp_event < 0 && pin == clock->event_pin) {
		clock->mchp_rds_ptp_event = pin;
		return true;
	}

	return false;
}
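
/* PTP_CLK_REQ_PEROUT handler: validate the requested pin, then program the
 * pulse width, start time and period of the periodic output.
 */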
static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
			       struct ptp_perout_request *perout, int on)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);
	struct phy_device *phydev = clock->phydev;
	int ret, event_pin, pulsewidth;

	event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
				 perout->index);
	if (event_pin != clock->event_pin)
		return -EINVAL;

	if (!on)
		return mchp_rds_ptp_perout_off(clock);

	if (!mchp_get_event(clock, event_pin))
		return -EINVAL;

	ret = mchp_get_pulsewidth(phydev, perout, &pulsewidth);
	if (ret < 0)
		return ret;

	/* Configure to pulse every period */
	ret = mchp_general_event_config(clock, pulsewidth);
	if (ret < 0)
		return ret;

	ret = mchp_set_clock_target(clock, perout->start.sec,
				    perout->start.nsec);
	if (ret < 0)
		return ret;

	return mchp_set_clock_reload(clock, perout->period.sec,
				     perout->period.nsec);
}

static int mchp_rds_ptpci_enable(struct ptp_clock_info *ptpci,
				 struct ptp_clock_request *request, int on)
{
	switch (request->type) {
	case PTP_CLK_REQ_PEROUT:
		return mchp_rds_ptp_perout(ptpci, &request->perout, on);
	default:
		return -EINVAL;
	}
}

static int mchp_rds_ptpci_verify(struct ptp_clock_info *ptpci, unsigned int pin,
				 enum ptp_pin_function func, unsigned int chan)
{
	struct mchp_rds_ptp_clock *clock = container_of(ptpci,
							struct mchp_rds_ptp_clock,
							caps);

	if (!(pin == clock->event_pin && chan == 0))
		return -1;

	switch (func) {
	case PTP_PF_NONE:
	case PTP_PF_PEROUT:
		break;
	default:
		return -1;
	}

	return 0;
}
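
/* Drain the egress or ingress timestamp FIFO together with the matching
 * skb queue, then read back the PTP interrupt status.
 */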
static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
				   enum mchp_rds_ptp_fifo_dir dir)
{
	int rc;

	if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
		skb_queue_purge(&clock->tx_queue);
	else
		skb_queue_purge(&clock->rx_queue);

	for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
		rc = mchp_rds_phy_read_mmd(clock,
					   dir == MCHP_RDS_PTP_EGRESS_FIFO ?
					   MCHP_RDS_PTP_TX_MSG_HDR2 :
					   MCHP_RDS_PTP_RX_MSG_HDR2,
					   MCHP_RDS_PTP_PORT);
		if (rc < 0)
			return rc;
	}
	return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
				     MCHP_RDS_PTP_PORT);
}

static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
				    bool enable)
{
	/* Enable or disable PTP interrupts */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
				      MCHP_RDS_PTP_PORT,
				      enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
}

static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	switch (clock->hwts_tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (ptp_msg_is_sync(skb, type)) {
			kfree_skb(skb);
			return;
		}
		fallthrough;
	case HWTSTAMP_TX_ON:
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&clock->tx_queue, skb);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	skb_push(skb, ETH_HLEN);
	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	skb_pull_inline(skb, ETH_HLEN);

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}
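
/* Look for a queued RX skb whose PTP sequence ID matches the given hardware
 * timestamp; on success attach the timestamp and deliver the skb.
 */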
static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
				   struct mchp_rds_ptp_rx_ts *rx_ts)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->rx_queue.lock, flags);
	skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
			continue;

		if (skb_sig != rx_ts->seq_id)
			continue;

		__skb_unlink(skb, &clock->rx_queue);

		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->rx_queue.lock, flags);

	if (rc) {
		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);
	}

	return rc;
}

static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
				     struct mchp_rds_ptp_rx_ts *rx_ts)
{
	unsigned long flags;

	/* If the timestamp could not be matched to an already received frame,
	 * keep it on the list until the corresponding frame arrives
	 */
	if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
		spin_lock_irqsave(&clock->rx_ts_lock, flags);
		list_add(&rx_ts->list, &clock->rx_ts_list);
		spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
	} else {
		kfree(rx_ts);
	}
}
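
/* Match a received PTP frame against the list of pending RX timestamps;
 * deliver it right away on a match, otherwise queue it until its timestamp
 * is read from the hardware.
 */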
static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
				      struct sk_buff *skb)
{
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
	struct skb_shared_hwtstamps *shhwtstamps;
	unsigned long flags;
	u16 skb_sig;

	if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
		return;

	/* Iterate over all pending RX timestamps and match them against the
	 * received skb
	 */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		/* Check if we found the signature we were looking for. */
		if (skb_sig != rx_ts->seq_id)
			continue;

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
		netif_rx(skb);

		/* Remove the entry while the list lock is still held */
		list_del(&rx_ts->list);
		rx_ts_var = rx_ts;

		break;
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	if (rx_ts_var)
		kfree(rx_ts_var);
	else
		skb_queue_tail(&clock->rx_queue, skb);
}

static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
				  struct sk_buff *skb, int type)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	if ((type & clock->version) == 0 || (type & clock->layer) == 0)
		return false;

	/* If a pending timestamp matches, the skb is delivered to the
	 * application right away; otherwise it is queued and delivered from
	 * the interrupt handler once its timestamp arrives. Either way the
	 * skb reaches the application, so do not return false when no match
	 * is found here.
	 */
	mchp_rds_ptp_match_rx_skb(clock, skb);

	return true;
}

static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
				 struct kernel_hwtstamp_config *config,
				 struct netlink_ext_ack *extack)
{
	struct mchp_rds_ptp_clock *clock =
				container_of(mii_ts, struct mchp_rds_ptp_clock,
					     mii_ts);
	struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
	int txcfg = 0, rxcfg = 0;
	unsigned long flags;
	int rc;

	clock->hwts_tx_type = config->tx_type;
	clock->rx_filter = config->rx_filter;

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clock->layer = 0;
		clock->version = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		clock->layer = PTP_CLASS_L4;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		clock->layer = PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
		clock->version = PTP_CLASS_V2;
		break;
	default:
		return -ERANGE;
	}

	/* Set up frame parsing and enable timestamping for PTP frames */
	if (clock->layer & PTP_CLASS_L2) {
		rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
		txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
	}
	if (clock->layer & PTP_CLASS_L4) {
		rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
		txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
			 MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
	}
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, rxcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, txcfg);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
	if (rc < 0)
		return rc;

	/* Enable or disable TX timestamp insertion in one-step SYNC frames */
	if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
	else
		rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
					     MCHP_RDS_PTP_PORT,
					     MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
					     (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);

	if (rc < 0)
		return rc;

	/* In case of multiple starts and stops, these need to be cleared */
	spin_lock_irqsave(&clock->rx_ts_lock, flags);
	list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
		list_del(&rx_ts->list);
		kfree(rx_ts);
	}
	spin_unlock_irqrestore(&clock->rx_ts_lock, flags);

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
	if (rc < 0)
		return rc;

	rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
	if (rc < 0)
		return rc;

	/* Now enable the timestamping interrupts */
	rc = mchp_rds_ptp_config_intr(clock,
				      config->rx_filter != HWTSTAMP_FILTER_NONE);

	return rc < 0 ? rc : 0;
}

static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
				struct kernel_ethtool_ts_info *info)
{
	struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
							struct mchp_rds_ptp_clock,
							mii_ts);

	info->phc_index = ptp_clock_index(clock->ptp_clock);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	return 0;
}
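
/* Step the local time counter by the requested delta. Small offsets use the
 * hardware step registers; larger ones fall back to reading and re-setting
 * the clock.
 */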
static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	struct timespec64 ts;
	bool add = true;
	int rc = 0;
	u32 nsec;
	s32 sec;

	/* The HW allows steps of up to 15 seconds, but limit the adjustment
	 * to 10 seconds here. Otherwise an adjustment of e.g. 14 seconds and
	 * 999999999 ns, plus the 8 ns added below to compensate for the
	 * normal increment, could exceed the 15 second limit.
	 */
	if (delta > 10000000000LL || delta < -10000000000LL) {
		/* The time adjustment is too big, so fall back to setting the
		 * time directly
		 */
		u64 now;

		info->gettime64(info, &ts);

		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);

		info->settime64(info, &ts);
		return 0;
	}
	sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
	if (delta < 0 && nsec != 0) {
		/* The nanosecond part cannot be stepped backwards, so borrow
		 * from the seconds part and step the nanoseconds forward by
		 * the complement instead
		 */
		sec--;
		nsec = NSEC_PER_SEC - nsec;
	}

	/* Calculate the adjustments and the direction */
	if (delta < 0)
		add = false;

	if (nsec > 0) {
		/* add 8 ns to cover the likely normal increment */
		nsec += 8;

		if (nsec >= NSEC_PER_SEC) {
			/* carry into seconds */
			sec++;
			nsec -= NSEC_PER_SEC;
		}
	}

	mutex_lock(&clock->ptp_lock);
	if (sec) {
		sec = abs(sec);

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK, sec);
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					       MCHP_RDS_PTP_CLOCK,
					       ((add ?
						 MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
						 0) | ((sec >> 16) &
						       GENMASK(13, 0))));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
		if (rc < 0)
			goto out_unlock;
	}

	if (nsec) {
		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
					    MCHP_RDS_PTP_CLOCK,
					    nsec & GENMASK(15, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
					    MCHP_RDS_PTP_CLOCK,
					    (nsec >> 16) & GENMASK(13, 0));
		if (rc < 0)
			goto out_unlock;

		rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
					       MCHP_RDS_PTP_CLOCK,
					       MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
	}

	mutex_unlock(&clock->ptp_lock);
	info->gettime64(info, &ts);
	mutex_lock(&clock->ptp_lock);

	/* Target update is required for pulse generation on events that
	 * are enabled
	 */
	if (clock->mchp_rds_ptp_event >= 0)
		mchp_set_clock_target(clock,
				      ts.tv_sec + MCHP_RDS_PTP_BUFFER_TIME, 0);
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}
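
/* Adjust the LTC frequency by scaled_ppm using the hardware rate adjustment
 * registers.
 */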
static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
				    long scaled_ppm)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	u16 rate_lo, rate_hi;
	bool faster = true;
	u32 rate;
	int rc;

	if (!scaled_ppm)
		return 0;

	if (scaled_ppm < 0) {
		scaled_ppm = -scaled_ppm;
		faster = false;
	}

	rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
	rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;

	rate_lo = rate & GENMASK(15, 0);
	rate_hi = (rate >> 16) & GENMASK(13, 0);

	if (faster)
		rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
				    MCHP_RDS_PTP_CLOCK, rate_hi);
	if (rc < 0)
		goto error;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
				    MCHP_RDS_PTP_CLOCK, rate_lo);
	if (rc > 0)
		rc = 0;
error:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
				      struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	time64_t secs;
	int rc = 0;
	s64 nsecs;

	mutex_lock(&clock->ptp_lock);
	/* Set read bit to 1 to save current values of 1588 local time counter
	 * into PTP LTC seconds and nanoseconds registers.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
	if (rc < 0)
		goto out_unlock;

	/* Get LTC clock values */
	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;
	secs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	secs |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs = (rc & GENMASK(13, 0));
	nsecs <<= 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
				   MCHP_RDS_PTP_CLOCK);
	if (rc < 0)
		goto out_unlock;
	nsecs |= rc;

	set_normalized_timespec64(ts, secs, nsecs);

	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
				      const struct timespec64 *ts)
{
	struct mchp_rds_ptp_clock *clock = container_of(info,
							struct mchp_rds_ptp_clock,
							caps);
	int rc;

	mutex_lock(&clock->ptp_lock);
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_sec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
				    MCHP_RDS_PTP_CLOCK,
				    lower_16_bits(ts->tv_nsec));
	if (rc < 0)
		goto out_unlock;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
				    MCHP_RDS_PTP_CLOCK,
				    upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
	if (rc < 0)
		goto out_unlock;

	/* Set load bit to 1 to write PTP LTC seconds and nanoseconds
	 * registers to 1588 local time counter.
	 */
	rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				       MCHP_RDS_PTP_CLOCK,
				       MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
	if (rc > 0)
		rc = 0;
out_unlock:
	mutex_unlock(&clock->ptp_lock);

	return rc;
}

static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
	struct ptp_header *ptp_header;
	int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return false;

	ptp_header = ptp_parse_header(skb, type);
	if (!ptp_header)
		return false;

	*sig = (__force u16)(ntohs(ptp_header->sequence_id));

	return true;
}

static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
				      u32 seconds, u32 nsec, u16 seq_id)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb, *skb_tmp;
	unsigned long flags;
	bool rc = false;
	u16 skb_sig;

	spin_lock_irqsave(&clock->tx_queue.lock, flags);
	skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
		if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
			continue;

		if (skb_sig != seq_id)
			continue;

		__skb_unlink(skb, &clock->tx_queue);
		rc = true;
		break;
	}
	spin_unlock_irqrestore(&clock->tx_queue.lock, flags);

	if (rc) {
		shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
		skb_complete_tx_timestamp(skb, &shhwtstamps);
	}
}
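
/* Read one RX timestamp (seconds, nanoseconds and sequence ID) from the
 * ingress FIFO; returns NULL if the entry is invalid or a read fails.
 */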
static struct mchp_rds_ptp_rx_ts
		*mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	struct phy_device *phydev = clock->phydev;
	struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
	u32 sec, nsec;
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
		phydev_err(phydev, "RX Timestamp is not valid!\n");
		goto error;
	}
	nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	nsec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;
	sec |= rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		goto error;

	rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
	if (!rx_ts)
		return NULL;

	rx_ts->seconds = sec;
	rx_ts->nsec = nsec;
	rx_ts->seq_id = rc;

error:
	return rx_ts;
}

static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		struct mchp_rds_ptp_rx_ts *rx_ts;

		rx_ts = mchp_rds_ptp_get_rx_ts(clock);
		if (rx_ts)
			mchp_rds_ptp_match_rx_ts(clock, rx_ts);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
}

static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
				   u32 *sec, u32 *nsec, u16 *seq)
{
	int rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
		return false;
	*nsec = (rc & GENMASK(13, 0)) << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*nsec = *nsec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = rc << 16;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;
	*sec = *sec | rc;

	rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
				   MCHP_RDS_PTP_PORT);
	if (rc < 0)
		return false;

	*seq = rc;

	return true;
}

static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
{
	int caps;

	do {
		u32 sec, nsec;
		u16 seq;

		if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
			mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);

		caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
					     MCHP_RDS_PTP_PORT);
		if (caps < 0)
			return;
	} while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
}

int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
				 u16 reg, u16 val, bool clear)
{
	if (clear)
		return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					  val);
	else
		return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
					val);
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);
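
/* Service PTP interrupts: process pending TX/RX timestamps and flush the
 * FIFOs on overflow, until no relevant status bits remain set.
 */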
irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
{
	int irq_sts;

	/* To handle rogue interrupt scenarios */
	if (!clock)
		return IRQ_NONE;

	do {
		irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
						MCHP_RDS_PTP_PORT);
		if (irq_sts < 0)
			return IRQ_NONE;

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
			mchp_rds_ptp_process_rx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
			mchp_rds_ptp_process_tx_ts(clock);

		if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_EGRESS_FIFO);

		if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
			mchp_rds_ptp_flush_fifo(clock,
						MCHP_RDS_PTP_INGRESS_FIFO);
	} while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_EN |
			    MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
			    MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
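
/* Bring the PTP hardware to a known state: reset the TSU, set up the
 * operating mode, reference clock and classifier defaults, then enable the
 * TSU and the PTP engine.
 */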
static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
{
	int rc;

	/* Disable PTP */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_CMD_CTL_DIS);
	if (rc < 0)
		return rc;

	/* Disable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	/* Clear PTP interrupt status registers */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_HARDRESET);
	if (rc < 0)
		return rc;

	/* Predictor enable */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_LATENCY_SETTING);
	if (rc < 0)
		return rc;

	/* Configure PTP operational mode */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_OP_MODE_STANDALONE);
	if (rc < 0)
		return rc;

	/* Reference clock configuration */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
				    MCHP_RDS_PTP_CLOCK,
				    MCHP_RDS_PTP_REF_CLK_CFG_SET);
	if (rc < 0)
		return rc;

	/* Classifier configurations */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
				    MCHP_RDS_PTP_PORT, 0);
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_MAX_VERSION(0xff) |
				    MCHP_RDS_PTP_MIN_VERSION(0x0));
	if (rc < 0)
		return rc;

	/* Enable TSU */
	rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
				    MCHP_RDS_PTP_PORT,
				    MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
	if (rc < 0)
		return rc;

	/* Enable PTP */
	return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
				      MCHP_RDS_PTP_CLOCK,
				      MCHP_RDS_PTP_CMD_CTL_EN);
}
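
/* Allocate and register the PTP clock for a PHY, hook up the MII
 * timestamping callbacks and initialize the hardware.
 */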
struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
					      u16 clk_base_addr,
					      u16 port_base_addr)
{
	struct mchp_rds_ptp_clock *clock;
	int rc;

	clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	clock->port_base_addr = port_base_addr;
	clock->clk_base_addr = clk_base_addr;
	clock->mmd = mmd;

	mutex_init(&clock->ptp_lock);
	clock->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
					       MCHP_RDS_PTP_N_PIN,
					       sizeof(*clock->pin_config),
					       GFP_KERNEL);
	if (!clock->pin_config)
		return ERR_PTR(-ENOMEM);

	for (int i = 0; i < MCHP_RDS_PTP_N_PIN; ++i) {
		struct ptp_pin_desc *p = &clock->pin_config[i];

		memset(p, 0, sizeof(*p));
		snprintf(p->name, sizeof(p->name), "pin%d", i);
		p->index = i;
		p->func = PTP_PF_NONE;
	}
	/* Register PTP clock */
	clock->caps.owner = THIS_MODULE;
	snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
	clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
	clock->caps.n_ext_ts = 0;
	clock->caps.pps = 0;
	clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
	clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
	clock->caps.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
	clock->caps.pin_config = clock->pin_config;
	clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
	clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
	clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
	clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
	clock->caps.enable = mchp_rds_ptpci_enable;
	clock->caps.verify = mchp_rds_ptpci_verify;
	clock->caps.getcrosststamp = NULL;
	clock->ptp_clock = ptp_clock_register(&clock->caps,
					      &phydev->mdio.dev);
	if (IS_ERR(clock->ptp_clock))
		return ERR_PTR(-EINVAL);

	/* Check if PHC support is missing at the configuration level */
	if (!clock->ptp_clock)
		return NULL;

	/* Initialize the SW */
	skb_queue_head_init(&clock->tx_queue);
	skb_queue_head_init(&clock->rx_queue);
	INIT_LIST_HEAD(&clock->rx_ts_list);
	spin_lock_init(&clock->rx_ts_lock);

	clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
	clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
	clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
	clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;

	phydev->mii_ts = &clock->mii_ts;

	clock->mchp_rds_ptp_event = -1;

	/* Timestamp selected by default to keep legacy API */
	phydev->default_timestamp = true;

	clock->phydev = phydev;

	rc = mchp_rds_ptp_init(clock);
	if (rc < 0)
		return ERR_PTR(rc);

	return clock;
}
EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
MODULE_AUTHOR("Divya Koppera");