// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021 Hisilicon Limited.

#include <linux/skbuff.h>
#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"

static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
			 HCLGE_PTP_CYCLE_QUO_MASK;
	ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);

	if (ptp->cycle.den == 0) {
		dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
		return -EINVAL;
	}

	return 0;
}

static int hclge_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
	u64 adj_val, adj_base;
	unsigned long flags;
	u32 quo, numerator;

	adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
	adj_val = adjust_by_scaled_ppm(adj_base, scaled_ppm);

	/* The clock cycle is defined by three parts: quotient, numerator
	 * and denominator. For example, for a 2.5 ns cycle the quotient is 2,
	 * the denominator is fixed to ptp->cycle.den, and the numerator
	 * is 0.5 * ptp->cycle.den.
	 */
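	/* Worked example with hypothetical cycle values (the real
	 * quo/numer/den are read from hardware in hclge_ptp_get_cycle()):
	 * assume quo = 2, numer = 500000 and den = 1000000, i.e. a 2.5 ns
	 * cycle. Then adj_base = 2 * 1000000 + 500000 = 2500000, and a
	 * +1 ppm adjustment (scaled_ppm = 65536) gives adj_val of roughly
	 * 2500002, which div_u64_rem() below splits back into quo = 2 and
	 * numerator = 500002.
	 */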
	quo = div_u64_rem(adj_val, cycle->den, &numerator);

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
	writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
	writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
	writel(HCLGE_PTP_CYCLE_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ptp *ptp = hdev->ptp;

	if (!ptp)
		return false;

	if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
	    test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
		ptp->tx_skipped++;
		return false;
	}

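	/* Hold an extra reference on the skb; it is released in
	 * hclge_ptp_clean_tx_hwts() once the hardware TX timestamp has been
	 * read back and delivered via skb_tstamp_tx().
	 */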
	ptp->tx_start = jiffies;
	ptp->tx_skb = skb_get(skb);
	ptp->tx_cnt++;

	return true;
}

void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
{
	struct sk_buff *skb = hdev->ptp->tx_skb;
	struct skb_shared_hwtstamps hwts;
	u32 hi, lo;
	u64 ns;

	ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
	     HCLGE_PTP_TX_TS_NSEC_MASK;
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
	     HCLGE_PTP_TX_TS_SEC_H_MASK;
	hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
		HCLGE_PTP_TX_TS_SEQID_REG);

	if (skb) {
		hdev->ptp->tx_skb = NULL;
		hdev->ptp->tx_cleaned++;

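		/* Combine the seconds value, split across the hi/lo
		 * registers, with the nanosecond field to get nanoseconds
		 * since the epoch.
		 */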
		ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
		hwts.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &hwts);
		dev_kfree_skb_any(skb);
	}

	clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
}

void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
			   u32 nsec, u32 sec)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned long flags;
	u64 ns = nsec;
	u32 sec_h;

	if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
		return;

	/* The RX BD does not have enough space for the upper 16 bits of the
	 * seconds value. Since that part changes only rarely, read it from
	 * the register instead.
	 */
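	/* For example (assuming HCLGE_PTP_SEC_H_OFFSET is 32), sec_h = 0x1
	 * combined with sec = 0x23456789 from the BD yields a seconds value
	 * of 0x1_2345_6789.
	 */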
	spin_lock_irqsave(&hdev->ptp->lock, flags);
	sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	hdev->ptp->last_rx = jiffies;
	hdev->ptp->rx_cnt++;
}

static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	u32 hi, lo;
	u64 ns;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
	hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
	lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
	*ts = ns_to_timespec64(ns);

	return 0;
}

static int hclge_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
	writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
	/* synchronize the time of phc */
	writel(HCLGE_PTP_TIME_SYNC_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
	unsigned long flags;
	bool is_neg = false;
	u32 adj_val = 0;

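	/* If |delta| fits in the hardware nanosecond adjustment field, apply
	 * it directly; otherwise fall back to reading the current time and
	 * rewriting it with the delta applied.
	 */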
	if (delta < 0) {
		adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
		delta = -delta;
		is_neg = true;
	}

	if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
		struct timespec64 ts;
		s64 ns;

		hclge_ptp_gettimex(ptp, &ts, NULL);
		ns = timespec64_to_ns(&ts);
		ns = is_neg ? ns - delta : ns + delta;
		ts = ns_to_timespec64(ns);
		return hclge_ptp_settime(ptp, &ts);
	}

	adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;

	spin_lock_irqsave(&hdev->ptp->lock, flags);
	writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
	writel(HCLGE_PTP_TIME_ADJ_EN,
	       hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
	spin_unlock_irqrestore(&hdev->ptp->lock, flags);

	return 0;
}

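/* Report the current hardware timestamping configuration back to user space;
 * this backs the SIOCGHWTSTAMP ioctl path.
 */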
int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
		sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
{
	struct hclge_ptp_int_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_int_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
	req->int_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to %s ptp interrupt, ret = %d\n",
			str_enable_disable(en), ret);

	return ret;
}

int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query ptp config, ret = %d\n", ret);
		return ret;
	}

	*cfg = le32_to_cpu(req->cfg);

	return 0;
}

static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
{
	struct hclge_ptp_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_ptp_cfg_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
	req->cfg = cpu_to_le32(cfg);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to config ptp, ret = %d\n", ret);

	return ret;
}

static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	switch (cfg->tx_type) {
	case HWTSTAMP_TX_OFF:
		clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		break;
	case HWTSTAMP_TX_ON:
		set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_TX_EN_B;
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
				 unsigned long *flags, u32 *ptp_cfg)
{
	int rx_filter = cfg->rx_filter;

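	/* The hardware filters on classes of PTP event packets rather than
	 * individual message types, so narrower requests are widened to the
	 * matching class and the filter actually in use is reported back
	 * through cfg->rx_filter.
	 */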
	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
		*ptp_cfg |= HCLGE_PTP_RX_EN_B;
		*ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
		*ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
		rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_ALL:
	default:
		return -ERANGE;
	}

	cfg->rx_filter = rx_filter;

	return 0;
}

static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
				 struct hwtstamp_config *cfg)
{
	unsigned long flags = hdev->ptp->flags;
	u32 ptp_cfg = 0;
	int ret;

	if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
		ptp_cfg |= HCLGE_PTP_EN_B;

	ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
	if (ret)
		return ret;

	ret = hclge_ptp_cfg(hdev, ptp_cfg);
	if (ret)
		return ret;

	hdev->ptp->flags = flags;
	hdev->ptp->ptp_cfg = ptp_cfg;

	return 0;
}

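/* Apply a hardware timestamping configuration from user space and echo the
 * (possibly widened) configuration back; this backs the SIOCSHWTSTAMP ioctl
 * path.
 */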
int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	int ret;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	ret = hclge_ptp_set_ts_mode(hdev, &cfg);
	if (ret)
		return ret;

	hdev->ptp->ts_cfg = cfg;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
			  struct kernel_ethtool_ts_info *info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
		dev_err(&hdev->pdev->dev, "phc is unsupported\n");
		return -EOPNOTSUPP;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (hdev->ptp->clock)
		info->phc_index = ptp_clock_index(hdev->ptp->clock);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);

	info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);

	return 0;
}

static int hclge_ptp_create_clock(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp;

	ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
	if (!ptp)
		return -ENOMEM;

	ptp->hdev = hdev;
	snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
		 HCLGE_DRIVER_NAME);
	ptp->info.owner = THIS_MODULE;
	ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
	ptp->info.n_ext_ts = 0;
	ptp->info.pps = 0;
	ptp->info.adjfine = hclge_ptp_adjfine;
	ptp->info.adjtime = hclge_ptp_adjtime;
	ptp->info.gettimex64 = hclge_ptp_gettimex;
	ptp->info.settime64 = hclge_ptp_settime;

	ptp->info.n_alarm = 0;

	spin_lock_init(&ptp->lock);
	ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
	hdev->ptp = ptp;

	ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		dev_err(&hdev->pdev->dev,
			"%d failed to register ptp clock, ret = %ld\n",
			ptp->info.n_alarm, PTR_ERR(ptp->clock));
		return -ENODEV;
	} else if (!ptp->clock) {
		dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
		return -ENODEV;
	}

	return 0;
}

static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
{
	ptp_clock_unregister(hdev->ptp->clock);
	hdev->ptp->clock = NULL;
	devm_kfree(&hdev->pdev->dev, hdev->ptp);
	hdev->ptp = NULL;
}

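/* hdev->ptp persists across re-initialisation (for example across a reset),
 * so the clock registration and the cycle parameters are only set up on the
 * first call; everything else is reprogrammed each time.
 */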
int hclge_ptp_init(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct timespec64 ts;
	int ret;

	if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
		return 0;

	if (!hdev->ptp) {
		ret = hclge_ptp_create_clock(hdev);
		if (ret)
			return ret;

		ret = hclge_ptp_get_cycle(hdev);
		if (ret)
			goto out;
	}

	ret = hclge_ptp_int_en(hdev, true);
	if (ret)
		goto out;

	set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
	ret = hclge_ptp_adjfine(&hdev->ptp->info, 0);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init freq, ret = %d\n", ret);
		goto out;
	}

	ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts mode, ret = %d\n", ret);
		goto out;
	}

	ktime_get_real_ts64(&ts);
	ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to init ts time, ret = %d\n", ret);
		goto out;
	}

	set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	dev_info(&hdev->pdev->dev, "phc initializes ok!\n");

	return 0;

out:
	hclge_ptp_destroy_clock(hdev);

	return ret;
}

void hclge_ptp_uninit(struct hclge_dev *hdev)
{
	struct hclge_ptp *ptp = hdev->ptp;

	if (!ptp)
		return;

	hclge_ptp_int_en(hdev, false);
	clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
	clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
	ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;

	if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
		dev_err(&hdev->pdev->dev, "failed to disable phc\n");

	if (ptp->tx_skb) {
		struct sk_buff *skb = ptp->tx_skb;

		ptp->tx_skb = NULL;
		dev_kfree_skb_any(skb);
	}

	hclge_ptp_destroy_clock(hdev);
}