/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset of the low 32-bit half of a long-typed field */
#ifdef CONFIG_PPC64
#define LOPART	4
#else
#define LOPART	0
#endif
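
/*
 * For example, on a big-endian ppc64 kernel a long-typed datapage field
 * occupies 8 bytes and its low 32 bits sit in the second word, so the
 * 32-bit loads below use (offset + LOPART) to read the same low word
 * whether the kernel is 64-bit or 32-bit.
 */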

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr.	r10,r3			/* r10 saves tv, cr0 set for NULL test */
	mr	r11,r4			/* r11 saves tz */
	get_datapage	r9, r0
	beq	3f			/* tv is NULL, only fill tz */
	LOAD_REG_IMMEDIATE(r7, 1000000)	/* load up USEC_PER_SEC */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	mtlr	r12
	crclr	cr0*4+so		/* clear CR0[SO], i.e. report success */
	li	r3,0
	beqlr

	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmplwi	cr0,r3,CLOCK_REALTIME
	cmplwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq	/* cr0.eq: REALTIME or MONOTONIC */

	cmplwi	cr5,r3,CLOCK_REALTIME_COARSE
	cmplwi	cr6,r3,CLOCK_MONOTONIC_COARSE
	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq	/* cr5.eq: a coarse clock */

	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq	/* cr0.eq: any supported clock */
	bne	cr0, .Lgettime_fallback

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	get_datapage	r9, r0
	LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC)	/* load up NSEC_PER_SEC */
	beq	cr5, .Lcoarse_clocks
.Lprecise_clocks:
	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1, .Lfinish		/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* Now we must fix up using the wall-to-monotonic offset. We need
	 * to snapshot that value and do the counter trick again. Fortunately,
	 * we still have the counter value in r8 that was returned by
	 * __do_get_tspec. At this point, r3,r4 contain our sec/nsec values,
	 * r5 and r6 can be used, and r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter: the or/xor pair yields
	 * a zero that is data-dependent on r5/r6, so the reload of the
	 * update count below cannot be satisfied before those loads
	 * complete.
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	.Lprecise_clocks
	b	.Lfinish_monotonic

	/*
	 * For coarse clocks we get data directly from the vdso data page, so
	 * we don't need to call __do_get_tspec, but we still need to do the
	 * counter trick.
	 */
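	/*
	 * Roughly, in C (an illustrative sketch; the field names are
	 * schematic stand-ins for the asm-offsets constants used below):
	 *
	 *	do {
	 *		while ((seq = data->tb_update_count) & 1)
	 *			;			// update in progress
	 *		sec  = data->stamp_xtime_sec;	// REALTIME_COARSE
	 *		nsec = data->stamp_xtime_nsec;
	 *		if (clock == CLOCK_MONOTONIC_COARSE) {
	 *			wtom_sec  = data->wtom_clock_sec;
	 *			wtom_nsec = data->wtom_clock_nsec;
	 *		}
	 *	} while (data->tb_update_count != seq);
	 */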
.Lcoarse_clocks:
	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	.Lcoarse_clocks
	add	r9,r9,r0		/* r0 is already 0 */

	/*
	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
	 * too
	 */
	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r4,STAMP_XTIME_NSEC+LOPART(r9)
	bne	cr6,1f

	/* CLOCK_MONOTONIC_COARSE */
	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* create a fake dependency on the loaded values and re-check the
	 * update counter
	 */
	or	r0,r6,r5
1:	or	r0,r0,r3
	or	r0,r0,r4
	xor	r0,r0,r0
	add	r3,r3,r0
	lwz	r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
	cmplw	cr0,r0,r8		/* check if updated */
	bne-	.Lcoarse_clocks

	/* The counter has not updated, so continue calculating proper
	 * values for sec and nsec if monotonic coarse, or just return with
	 * the proper values for realtime.
	 */
	bne	cr6, .Lfinish

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all ?
	 */
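	/*
	 * In C, the normalisation below is roughly (a sketch, not the
	 * kernel's literal code):
	 *
	 *	sec  += wtom_sec;
	 *	nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) {
	 *		nsec -= NSEC_PER_SEC;
	 *		sec++;
	 *	} else if (nsec < 0) {
	 *		nsec += NSEC_PER_SEC;
	 *		sec--;
	 *	}
	 */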
.Lfinish_monotonic:
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7
	cmpwi	cr1,r4,0
	blt	1f
	subf	r4,r7,r4
	addi	r3,r3,1
1:	bge	cr1, .Lfinish
	addi	r3,r3,-1
	add	r4,r4,r7

.Lfinish:
	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
.Lgettime_fallback:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmplwi	cr0, r3, CLOCK_MAX
	cmpwi	cr1, r3, CLOCK_REALTIME_COARSE
	cmpwi	cr7, r3, CLOCK_MONOTONIC_COARSE
	bgt	cr0, 99f			/* unknown clock: fall back */
	LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)	/* coarse clocks: tick resolution */
	beq	cr1, 1f
	beq	cr7, 1f

	mflr	r12
  .cfi_register lr,r12
	get_datapage	r3, r0
	lwz	r5, CLOCK_HRTIMER_RES(r3)	/* precise clocks: hrtimer resolution */
	mtlr	r12
1:	li	r3,0
	cmplwi	cr0,r4,0		/* res may be NULL */
	crclr	cr0*4+so
	beqlr
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	get_datapage	r9, r0

	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)

	cmplwi	r11,0			/* check if t is NULL */
	mtlr	r12
	crclr	cr0*4+so
	beqlr
	stw	r3,0(r11)		/* store result at *t */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)

/*
 * This is the core of clock_gettime() and gettimeofday();
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
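/*
 * Rough C sketch of what this routine does (illustrative only: the field
 * names and the mul64x32_hi64() helper are schematic, not the actual
 * vdso_datapage layout, which is reached via the asm-offsets constants):
 *
 *	do {
 *		while ((seq = data->tb_update_count) & 1)
 *			;				// writer mid-update
 *		u64 delta = (get_tb() - data->tb_orig_stamp) << 12;
 *		u64 t = mul64x32_hi64(delta, data->tb_to_xs >> 32);
 *						// 64x32 multiply, top 64 bits
 *		t += ((u64)data->stamp_xtime_sec << 32)
 *		     | data->stamp_sec_frac;	// 32.32 fixed-point seconds
 *	} while (data->tb_update_count != seq);
 *	sec = t >> 32;				// -> r3
 *	sub = ((u32)t * (u64)r7) >> 32;		// -> r4, usec or nsec
 */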
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
2:	MFTBU(r3)
	MFTBL(r4)
	MFTBU(r0)
	cmplw	cr0,r3,r0
	bne-	2b

	/* Subtract the 64-bit tb orig stamp and shift the delta left
	 * 12 bits: subfc/subfe propagate the borrow between the two
	 * 32-bit halves, and the rlwimi pulls the top 12 bits of the
	 * low word into the high word. The record form sets cr0 so we
	 * can skip the high-part multiply below when it is zero.
	 */
	subfc	r4,r6,r4
	subfe	r0,r5,r3
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31
	slwi	r4,r4,12

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
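	/*
	 * Back-of-the-envelope check of that bound, assuming the scale
	 * works out to roughly 2^52 / timebase_frequency in the high 32
	 * bits of tb_to_xs: at 1GHz that is about 4.5 million, so dropping
	 * the low 32 bits loses at most 1 part in ~4.5 million of the
	 * scale, i.e. roughly 0.22ppm.
	 */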
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b

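	/* The fractional second now in r4 is a 0.32 fixed-point value:
	 * multiplying by r7 (USEC_PER_SEC or NSEC_PER_SEC) and keeping
	 * the high 32 bits, i.e. (frac * r7) >> 32, gives the sub-second
	 * part in the requested unit.
	 */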
	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
  .cfi_endproc
