/*
 * Copyright (C) 2009 Intel Corporation.
 * Author: Patrick Ohly <patrick.ohly@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/timecompare.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/kernel.h>

/*
 * fixed point arithmetic scale factor for skew
 *
 * Usually one would measure skew in ppb (parts per billion, 1e9), but
 * using a power-of-two scale factor simplifies the math.
 */
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
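/*
 * Example: with this scale factor, a relative drift of 1 ppb between
 * source and target corresponds to a skew value of roughly 1
 * (2^30 / 10^9 ~= 1.07), and a skew of TIMECOMPARE_SKEW_RESOLUTION
 * itself would mean a drift of one second per second.
 */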

ktime_t timecompare_transform(struct timecompare *sync,
			      u64 source_tstamp)
{
	u64 nsec;

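	/*
	 * Convert the source timestamp into the target time base:
	 * apply the offset measured at the last update, then
	 * extrapolate the drift accumulated since then via the skew.
	 */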
	nsec = source_tstamp + sync->offset;
	nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
		TIMECOMPARE_SKEW_RESOLUTION;

	return ns_to_ktime(nsec);
}
EXPORT_SYMBOL_GPL(timecompare_transform);

int timecompare_offset(struct timecompare *sync,
		       s64 *offset,
		       u64 *source_tstamp)
{
	u64 start_source = 0, end_source = 0;
	struct {
		s64 offset;
		s64 duration_target;
	} buffer[10], sample, *samples;
	int counter = 0, i;
	int used;
	int index;
	int num_samples = sync->num_samples;

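	/*
	 * Allocate a larger sample array if more samples were requested
	 * than fit on the stack; if the atomic allocation fails, fall
	 * back to the on-stack buffer and fewer samples.
	 */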
	if (num_samples > ARRAY_SIZE(buffer)) {
		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
		if (!samples) {
			samples = buffer;
			num_samples = ARRAY_SIZE(buffer);
		}
	} else {
		samples = buffer;
	}

	/* run until we have enough valid samples, but do not try forever */
	i = 0;
	counter = 0;
	while (1) {
		u64 ts;
		ktime_t start, end;

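		/*
		 * Bracket one source counter read between two target
		 * clock reads; the source value was captured somewhere
		 * inside the [start, end] interval.
		 */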
		start = sync->target();
		ts = timecounter_read(sync->source);
		end = sync->target();

		if (!i)
			start_source = ts;

		/* ignore negative durations */
		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
		if (sample.duration_target >= 0) {
			/*
			 * assume symmetric delay to and from the source:
			 * the average target time corresponds to the
			 * measured source time
			 */
			sample.offset =
				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;

			/* simple insertion sort based on duration */
			index = counter - 1;
			while (index >= 0) {
				if (samples[index].duration_target <
				    sample.duration_target)
					break;
				samples[index + 1] = samples[index];
				index--;
			}
			samples[index + 1] = sample;
			counter++;
		}

		i++;
		if (counter >= num_samples || i >= 100000) {
			end_source = ts;
			break;
		}
	}

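	/* report the midpoint of the first and last source reads */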
	*source_tstamp = (end_source + start_source) / 2;

	/*
	 * remove outliers by only using the 75% of the samples with
	 * the shortest target durations
	 */
	used = counter * 3 / 4;
	if (!used)
		used = counter;
	if (used) {
		/* calculate average */
		s64 off = 0;
		for (index = 0; index < used; index++)
			off += samples[index].offset;
		*offset = div_s64(off, used);
	}

	if (samples && samples != buffer)
		kfree(samples);

	return used;
}
EXPORT_SYMBOL_GPL(timecompare_offset);

void __timecompare_update(struct timecompare *sync,
			  u64 source_tstamp)
{
	s64 offset;
	u64 average_time;

	if (!timecompare_offset(sync, &offset, &average_time))
		return;

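	/* first synchronization: record the offset only, skew is still unknown */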
	if (!sync->last_update) {
		sync->last_update = average_time;
		sync->offset = offset;
		sync->skew = 0;
	} else {
		s64 delta_nsec = average_time - sync->last_update;

		/* avoid division by negative or small deltas */
		if (delta_nsec >= 10000) {
			s64 delta_offset_nsec = offset - sync->offset;
			s64 skew; /* delta_offset_nsec *
				     TIMECOMPARE_SKEW_RESOLUTION /
				     delta_nsec */
			u64 divisor;

			/* div_s64() is limited to a 32 bit divisor */
			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
			divisor = delta_nsec;
			while (unlikely(divisor >= ((s64)1) << 32)) {
				/* divide both by 2; beware, right shift
				   of a negative value has undefined
				   behavior and can only be used for
				   the positive divisor */
				skew = div_s64(skew, 2);
				divisor >>= 1;
			}
			skew = div_s64(skew, divisor);

			/*
			 * Calculate the new overall skew as 4/16 of the
			 * old value and 12/16 of the new one. This is
			 * a rather arbitrary tradeoff between
			 * only using the latest measurement (0/16 and
			 * 16/16) and putting even more weight on past
			 * measurements.
			 */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
			sync->skew =
				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
					sync->skew +
					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
					16);
			sync->last_update = average_time;
			sync->offset = offset;
		}
	}
}
EXPORT_SYMBOL_GPL(__timecompare_update);
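
/*
 * Usage sketch (illustrative only: "adapter", "hw_counter" and
 * "hw_stamp" are made-up driver-side names; only the struct
 * timecompare members referenced in this file are assumed):
 *
 *	struct timecompare sync = {
 *		.source      = &adapter->hw_counter,	(a struct timecounter
 *							 fed by the device clock)
 *		.target      = ktime_get_real,		(system time base the
 *							 stamps are mapped onto)
 *		.num_samples = 10,
 *	};
 *
 *	__timecompare_update(&sync, 0);		(resynchronize offset and skew,
 *						 e.g. periodically)
 *	sys_stamp = timecompare_transform(&sync, hw_stamp);
 */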