
// SPDX-License-Identifier: GPL-2.0
/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
	return (lsbits & (align - 1)) == 0;
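
/*
 * For reference, a minimal sketch of the whole helper consistent with the
 * fragments above. The CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS guard and the
 * uintptr_t cast are reconstructed, not part of the matched lines:
 */
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Fold in the base address's low bits when alignment matters. */
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}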
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 */
	u32 t = *(u32 *)(a + (n -= 4));
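
/*
 * A sketch of the full loop the matched line belongs to: n counts down to
 * zero in 4-byte steps, and the two stores mirror the load. The second and
 * third statements are reconstructed from the swap pattern:
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}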
/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI). Are there any cases the kernel needs to worry about?
 */
		u64 t = *(u64 *)(a + (n -= 8));

		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));

		t = *(u32 *)(a + (n -= 4));
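
/*
 * Putting the three matched body lines back in context: a sketch of the
 * CONFIG_64BIT split the comment above describes, with one 64-bit transfer
 * per iteration on 64-bit builds and two 32-bit transfers otherwise. The
 * stores and the loop frame are reconstructed:
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}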
/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 */
		char t = ((char *)a)[--n];
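
/*
 * The byte-at-a-time fallback in full; unlike the word variants it steps by
 * single bytes, so it works for any element size. The two assignments and
 * the loop closer are reconstructed from the swap pattern:
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}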
/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
	((const struct wrapper *)priv)->swap_func(a, b, (int)size);

	return ((const struct wrapper *)priv)->cmp(a, b);
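
/*
 * A sketch of the wrapper plumbing these two lines imply. The field names
 * come straight from the matches; the struct layout, the sentinel values,
 * and the fallthrough dispatch are assumptions for illustration:
 */
#define SWAP_WRAPPER ((swap_r_func_t)1)	/* hypothetical sentinel value */
#define CMP_WRAPPER  ((cmp_r_func_t)1)	/* hypothetical sentinel value */

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap_func;
};

static void do_swap(void *a, void *b, size_t size,
		    swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		/* Legacy non-reentrant callback stashed in priv. */
		((const struct wrapper *)priv)->swap_func(a, b, (int)size);
		return;
	}
	swap_func(a, b, (int)size, priv);
}

static int do_cmp(const void *a, const void *b,
		  cmp_r_func_t cmp, const void *priv)
{
	if (cmp == CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}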
	unsigned i, j, k;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
		swap_func = NULL;

	/* heapify */
	for (i = n / 2; i >= 1; --i) {
		/* Find the sift-down path all the way to the leaves. */
		for (j = i; k = j * 2, k < n;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 == n)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j != i && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, i, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j != i;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}

	/* sort */
	for (i = n; i > 1; --i) {
		eytzinger1_do_swap(base1, n, size, swap_func, priv, 1, i);

		/* Find the sift-down path all the way to the leaves. */
		for (j = 1; k = j * 2, k + 1 < i;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 + 1 == i)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j >= 1 && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, 1, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j > 1;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}
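
/*
 * The eytzinger1_do_cmp()/eytzinger1_do_swap() helpers used above are not in
 * the matched lines. A plausible sketch, assuming the sort runs an ordinary
 * heapsort in in-order index space and translates every access through an
 * inorder_to_eytzinger1() index mapping (assumed to come from the eytzinger
 * header) so the result lands in cache-friendly eytzinger (BFS) order:
 */
static inline int eytzinger1_do_cmp(void *base1, size_t n, size_t size,
				    cmp_r_func_t cmp_func, const void *priv,
				    size_t l, size_t r)
{
	return do_cmp(base1 + inorder_to_eytzinger1(l, n) * size,
		      base1 + inorder_to_eytzinger1(r, n) * size,
		      cmp_func, priv);
}

static inline void eytzinger1_do_swap(void *base1, size_t n, size_t size,
				      swap_r_func_t swap_func, const void *priv,
				      size_t l, size_t r)
{
	do_swap(base1 + inorder_to_eytzinger1(l, n) * size,
		base1 + inorder_to_eytzinger1(r, n) * size,
		size, swap_func, priv);
}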
	/* eytzinger0_sort_r(): the 0-based wrapper biases base down one element */
	void *base1 = base - size;
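
/*
 * Hypothetical usage: sorting an array of u32 keys into eytzinger layout.
 * The entry-point signature is inferred from the fragments above; the
 * comparison callback follows the usual cmp_r_func_t contract (negative,
 * zero, positive). Passing NULL for swap_func is assumed to let the sort
 * pick one of the word-wise swap routines via is_aligned(), mirroring
 * lib/sort.c:
 */
static int cmp_u32(const void *a, const void *b, const void *priv)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : x > y;
}

static void sort_keys(u32 *keys, size_t nr)
{
	eytzinger0_sort_r(keys, nr, sizeof(*keys), cmp_u32, NULL, NULL);
}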