/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999,2013
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first. Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 * |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bits 0-63 in the bit number field need to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * an XOR with 0x3f.
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>

#define arch___set_bit			generic___set_bit
#define arch___clear_bit		generic___clear_bit
#define arch___change_bit		generic___change_bit
#define arch___test_and_set_bit		generic___test_and_set_bit
#define arch___test_and_clear_bit	generic___test_and_clear_bit
#define arch___test_and_change_bit	generic___test_and_change_bit
#define arch_test_bit_acquire		generic_test_bit_acquire

static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
#ifdef __HAVE_ASM_FLAG_OUTPUTS__
	const volatile unsigned char *addr;
	unsigned long mask;
	int cc;

	/*
	 * With CONFIG_PROFILE_ALL_BRANCHES enabled gcc fails to
	 * handle __builtin_constant_p() in some cases.
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && __builtin_constant_p(nr)) {
		addr = (const volatile unsigned char *)ptr;
		addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
		mask = 1UL << (nr & (BITS_PER_BYTE - 1));
		asm volatile(
			"	tm	%[addr],%[mask]\n"
			: "=@cc" (cc)
			: [addr] "Q" (*addr), [mask] "I" (mask)
			);
		return cc == 3;
	}
#endif
	return generic_test_bit(nr, ptr);
}

#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/lock.h>

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)				\
	for ((bit) = find_first_bit_inv((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_inv((addr), (size), (bit) + 1))

static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
					  volatile unsigned long *ptr)
{
	return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
				const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
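
/*
 * Illustrative usage sketch, not part of the original interface: the
 * *_inv helpers above simply XOR the MSB0 bit number with 0x3f within
 * each word before calling the LSB0 primitives. For a hypothetical
 * 256-bit MSB0 bitmap:
 *
 *	unsigned long map[4] = {};
 *	unsigned long bit;
 *
 *	set_bit_inv(0, map);	sets the MSB of map[0] (LSB0 bit 63)
 *	set_bit_inv(64, map);	sets the MSB of map[1] (LSB0 bit 127)
 *	for_each_set_bit_inv(bit, map, 256)
 *		...		visits MSB0 bit numbers 0, then 64
 */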

/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
	if (__builtin_constant_p(word)) {
		unsigned long bit = 0;

		if (!word)
			return 64;
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		union register_pair rp;

		rp.even = word;
		asm volatile(
			"	flogr	%[rp],%[rp]\n"
			: [rp] "+d" (rp.pair) : : "cc");
		return rp.even;
	}
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}

/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;
	unsigned int val = (unsigned int)word;

	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}

/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __flogr(word) ^ (BITS_PER_LONG - 1);
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}

/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
	return fls64(word);
}
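
/*
 * Illustrative sketch, not part of the original header: each of the
 * functions above converts the result of __flogr() (MSB0 bit number,
 * 64 if no bit is set) into the convention the generic interface
 * expects. A few concrete values:
 *
 *	__flogr(0)		-> 64
 *	__flogr(1)		-> 63
 *	__flogr(1UL << 63)	-> 0
 *
 *	__ffs(1)		-> 0	(63 ^ 63)
 *	__fls(1UL << 63)	-> 63	(0 ^ 63)
 *	ffs(0)			-> 0	((1 + (64 ^ 63)) & 127)
 *	fls64(1UL << 63)	-> 64	((1 + (0 ^ 63)) & 127)
 */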

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */