/*
 *  Extend a 32-bit counter to 63 bits
 *
 *  Author:	Nicolas Pitre
 *  Created:	December 3, 2006
 *  Copyright:	MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 */

#ifndef __LINUX_CNT32_TO_63_H__
#define __LINUX_CNT32_TO_63_H__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/system.h>

/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
	struct {
#if defined(__LITTLE_ENDIAN)
		u32 lo, hi;
#elif defined(__BIG_ENDIAN)
		u32 hi, lo;
#endif
	};
	u64 val;
};

/**
 * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
 * @cnt_lo: The low part of the counter
 *
 * Many hardware clock counters are only 32 bits wide and therefore have
 * a relatively short period, making wrap-arounds rather frequent.  This
 * is a problem when implementing sched_clock() for example, where a 64-bit
 * non-wrapping monotonic value is expected to be returned.
 *
 * To overcome that limitation, let's extend a 32-bit counter to 63 bits
 * in a completely lock-free fashion.  Bits 0 to 31 of the clock are provided
 * by the hardware while bits 32 to 62 are stored in memory.  The top bit in
 * memory is used to synchronize with the hardware clock half-period.  When
 * the top bits of the hardware counter and the value in memory differ, the
 * value in memory is updated, incrementing it when the hardware counter
 * wraps around.
 *
 * Because a word store to memory is atomic, the incremented value will
 * always be in sync with the top bit, which indicates to any potential
 * concurrent reader whether the value in memory is up to date with regard
 * to the needed increment.  And any race in updating the value in memory
 * is harmless as the same value would simply be stored more than once.
 *
 * The restrictions for the algorithm to work properly are:
 *
 * 1) this code must be called at least once per half period of the
 *    32-bit counter;
 *
 * 2) this code must not be preempted for a duration longer than the
 *    32-bit counter half period minus the longest period between two
 *    calls to this code;
 *
 * Those requirements ensure a proper update of the state bit in memory.
 * This is usually not a problem in practice, but if it is then a kernel
 * timer should be scheduled to ensure this code is executed often enough.
 *
 * And finally:
 *
 * 3) the cnt_lo argument must be seen as a globally incrementing value,
 *    meaning that it should be a direct reference to the counter data so
 *    it can be evaluated according to a specific ordering within the macro,
 *    and not the result of a previous evaluation stored in a variable.
 *
 * For example, this is wrong:
 *
 *	u32 partial = get_hw_count();
 *	u64 full = cnt32_to_63(partial);
 *	return full;
 *
 * This is fine:
 *
 *	u64 full = cnt32_to_63(get_hw_count());
 *	return full;
 *
 * Note that the top bit (bit 63) in the returned value should be considered
 * as garbage.  It is not cleared here because callers are likely to use a
 * multiplier on the returned value which can get rid of the top bit
 * implicitly by making the multiplier even, therefore saving on a runtime
 * clear-bit instruction.  Otherwise the caller must remember to clear the
 * top bit explicitly.
 */
#define cnt32_to_63(cnt_lo) \
({ \
	static u32 __m_cnt_hi; \
	union cnt32_to_63 __x; \
	__x.hi = __m_cnt_hi; \
	smp_rmb(); \
	__x.lo = (cnt_lo); \
	if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
		__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
	__x.val; \
})

#endif
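/*
 * Usage sketch (illustrative only, not part of this header): a
 * sched_clock()-style function built on cnt32_to_63().  The
 * read_hw_counter() accessor and the 250 MHz clock rate below are
 * hypothetical placeholders; a real implementation would read its own
 * free-running counter register and scale by its own tick period.  The
 * counter read is passed directly to the macro as required by
 * restriction 3 above, and the even multiplier (4 ns per tick) shifts
 * the garbage top bit out of the 64-bit result so no explicit clear is
 * needed.  Also note that __m_cnt_hi is a distinct static variable per
 * macro expansion, so a given counter should be extended from a single
 * call site.
 *
 *	unsigned long long sched_clock(void)
 *	{
 *		unsigned long long v = cnt32_to_63(read_hw_counter());
 *
 *		return v * 4;
 *	}
 */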