/*
 * Floating proportions
 *
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 *   period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
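
/*
 * Worked example (illustrative): with shift = 10 the period is
 * 2^10 = 1024 events.  On a 64-bit machine the lower shift - 1 = 9
 * bits of 'events' are then the counter bits and the upper 55 bits
 * the period counter; a local counter that lags the global period
 * counter is decayed by one half per missed period.
 */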

/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);

/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}
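
/*
 * Usage sketch (hypothetical names): track what share of a global
 * event stream one source is responsible for.
 *
 *	static struct prop_descriptor events;
 *	static struct prop_local_percpu src_events;
 *	long num, den;
 *	int err;
 *
 *	err = prop_descriptor_init(&events, 10);
 *	if (!err)
 *		err = prop_local_init_percpu(&src_events);
 *
 *	prop_inc_percpu(&events, &src_events);
 *
 *	prop_fraction_percpu(&events, &src_events, &num, &den);
 *	...  num/den is this source's share of recent events
 *
 * Pair with prop_local_destroy_percpu() on teardown.
 */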

/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
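
/*
 * Worked numbers (these follow from the definitions above): on 32-bit,
 * PROP_MAX_SHIFT = 24, PROP_FRAC_SHIFT = 7 and PROP_FRAC_BASE = 128;
 * on 64-bit, PROP_MAX_SHIFT = 32, PROP_FRAC_SHIFT = 31 and
 * PROP_FRAC_BASE = 2^31.  A 'frac' argument therefore expresses the
 * fraction frac/PROP_FRAC_BASE.
 */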

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
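
/*
 * Example (hypothetical, reusing the names from the sketch above):
 * count an event only while this source's share of the total stays
 * within half, i.e. frac/PROP_FRAC_BASE = 1/2:
 *
 *	__prop_inc_percpu_max(&events, &src_events, PROP_FRAC_BASE / 2);
 *
 * Passing PROP_FRAC_BASE places no limit on the share and is then
 * equivalent to __prop_inc_percpu().
 */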

/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)			\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}
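
/*
 * Example: a statically allocated prop_local_single can be set up at
 * definition time (the name here is hypothetical):
 *
 *	static struct prop_local_single my_events =
 *			INIT_PROP_LOCAL_SINGLE(my_events);
 *
 * Dynamically allocated instances use prop_local_init_single() below.
 */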

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
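
/*
 * Usage sketch (hypothetical names): the single variant keeps a plain
 * unsigned long per local instead of a percpu counter, e.g. one
 * counter per task.
 *
 *	static struct prop_descriptor total;
 *	struct prop_local_single local;
 *	long num, den;
 *
 *	prop_descriptor_init(&total, 10);
 *	prop_local_init_single(&local);
 *	prop_inc_single(&total, &local);
 *	prop_fraction_single(&total, &local, &num, &den);
 *	...  num/den is this local's share of recent events
 */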

#endif /* _LINUX_PROPORTIONS_H */