#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Used cpu partial on free */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
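
/*
 * Rough sketch (simplified from the allocation path in mm/slub.c, not a
 * verbatim copy) of how the freelist/tid pair above enables the lockless
 * fastpath; get_freepointer(), next_tid() and node_match() are helpers
 * that live in mm/slub.c:
 *
 *	c = __this_cpu_ptr(s->cpu_slab);
 *	tid = c->tid;
 *	object = c->freelist;
 *	if (!object || !node_match(c, node))
 *		object = __slab_alloc(...);		ALLOC_SLOWPATH
 *	else if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					  s->cpu_slab->tid,
 *					  object, tid,
 *					  get_freepointer(s, object),
 *					  next_tid(tid)))
 *		goto redo;	another cpu or interrupt won, retry
 *	else
 *		stat(s, ALLOC_FASTPATH);
 */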

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};
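
/*
 * Illustrative sketch (an assumption here, modelled on add_partial() in
 * mm/slub.c) of how the slow paths put a slab page on a node's partial
 * list while holding list_lock:
 *
 *	spin_lock(&n->list_lock);
 *	n->nr_partial++;
 *	list_add(&page->lru, &n->partial);	or list_add_tail()
 *	spin_unlock(&n->list_lock);
 */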

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
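
/*
 * Sketch of the packing (an assumption here; the real encoding is the
 * OO_SHIFT/OO_MASK split in mm/slub.c):
 *
 *	x.x     = (order << 16) + objects;
 *	order   = x.x >> 16;
 *	objects = x.x & ((1 << 16) - 1);
 */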

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int objsize;		/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
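
/*
 * Callers normally use the generic slab API from <linux/slab.h> instead of
 * poking at these fields; a minimal usage sketch (the cache and struct
 * names are made up for illustration):
 *
 *	struct kmem_cache *s;
 *	struct my_obj *obj;
 *
 *	s = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
 *			      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(s, GFP_KERNEL);
 *	...
 *	kmem_cache_free(s, obj);
 *	kmem_cache_destroy(s);
 */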

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so this value needs to be high enough that
 * performance-critical objects are still allocated through the SLUB
 * fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
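
/*
 * Worked example, assuming 4K pages and no ARCH_DMA_MINALIGN override:
 * KMALLOC_MIN_SIZE = 8, KMALLOC_SHIFT_LOW = 3, SLUB_MAX_SIZE = 8192 and
 * SLUB_PAGE_SHIFT = 14, so constant-size kmalloc() calls of up to two
 * pages are served from kmalloc_caches[] and larger ones go straight to
 * the page allocator.
 */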

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
 * size we would have to go up to 128k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	BUG();
	return -1; /* Will never be reached */

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
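
/*
 * Worked examples, assuming KMALLOC_MIN_SIZE == 8: kmalloc_index(8)
 * returns KMALLOC_SHIFT_LOW == 3, kmalloc_index(100) returns 7 (the
 * 128-byte cache), kmalloc_index(192) returns 2 (the special 192-byte
 * cache) and kmalloc_index(200) returns 8 (the 256-byte cache).
 */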

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return kmalloc_caches[index];
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

/**
 * verify_mem_not_deleted - check that allocated memory is still in use
 * @x: pointer to the allocated object
 *
 * Calling this on allocated memory will check that the memory
 * is expected to be in use, and print warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif
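
/*
 * Intended use (a sketch, not taken from the kernel sources): call it on
 * an object you believe is still allocated, e.g. in a debugging path:
 *
 *	if (!verify_mem_not_deleted(obj))
 *		pr_warn("obj %p looks like it was already freed\n", obj);
 */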

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}
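
/*
 * Example, assuming 4K pages: a constant-size kmalloc(65536, GFP_KERNEL)
 * exceeds SLUB_MAX_SIZE, so kmalloc() below lands here with
 * get_order(65536) == 4, i.e. a 16-page compound allocation from the page
 * allocator instead of a slab cache.
 */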

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(s, flags, size);
		}
	}
	return __kmalloc(size, flags);
}
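
/*
 * Dispatch summary for a few illustrative calls (sizes assume 4K pages):
 *
 *	kmalloc(65536, GFP_KERNEL)	-> kmalloc_large()	page allocator
 *	kmalloc(100, GFP_KERNEL)	-> kmem_cache_alloc_trace(kmalloc_caches[7], ...)
 *	kmalloc(100, GFP_DMA)		-> __kmalloc()		DMA caches handled out of line
 *	kmalloc(len, GFP_KERNEL)	-> __kmalloc()		non-constant size
 *	kmalloc(0, GFP_KERNEL)		-> ZERO_SIZE_PTR
 */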

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(s, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
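
/*
 * Illustrative use: allocate close to a device's NUMA node (dev_to_node()
 * and GFP_KERNEL are just example parameters here):
 *
 *	buf = kmalloc_node(sizeof(*buf), GFP_KERNEL, dev_to_node(dev));
 */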
#endif

#endif /* _LINUX_SLUB_DEF_H */