2006-12-13 03:34:23 -05:00
|
|
|
#ifndef _LINUX_SLAB_DEF_H
|
|
|
|
#define _LINUX_SLAB_DEF_H
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Definitions unique to the original Linux SLAB allocator.
|
|
|
|
*
|
|
|
|
* What we provide here is a way to optimize the frequent kmalloc
|
|
|
|
* calls in the kernel by selecting the appropriate general cache
|
|
|
|
* if kmalloc was called with a size that can be established at
|
|
|
|
* compile time.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
|
|
|
|
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
|
|
|
|
/* Size description struct for general caches. */
|
|
|
|
/*
 * One entry per general-purpose kmalloc cache: the object size it
 * serves and the backing kmem_cache(s).  An array of these
 * (malloc_sizes[], declared below) is indexed by the compile-time
 * size lookup in kmalloc()/kzalloc()/kmalloc_node().
 */
struct cache_sizes {
	size_t			cs_size;	/* max object size this cache handles */
	struct kmem_cache	*cs_cachep;	/* cache for normal allocations */
#ifdef CONFIG_ZONE_DMA
	/* separate cache used to satisfy GFP_DMA requests */
	struct kmem_cache	*cs_dmacachep;
#endif
};
|
|
|
|
extern struct cache_sizes malloc_sizes[];
|
|
|
|
|
|
|
|
/*
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required
 * @flags: the type of memory to allocate (gfp mask)
 *
 * When @size is a compile-time constant, resolve the matching general
 * cache index at compile time and allocate from it directly, skipping
 * the runtime size lookup in __kmalloc().  The CACHE(x) macro below is
 * expanded once per cache size by re-including "kmalloc_sizes.h"; each
 * expansion either jumps to 'found' (leaving i as the index into
 * malloc_sizes[]) or bumps i to try the next, larger size.
 */
static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/*
			 * No cache is large enough: reference an
			 * undefined symbol so the build fails at link
			 * time instead of silently misallocating.
			 */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-capable memory comes from the per-size DMA cache */
		if (flags & GFP_DMA)
			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
	}
	/* Non-constant size: fall back to the runtime lookup. */
	return __kmalloc(size, flags);
}
|
|
|
|
|
|
|
|
/*
 * kzalloc - allocate zeroed memory
 * @size: how many bytes of memory are required
 * @flags: the type of memory to allocate (gfp mask)
 *
 * Zeroing counterpart of kmalloc() above: identical compile-time
 * cache-index selection via the CACHE(x) / "kmalloc_sizes.h" expansion
 * trick, but the allocation goes through kmem_cache_zalloc() so the
 * returned memory is cleared.  Falls back to __kzalloc() when @size is
 * not a compile-time constant.
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Link-time error for impossibly large sizes. */
			extern void __you_cannot_kzalloc_that_much(void);
			__you_cannot_kzalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-capable memory comes from the per-size DMA cache */
		if (flags & GFP_DMA)
			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
						flags);
#endif
		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
	}
	/* Non-constant size: fall back to the runtime lookup. */
	return __kzalloc(size, flags);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
|
|
|
|
|
|
|
/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: how many bytes of memory are required
 * @flags: the type of memory to allocate (gfp mask)
 * @node: NUMA node from which to allocate
 *
 * NUMA-aware counterpart of kmalloc() above: the same compile-time
 * cache-index selection via the CACHE(x) / "kmalloc_sizes.h" expansion
 * trick, with the allocation routed through kmem_cache_alloc_node() so
 * it is served from @node.  Falls back to __kmalloc_node() when @size
 * is not a compile-time constant.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			/* Link-time error for impossibly large sizes. */
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
#ifdef CONFIG_ZONE_DMA
		/* DMA-capable memory comes from the per-size DMA cache */
		if (flags & GFP_DMA)
			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
						flags, node);
#endif
		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
						flags, node);
	}
	/* Non-constant size: fall back to the runtime lookup. */
	return __kmalloc_node(size, flags, node);
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
|
2007-05-17 01:10:52 -04:00
|
|
|
extern const struct seq_operations slabinfo_op;
|
|
|
|
ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
|
|
|
|
|
2006-12-13 03:34:23 -05:00
|
|
|
#endif /* _LINUX_SLAB_DEF_H */
|