Bound allocated pages to resource pools with page caches to avoid unnecessary syscalls

Maria Matejka 2021-09-01 00:46:46 +02:00
parent 2c13759136
commit 7f0e598208
4 changed files with 92 additions and 35 deletions

lib/resource.c

@@ -31,9 +31,18 @@
 struct pool {
   resource r;
   list inside;
+  struct pool_pages *pages;
   const char *name;
 };
 
+struct pool_pages {
+  uint free;
+  uint used;
+  void *ptr[0];
+};
+
+#define POOL_PAGES_MAX ((page_size - sizeof(struct pool_pages)) / sizeof (void *))
+
 static void pool_dump(resource *);
 static void pool_free(resource *);
 static resource *pool_lookup(resource *, unsigned long);
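
The cache is a single system page holding two counters followed by a flexible array of free-page pointers, so POOL_PAGES_MAX is simply however many pointers fit in the page after the counters. A quick worked example of that arithmetic, assuming 4096-byte pages and an LP64 platform (both assumptions, not part of the patch):

#include <stdio.h>

/* Same layout as the patch: two counters, then a flexible
 * array of cached page pointers, all living in one page. */
struct pool_pages {
  unsigned free, used;
  void *ptr[];
};

int main(void)
{
  long page_size = 4096;  /* assumed; BIRD reads sysconf(_SC_PAGESIZE) */
  size_t max = ((size_t) page_size - sizeof(struct pool_pages)) / sizeof(void *);
  printf("POOL_PAGES_MAX = %zu\n", max);  /* (4096 - 8) / 8 = 511 */
  return 0;
}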
@@ -50,6 +59,10 @@ static struct resclass pool_class = {
 
 pool root_pool;
 
+void *alloc_sys_page(void);
+void free_sys_page(void *);
+void resource_sys_init(void);
+
 static int indent;
 
 /**
@@ -82,6 +95,14 @@ pool_free(resource *P)
       xfree(r);
       r = rr;
     }
+
+  if (p->pages)
+  {
+    ASSERT_DIE(!p->pages->used);
+    for (uint i=0; i<p->pages->free; i++)
+      free_sys_page(p->pages->ptr[i]);
+    free_sys_page(p->pages);
+  }
 }
 
 static void
@@ -107,6 +128,9 @@ pool_memsize(resource *P)
   WALK_LIST(r, p->inside)
     sum += rmemsize(r);
 
+  if (p->pages)
+    sum += page_size * (p->pages->used + p->pages->free + 1);
+
   return sum;
 }
@@ -259,6 +283,7 @@ rlookup(unsigned long a)
 void
 resource_init(void)
 {
+  resource_sys_init();
   root_pool.r.class = &pool_class;
   root_pool.name = "Root";
   init_list(&root_pool.inside);
@@ -425,6 +450,39 @@ mb_free(void *m)
   rfree(b);
 }
 
+void *
+alloc_page(pool *p)
+{
+  if (!p->pages)
+  {
+    p->pages = alloc_sys_page();
+    p->pages->free = 0;
+    p->pages->used = 1;
+  }
+  else
+    p->pages->used++;
+
+  if (p->pages->free)
+  {
+    void *ptr = p->pages->ptr[--p->pages->free];
+    bzero(ptr, page_size);
+    return ptr;
+  }
+  else
+    return alloc_sys_page();
+}
+
+void
+free_page(pool *p, void *ptr)
+{
+  ASSERT_DIE(p->pages);
+  p->pages->used--;
+
+  if (p->pages->free >= POOL_PAGES_MAX)
+    return free_sys_page(ptr);
+  else
+    p->pages->ptr[p->pages->free++] = ptr;
+}
 
 #define STEP_UP(x) ((x) + (x)/2 + 4)
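
Taken together: alloc_page() pops a cached page when one is available (zeroing it before reuse) and falls back to a fresh alloc_sys_page() otherwise, while free_page() pushes pages onto the cache until its POOL_PAGES_MAX slots run out; the "+ 1" in pool_memsize() counts the cache page itself. A hedged usage sketch (rp_new(), alloc_page(), free_page() and rfree() are the real BIRD calls; the pool name is made up):

pool *p = rp_new(&root_pool, "demo");  /* hypothetical pool */

void *a = alloc_page(p);  /* cache empty: falls through to alloc_sys_page() -> mmap() */
free_page(p, a);          /* no munmap(): a is pushed onto p->pages->ptr[] */
void *b = alloc_page(p);  /* pops a back out, bzero()ed; no syscall at all */
free_page(p, b);          /* cached again until the pool dies */
rfree(p);                 /* pool_free() asserts used == 0, then free_sys_page()s the cache */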

lib/resource.h

@@ -94,10 +94,12 @@ void sl_free(slab *, void *);
 void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);
 
+extern long page_size;
+
 /* Allocator of whole pages; for use in slabs and other high-level allocators. */
-u64 get_page_size(void);
-void *alloc_page(void);
-void free_page(void *);
+void *alloc_page(pool *);
+void free_page(pool *, void *);
+
+#define PAGE_HEAD(x) ((void *) (((intptr_t) (x)) & ~(page_size-1)))
 
 #ifdef HAVE_LIBDMALLOC
 /*
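
PAGE_HEAD() depends on page_size being a power of two (resource_sys_init() below falls back to 4096 when it is not): masking off the low bits of any pointer inside a page yields the address of the page's first byte. A self-contained sketch of the same mask trick, with all values assumed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  long page_size = 4096;  /* assumed power of two */
  char buf[2 * 4096];     /* stand-in for a real page-aligned allocation */

  /* align a pointer up to the next page boundary */
  uintptr_t base = ((uintptr_t) buf + page_size - 1) & ~(uintptr_t) (page_size - 1);

  void *inside = (void *) (base + 123);  /* any pointer within that page */
  void *head = (void *) ((uintptr_t) inside & ~(uintptr_t) (page_size - 1));

  printf("%d\n", head == (void *) base);  /* prints 1 */
  return 0;
}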

lib/slab.c

@@ -152,6 +152,7 @@ slab_memsize(resource *r)
 struct slab {
   resource r;
+  pool *p;
   uint obj_size, head_size, head_bitfield_len;
   uint objs_per_slab, num_empty_heads, data_size;
   list empty_heads, partial_heads, full_heads;
@@ -191,6 +192,7 @@ slab *
 sl_new(pool *p, uint size)
 {
   slab *s = ralloc(p, &sl_class);
+  s->p = p;
   uint align = sizeof(struct sl_alignment);
   if (align < sizeof(int))
     align = sizeof(int);
@@ -199,7 +201,6 @@ sl_new(pool *p, uint size)
   s->obj_size = size;
   s->head_size = sizeof(struct sl_head);
-  u64 page_size = get_page_size();
 
   do {
     s->objs_per_slab = (page_size - s->head_size) / size;
@@ -268,9 +269,9 @@ no_partial:
       s->num_empty_heads--;
       goto okay;
     }
-  h = alloc_page();
+  h = alloc_page(s->p);
 #ifdef POISON
-  memset(h, 0xba, get_page_size());
+  memset(h, 0xba, page_size);
 #endif
   ASSERT_DIE(SL_GET_HEAD(h) == h);
   memset(h, 0, s->head_size);
@@ -329,9 +330,9 @@ sl_free(slab *s, void *oo)
       if (s->num_empty_heads >= MAX_EMPTY_HEADS)
 	{
 #ifdef POISON
-	  memset(h, 0xde, get_page_size());
+	  memset(h, 0xde, page_size);
 #endif
-	  free_page(h);
+	  free_page(s->p, h);
 	}
       else
 	{
@@ -348,11 +349,11 @@ slab_free(resource *r)
   struct sl_head *h, *g;
 
   WALK_LIST_DELSAFE(h, g, s->empty_heads)
-    free_page(h);
+    free_page(s->p, h);
   WALK_LIST_DELSAFE(h, g, s->partial_heads)
-    free_page(h);
+    free_page(s->p, h);
   WALK_LIST_DELSAFE(h, g, s->full_heads)
-    free_page(h);
+    free_page(s->p, h);
 }
 
 static void
@@ -385,7 +386,8 @@ slab_memsize(resource *r)
   WALK_LIST(h, s->full_heads)
     heads++;
 
-  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + get_page_size());
+//  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size);
+  return ALLOC_OVERHEAD + sizeof(struct slab); /* The page sizes are accounted for in the pool */
 }
 
 static resource *
@@ -395,10 +397,10 @@ slab_lookup(resource *r, unsigned long a)
   struct sl_head *h;
 
   WALK_LIST(h, s->partial_heads)
-    if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
       return r;
   WALK_LIST(h, s->full_heads)
-    if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+    if ((unsigned long) h < a && (unsigned long) h + page_size < a)
      return r;
   return NULL;
 }
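
With the owning pool recorded in s->p, every slab head now travels through that pool's page cache instead of hitting mmap()/munmap() directly. A hedged sketch of the resulting call flow (sl_new(), sl_alloc(), sl_free() and rfree() are BIRD's real API; the object size and pool name are made up):

pool *p = rp_new(&root_pool, "slab demo");  /* hypothetical pool */
slab *s = sl_new(p, 64);                    /* 64-byte objects; sl_new() stores s->p = p */

void *obj = sl_alloc(s);  /* first allocation grabs a head via alloc_page(s->p) */
sl_free(s, obj);          /* a surplus empty head goes through free_page(s->p, h),
                             i.e. back onto the pool's cache, not to munmap() */
rfree(s);                 /* slab_free() returns every remaining head to the same cache */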

sysdep/unix/alloc.c

@@ -16,41 +16,36 @@
 #include <sys/mman.h>
 #endif
 
+long page_size = 0;
+
 #ifdef HAVE_MMAP
-static u64 page_size = 0;
 static _Bool use_fake = 0;
 #else
-static const u64 page_size = 4096; /* Fake page size */
 static _Bool use_fake = 1;
 #endif
 
-u64 get_page_size(void)
+void resource_sys_init(void)
 {
-  if (page_size)
-    return page_size;
-
 #ifdef HAVE_MMAP
-  if (page_size = sysconf(_SC_PAGESIZE))
-  {
+  if (!(page_size = sysconf(_SC_PAGESIZE)))
+    die("System page size must be non-zero");
+
   if ((u64_popcount(page_size) > 1) || (page_size > 16384))
   {
+#endif
     /* Too big or strange page, use the aligned allocator instead */
     page_size = 4096;
     use_fake = 1;
+#ifdef HAVE_MMAP
   }
-  return page_size;
-  }
-
-  bug("Page size must be non-zero");
 #endif
 }
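
The u64_popcount(page_size) > 1 test is a power-of-two check: a page size with more than one set bit cannot serve PAGE_HEAD()'s mask trick, so the code falls back to fake 4096-byte pages, as it also does for pages larger than 16384 bytes. A standalone illustration (u64_popcount() is BIRD's helper; this sketch substitutes the GCC builtin):

#include <stdio.h>

/* stand-in for BIRD's u64_popcount() */
static int popcount64(unsigned long long x) { return __builtin_popcountll(x); }

int main(void)
{
  long sizes[] = { 4096, 16384, 12288, 65536 };
  for (int i = 0; i < 4; i++)
  {
    long ps = sizes[i];
    int fake = (popcount64(ps) > 1) || (ps > 16384);
    printf("page size %6ld -> %s\n", ps, fake ? "fake 4096-byte pages" : "use as-is");
  }
  return 0;  /* 12288 has two set bits, 65536 is too big: both fall back */
}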
 void *
-alloc_page(void)
+alloc_sys_page(void)
 {
 #ifdef HAVE_MMAP
   if (!use_fake)
   {
-    void *ret = mmap(NULL, get_page_size(), PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    void *ret = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (ret == MAP_FAILED)
       bug("mmap(%lu) failed: %m", page_size);
     return ret;
@@ -66,12 +61,12 @@ alloc_page(void)
 }
 
 void
-free_page(void *ptr)
+free_sys_page(void *ptr)
 {
 #ifdef HAVE_MMAP
   if (!use_fake)
   {
-    if (munmap(ptr, get_page_size()) < 0)
+    if (munmap(ptr, page_size) < 0)
       bug("munmap(%p) failed: %m", ptr);
   }
   else