about · summary · refs · log · tree · commit · diff · stats
path: root/epan
diff options
context:
space:
mode:
authorJakub Zawadzki <darkjames-ws@darkjames.pl>2011-05-09 08:55:35 +0000
committerJakub Zawadzki <darkjames-ws@darkjames.pl>2011-05-09 08:55:35 +0000
commit99351cb0e3755a8685fcd5a40f298779f218a28c (patch)
tree0205dcd18ef7930ce3538c25919b8db97d65191e /epan
parent3de2b1be7405adac31bd796f3380b9d8edbe0f99 (diff)
Initial commit of slab allocator (sl_* API)
svn path=/trunk/; revision=37027
Diffstat (limited to 'epan')
-rw-r--r--epan/emem.c126
-rw-r--r--epan/emem.h22
2 files changed, 131 insertions, 17 deletions
diff --git a/epan/emem.c b/epan/emem.c
index 2042f0002d..7ce8c64017 100644
--- a/epan/emem.c
+++ b/epan/emem.c
@@ -620,15 +620,8 @@ emem_scrub_memory(char *buf, size_t size, gboolean alloc)
}
static emem_chunk_t *
-emem_create_chunk(void) {
-#if defined (_WIN32)
- BOOL ret;
- char *buf_end, *prot1, *prot2;
- DWORD oldprot;
-#elif defined(USE_GUARD_PAGES)
- int ret;
- char *buf_end, *prot1, *prot2;
-#endif /* _WIN32 / USE_GUARD_PAGES */
+emem_create_chunk(size_t size)
+{
emem_chunk_t *npc;
npc = g_new(emem_chunk_t, 1);
@@ -642,7 +635,7 @@ emem_create_chunk(void) {
*/
/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
- npc->buf = VirtualAlloc(NULL, EMEM_PACKET_CHUNK_SIZE,
+ npc->buf = VirtualAlloc(NULL, size,
MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
if (npc->buf == NULL) {
@@ -651,7 +644,7 @@ emem_create_chunk(void) {
}
#elif defined(USE_GUARD_PAGES)
- npc->buf = mmap(NULL, EMEM_PACKET_CHUNK_SIZE,
+ npc->buf = mmap(NULL, size,
PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
if (npc->buf == MAP_FAILED) {
@@ -660,7 +653,7 @@ emem_create_chunk(void) {
}
#else /* Is there a draft in here? */
- npc->buf = g_malloc(EMEM_PACKET_CHUNK_SIZE);
+ npc->buf = g_malloc(size);
/* g_malloc() can't fail */
#endif
@@ -668,8 +661,44 @@ emem_create_chunk(void) {
total_no_chunks++;
#endif
+ npc->amount_free = npc->amount_free_init = size;
+ npc->free_offset = npc->free_offset_init = 0;
+ return npc;
+}
+
+static void
+emem_destroy_chunk(emem_chunk_t *npc)
+{
+#if defined (_WIN32)
+ VirtualFree(npc->buf, 0, MEM_RELEASE);
+#elif defined(USE_GUARD_PAGES)
+ munmap(npc->buf, npc->amount_free_init);
+#else
+ g_free(npc->buf);
+#endif
+#ifdef SHOW_EMEM_STATS
+ total_no_chunks--;
+#endif
+ g_free(npc);
+}
+
+static emem_chunk_t *
+emem_create_chunk_gp(size_t size)
+{
#if defined (_WIN32)
- buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
+ BOOL ret;
+ char *buf_end, *prot1, *prot2;
+ DWORD oldprot;
+#elif defined(USE_GUARD_PAGES)
+ int ret;
+ char *buf_end, *prot1, *prot2;
+#endif /* _WIN32 / USE_GUARD_PAGES */
+ emem_chunk_t *npc;
+
+ npc = emem_create_chunk(size);
+
+#if defined (_WIN32)
+ buf_end = npc->buf + size;
/* Align our guard pages on page-sized boundaries */
prot1 = (char *) ((((int) npc->buf + pagesize - 1) / pagesize) * pagesize);
@@ -683,7 +712,7 @@ emem_create_chunk(void) {
npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
#elif defined(USE_GUARD_PAGES)
- buf_end = npc->buf + EMEM_PACKET_CHUNK_SIZE;
+ buf_end = npc->buf + size;
/* Align our guard pages on page-sized boundaries */
prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
@@ -697,7 +726,7 @@ emem_create_chunk(void) {
npc->amount_free_init = prot2 - prot1 - pagesize;
npc->free_offset_init = (prot1 - npc->buf) + pagesize;
#else
- npc->amount_free_init = EMEM_PACKET_CHUNK_SIZE;
+ npc->amount_free_init = size;
npc->free_offset_init = 0;
#endif /* USE_GUARD_PAGES */
@@ -760,7 +789,7 @@ emem_alloc_chunk(size_t size, emem_header_t *mem)
DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
if (!mem->free_list)
- mem->free_list = emem_create_chunk();
+ mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
/* oops, we need to allocate more memory to serve this request
* than we have free. move this node to the used list and try again
@@ -773,7 +802,7 @@ emem_alloc_chunk(size_t size, emem_header_t *mem)
mem->used_list=npc;
if (!mem->free_list)
- mem->free_list = emem_create_chunk();
+ mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
}
free_list = mem->free_list;
@@ -847,6 +876,50 @@ se_alloc(size_t size)
}
void *
+sl_alloc(struct ws_memory_slab *mem_chunk)
+{
+ emem_chunk_t *chunk;
+ void *ptr;
+
+ /* XXX, debug_use_slices -> fallback to g_slice_alloc0 */
+
+ if (G_UNLIKELY(mem_chunk->freed != NULL)) {
+ ptr = mem_chunk->freed;
+ memcpy(&mem_chunk->freed, ptr, sizeof(void *));
+ return ptr;
+ }
+
+ if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
+ chunk = emem_create_chunk(mem_chunk->item_size * mem_chunk->count); /* NOTE: using version without guard pages! */
+
+ /* XXX, align size for emem_create_chunk to pagesize, and/or posix_memalign (?) */
+
+ chunk->next = mem_chunk->chunk_list;
+ mem_chunk->chunk_list = chunk;
+ }
+
+ ptr = chunk->buf + chunk->free_offset;
+ chunk->free_offset += mem_chunk->item_size;
+ chunk->amount_free -= mem_chunk->item_size;
+
+ return ptr;
+}
+
+void
+sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
+{
+ /* XXX, debug_use_slices -> fallback to g_slice_free1 */
+
+ /* XXX, abort if ptr not found in emem_verify_pointer_list()? */
+ if (ptr != NULL && emem_verify_pointer_list(mem_chunk->chunk_list, ptr)) {
+ void *tmp = mem_chunk->freed;
+
+ mem_chunk->freed = ptr;
+ memcpy(ptr, &tmp, sizeof(void *));
+ }
+}
+
+void *
ep_alloc0(size_t size)
{
return memset(ep_alloc(size),'\0',size);
@@ -858,6 +931,11 @@ se_alloc0(size_t size)
return memset(se_alloc(size),'\0',size);
}
+void *
+sl_alloc0(struct ws_memory_slab *mem_chunk)
+{
+ return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
+}
static gchar *
emem_strdup(const gchar *src, void *allocator(size_t))
@@ -1174,6 +1252,20 @@ se_free_all(void)
emem_free_all(&se_packet_mem);
}
+void
+sl_free_all(struct ws_memory_slab *mem_chunk)
+{
+ emem_chunk_t *chunk_list = mem_chunk->chunk_list;
+
+ mem_chunk->chunk_list = NULL;
+ while (chunk_list) {
+ emem_chunk_t *chunk = chunk_list;
+
+ chunk_list = chunk_list->next;
+ emem_destroy_chunk(chunk);
+ }
+}
+
ep_stack_t
ep_stack_new(void) {
ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
diff --git a/epan/emem.h b/epan/emem.h
index 82f281ed1b..f082106f21 100644
--- a/epan/emem.h
+++ b/epan/emem.h
@@ -161,8 +161,30 @@ gchar* se_strdup_printf(const gchar* fmt, ...)
/** release all memory allocated */
void se_free_all(void);
+/**************************************************************
+ * slab allocator
+ **************************************************************/
+struct _emem_chunk_t;
+/* Macros to initialize ws_memory_slab */
+/* XXX, is G_MEM_ALIGN enough? http://mail.gnome.org/archives/gtk-devel-list/2004-December/msg00091.html */
+#define WS_MEMORY_SLAB_INIT(type, count) { ((sizeof(type) + (G_MEM_ALIGN - 1)) & ~(G_MEM_ALIGN - 1)), count, NULL, NULL }
+#define WS_MEMORY_SLAB_INIT_UNALIGNED(size, count) { size, count, NULL, NULL }
+struct ws_memory_slab {
+ const gint item_size;
+ const gint count;
+
+ struct _emem_chunk_t *chunk_list;
+ void *freed;
+};
+
+void *sl_alloc(struct ws_memory_slab *mem_chunk);
+void *sl_alloc0(struct ws_memory_slab *mem_chunk);
+void sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr);
+
+/** release all memory allocated */
+void sl_free_all(struct ws_memory_slab *mem_chunk);
/**************************************************************
* binary trees