mempool.c

00001 #include "mempool.h"
00002 #include "util.h"
00003 
00004 #include <stdlib.h>            /* for calloc() and malloc() */
00005 #include <string.h>            /* for memset() */
00006 #include <errno.h>             /* for errno and EINVAL */
00007 
#ifdef CP_HAS_GETPAGESIZE
#include <unistd.h>            /* for getpagesize() */
#else
/* fallback for platforms without getpagesize(): assume 8K pages */
int getpagesize() { return 0x2000; }
#endif /* CP_HAS_GETPAGESIZE */

#ifndef WORD_SIZE
/* alignment granularity for pool items - pointer sized by default */
#define WORD_SIZE (sizeof(void *))
#endif /* WORD_SIZE */

/* transaction lock/unlock helpers. with recursive mutexes the mutex
 * itself permits nested locking by the same thread; otherwise ownership
 * is tracked manually in pool->txowner so the owning thread may
 * re-enter without deadlocking. both variants are no-ops when
 * COLLECTION_MODE_NOSYNC is set, and return err_ret from the enclosing
 * function if the mutex operation fails. */
#if defined(CP_HAS_PTHREAD_MUTEX_RECURSIVE) || defined(CP_HAS_PTHREAD_MUTEX_RECURSIVE_NP)
#define CP_MEMPOOL_TXLOCK(pool, err_ret) { \
    if (!((pool)->mode & COLLECTION_MODE_NOSYNC)) \
    if (cp_mutex_lock((pool)->lock)) \
        return err_ret; \
}
#define CP_MEMPOOL_TXUNLOCK(pool, err_ret) { \
    if (!((pool)->mode & COLLECTION_MODE_NOSYNC)) \
    if (cp_mutex_unlock((pool)->lock)) \
        return err_ret; \
}
#else
/* no recursive mutex: lock only when this thread is not already the
 * transaction owner, then take ownership */
#define CP_MEMPOOL_TXLOCK(pool, err_ret) { \
    if (!((pool)->mode & COLLECTION_MODE_NOSYNC)) \
    { \
        cp_thread self = cp_thread_self(); \
        if (!cp_thread_equal(self, (pool)->txowner) && \
            cp_mutex_lock((pool)->lock)) \
            return err_ret; \
        (pool)->txowner = self; \
    } \
}
#define CP_MEMPOOL_TXUNLOCK(pool, err_ret) { \
    if (!((pool)->mode & COLLECTION_MODE_NOSYNC)) \
    { \
        cp_thread self = cp_thread_self(); \
        if (!cp_thread_equal(self, (pool)->txowner) && \
            cp_mutex_unlock((pool)->lock)) \
            return err_ret; \
        (pool)->txowner = 0; \
    } \
}
#endif /* CP_HAS_PTHREAD_MUTEX_RECURSIVE */
/* cached system page size, initialized on first pool creation */
static size_t pagesize = 0;
00052 
00053 cp_mempool *cp_mempool_create_by_option(const int mode, 
00054                                         size_t item_size, 
00055                                         size_t alloc_size)
00056 {
00057     cp_mempool *pool = (cp_mempool *) calloc(1, sizeof(cp_mempool));
00058     if (pool == NULL) return NULL;
00059 
00060     pool->mode = mode;
00061 
00062     if (!(mode & COLLECTION_MODE_NOSYNC))
00063     {
00064 #if defined(PTHREAD_MUTEX_RECURSIVE) || defined(PTHREAD_MUTEX_RECURSIVE_NP)
00065         pthread_mutexattr_t attr;
00066 #endif /* PTHREAD_MUTEX_RECURSIVE */
00067         pool->lock = (cp_mutex *) malloc(sizeof(cp_mutex));
00068         if (pool->lock == NULL)
00069         {
00070             cp_mempool_destroy(pool);
00071             return NULL;
00072         }
00073 #ifdef PTHREAD_MUTEX_RECURSIVE
00074         pthread_mutexattr_init(&attr);
00075         pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
00076         cp_mutex_init(pool->lock, &attr);
00077 #else
00078         cp_mutex_init(pool->lock, NULL);
00079 #endif /* PTHREAD_MUTEX_RECURSIVE */
00080     }
00081 
00082     if (pagesize == 0) pagesize = getpagesize();
00083 
00084     /* first, we ensure that item_size is a multiple of WORD_SIZE,
00085      * and also that it is at least sizeof(void*). The first
00086      * condition may imply the second on *most* platforms, but it
00087      * costs us very little to make sure. */
00088     if (item_size < sizeof(void*)) item_size = sizeof(void*);
00089     if (item_size % WORD_SIZE)
00090         item_size += (WORD_SIZE) - (item_size % WORD_SIZE);
00091     pool->item_size = item_size;
00092     /* next, we pump up the alloc_size until it is at least big enough
00093      * to hold ten chunks plus a void pointer, or ten pages, whichever
00094      * is bigger. The reason for doing it this way rather than simply
00095      * adding sizeof(void*) to alloc_size is that we want alloc_size to
00096      * be a multiple of pagesize (this makes it faster!). */
00097     if (alloc_size < item_size * 10 + sizeof(void *))
00098         alloc_size = item_size * 10 + sizeof(void *);
00099     if (alloc_size < pagesize * 10) alloc_size = pagesize * 10;
00100     if (alloc_size % pagesize)
00101         alloc_size += pagesize - (alloc_size % pagesize);
00102     pool->alloc_size = alloc_size;
00103 
00104     pool->items_per_alloc = (alloc_size - sizeof(void *)) / item_size;
00105 
00106     pool->reuse_pool = NULL;
00107     pool->alloc_pool = (char *) malloc(alloc_size);
00108     if (pool->alloc_pool == NULL)
00109     {
00110         free(pool);
00111         return NULL;
00112     }
00113     *(void **) pool->alloc_pool = NULL;
00114 
00115     return pool;
00116 }
00117 
00118 
00119 cp_mempool *cp_mempool_create(const size_t item_size)
00120 {
00121     return cp_mempool_create_by_option(COLLECTION_MODE_NOSYNC, item_size, 0);
00122 }
00123 
00124 
00125 void *cp_mempool_alloc(cp_mempool * const pool)
00126 {
00127     void *p;
00128 
00129     CP_MEMPOOL_TXLOCK(pool, NULL);
00130 
00131     if (pool->reuse_pool)
00132     {
00133         p = pool->reuse_pool;
00134         pool->reuse_pool = *(void **)p;
00135     }
00136     else
00137     {
00138         if (pool->alloc_pool_pos == pool->items_per_alloc)
00139         {
00140             p = malloc(pool->alloc_size);
00141             if (p == NULL) return NULL;
00142             *(void **) p = pool->alloc_pool;
00143             pool->alloc_pool = p;
00144             pool->alloc_pool_pos = 0;
00145             /* if this pool is owned by a shared_mempool, report allocations */
00146             if (pool->alloc_callback) 
00147                 (*pool->alloc_callback)(pool->callback_prm, pool, p);
00148         }
00149         p = pool->alloc_pool + sizeof(void *) + 
00150             pool->item_size * pool->alloc_pool_pos++;
00151     }
00152 
00153     CP_MEMPOOL_TXUNLOCK(pool, NULL);
00154 
00155     return p;
00156 }
00157 
00158 void *cp_mempool_calloc(cp_mempool * const pool)
00159 {
00160     void *p = cp_mempool_alloc(pool);
00161     if (p)
00162         memset(p, 0, pool->item_size);
00163     return p;
00164 }
00165 
/* return an item to the pool for reuse. the item's first word is
 * overwritten with the previous free-list head, so the caller must not
 * touch the memory afterwards. returns 0 on success, -1 if the
 * transaction lock could not be acquired or released. */
int cp_mempool_free(cp_mempool * const pool, void *data)
{
    CP_MEMPOOL_TXLOCK(pool, -1);
    /* push data onto the singly linked reuse list */
    *(void **) data = pool->reuse_pool;
    pool->reuse_pool = data;
    CP_MEMPOOL_TXUNLOCK(pool, -1);
    return 0;
}
00174 
/* increment refcount */
/* bump the pool's reference count - used by cp_shared_mempool when the
 * same sub-pool is registered repeatedly. each cp_mempool_destroy call
 * decrements the count; the pool is only released once it is exhausted.
 * returns 0 on success, -1 on lock failure. */
int cp_mempool_inc_refcount(cp_mempool *pool)
{
    CP_MEMPOOL_TXLOCK(pool, -1);
    pool->refcount++;
    CP_MEMPOOL_TXUNLOCK(pool, -1);
    return 0;
}
00183 
/* release one reference to the pool; when the count is exhausted the
 * pool itself is released: all allocation blocks (chained through their
 * leading void * word), the mutex if any, and the pool record.
 * NOTE(review): the post-decrement means a pool with refcount 0 (never
 * inc_refcount'ed) is freed on the first destroy call - matching the
 * pairing with cp_mempool_inc_refcount. safe to call with NULL. */
void cp_mempool_destroy(cp_mempool *pool)
{
    if (pool)
    {
        if (pool->refcount-- <= 0)
        {
            void *p;

            /* walk the block chain; each block's first word points to
             * the previously allocated block */
            while ((p = pool->alloc_pool))
            {
                pool->alloc_pool = *(void **) pool->alloc_pool;
                free(p);
            }
            
            if (pool->lock != NULL)
            {
                cp_mutex_destroy(pool->lock);
                free(pool->lock);
            }
            free(pool);
        }
    }
}
00207 
00208 void cp_mempool_set_callback(cp_mempool *pool, void *prm, cp_mempool_callback_fn cb)
00209 
00210 {
00211     pool->alloc_callback = cb;
00212     pool->callback_prm = prm;
00213 }
00214 
00215 
00216 /****************************************************************************
00217  *                                                                          *
00218  *                         cp_shared_mempool functions                      *
00219  *                                                                          *
00220  ****************************************************************************/
00221 
/* bookkeeping record for one block handed out by a sub-pool. a record
 * with size == 0 serves as a probe key for looking up the block that
 * contains an arbitrary address (see compare_chunk_track). */
typedef struct _chunk_track
{
    void *mem;    /* block base address */
    size_t size;  /* block size in bytes; 0 marks a lookup probe */
} chunk_track;
00227 
00228 chunk_track *get_chunk_track(void *mem, size_t size)
00229 {
00230     chunk_track *t = (chunk_track *) malloc(sizeof(chunk_track));
00231     if (t)
00232     {
00233         t->mem = mem;
00234         t->size = size;
00235     }
00236     return t;
00237 }
00238 
00239 int compare_chunk_track(void *c1, void *c2)
00240 {
00241     chunk_track *t1 = c1;
00242     chunk_track *t2 = c2;
00243     return (t2->size == 0 && 
00244             t2->mem >= t1->mem && 
00245             ((char *) t2->mem - (char *) t1->mem) < t1->size) ||
00246            (t1->size == 0 && 
00247             t1->mem >= t2->mem && 
00248             ((char *) t1->mem - (char *) t2->mem) < t2->size) ? 0 : 
00249         ((char *) t1->mem - (char *) t2->mem);
00250 }
00251 
00252 cp_mempool *shared_mempool_entry_get(cp_shared_mempool *pool, size_t size)
00253 {
00254     shared_mempool_entry *entry = pool->reg_tbl[size % pool->reg_tbl_size];
00255 
00256     while (entry && entry->item_size != size) entry = entry->next;
00257     if (entry) return entry->pool;
00258 
00259     return NULL;
00260 }
00261 
00262 cp_mempool *shared_mempool_entry_put(cp_shared_mempool *pool, 
00263                                      size_t size, cp_mempool *sub)
00264 {
00265     shared_mempool_entry **entry = &pool->reg_tbl[size % pool->reg_tbl_size];
00266 
00267     while ((*entry) && (*entry)->item_size != size) 
00268         entry = &(*entry)->next;
00269 
00270     if (*entry == NULL)
00271     {
00272         *entry = calloc(1, sizeof(shared_mempool_entry));
00273         (*entry)->item_size = size;
00274     }
00275 
00276     (*entry)->pool = sub;
00277     return sub;
00278 }
00279 
00280 void shared_mempool_entry_destroy(cp_shared_mempool *pool)
00281 {
00282     int i;
00283 
00284     for (i = 0; i < pool->reg_tbl_size; i++)
00285     {
00286         shared_mempool_entry *curr, *tmp;
00287         curr = pool->reg_tbl[i];
00288         while (curr)
00289         {
00290             tmp = curr;
00291             curr = curr->next;
00292             cp_mempool_destroy(tmp->pool);
00293             free(tmp);
00294         }
00295     }
00296 
00297     free(pool->reg_tbl);
00298 }
00299 
00300 /* cp_shared_mempool_create */
00301 cp_shared_mempool *cp_shared_mempool_create()
00302 {
00303     return 
00304         cp_shared_mempool_create_by_option(0, CP_SHARED_MEMPOOL_TYPE_2, 0, 0);
00305 }
00306 
00307 /* cp_shared_mempool_create_by_option */
00308 CPROPS_DLL
00309 cp_shared_mempool *
00310     cp_shared_mempool_create_by_option(int mode, 
00311                                        int arbitrary_allocation_strategy,
00312                                        int size_hint, 
00313                                        int page_count)
00314 {
00315     cp_shared_mempool *pool = 
00316         (cp_shared_mempool *) calloc(1, sizeof(cp_shared_mempool));
00317     if (pool == NULL) return NULL;
00318 
00319     if (size_hint)
00320         size_hint = size_hint * 2 + 1; /* choose an odd number */
00321     else 
00322         size_hint = 211; /* 211 is a prime */
00323 
00324     pool->reg_tbl = calloc(size_hint, sizeof(shared_mempool_entry *));
00325     if (pool->reg_tbl == NULL) goto CREATE_ERROR;
00326     pool->reg_tbl_size = size_hint;
00327 
00328     pool->mode = mode;
00329 
00330     if ((mode & COLLECTION_MODE_NOSYNC))
00331     {
00332         pool->lock = (cp_mutex *) malloc(sizeof(cp_mutex));
00333         if (pool->lock == NULL) goto CREATE_ERROR;
00334         if ((cp_mutex_init(pool->lock, NULL))) goto CREATE_ERROR;
00335     }
00336 
00337     if (arbitrary_allocation_strategy == 0)
00338         pool->gm_mode = CP_SHARED_MEMPOOL_TYPE_1;
00339     else
00340         pool->gm_mode = arbitrary_allocation_strategy;
00341 
00342     pool->multiple = page_count;
00343 
00344     pool->chunk_tracker = 
00345         cp_rbtree_create_by_option(mode | COLLECTION_MODE_DEEP, 
00346                                    compare_chunk_track, NULL, free, NULL, NULL);
00347     if (pool->chunk_tracker == NULL) goto CREATE_ERROR;
00348 
00349     return pool;
00350 
00351 CREATE_ERROR:
00352     if (pool->lock)
00353     {
00354         free(pool->lock);
00355         pool->lock = NULL;
00356     }
00357     cp_shared_mempool_destroy(pool);
00358     return NULL;
00359 }
00360 
00361 /* cp_shared_mempool destroy */
00362 CPROPS_DLL
00363 void cp_shared_mempool_destroy(cp_shared_mempool *pool)
00364 {
00365     if (pool)
00366     {
00367         cp_rbtree_destroy(pool->chunk_tracker);
00368         shared_mempool_entry_destroy(pool);
00369         if (pool->lock)
00370         {
00371             cp_mutex_destroy(pool->lock);
00372             free(pool->lock);
00373         }
00374         free(pool);
00375     }
00376 }
00377 
00378 void cp_shared_mempool_track_alloc(cp_shared_mempool *pool, 
00379                                    cp_mempool *sub, void *mem)
00380 {
00381     cp_rbtree_insert(pool->chunk_tracker, 
00382                      get_chunk_track(mem, sub->alloc_size), sub);
00383 }
00384 
00385 /* cp_shared_mempool_register */
00386 cp_mempool *cp_shared_mempool_register(cp_shared_mempool *pool, size_t size)
00387 {
00388     cp_mempool *sub;
00389     if (size % WORD_SIZE) size += WORD_SIZE - (size % WORD_SIZE);
00390     sub = shared_mempool_entry_get(pool, size);
00391     if (sub)
00392         cp_mempool_inc_refcount(sub);
00393     else
00394     {
00395         sub = cp_mempool_create_by_option(pool->mode, size, pool->multiple);
00396         cp_mempool_set_callback(sub, pool, 
00397             (cp_mempool_callback_fn) cp_shared_mempool_track_alloc);
00398         shared_mempool_entry_put(pool, size, sub);
00399     }
00400 
00401     return sub;
00402 }
00403 
00404 #if 0
00405 /* unregister a mempool */
00406 void cp_shared_mempool_unregister(cp_shared_mempool *pool, size_t size)
00407 {
00408     cp_mempool *sub;
00409     if (size % WORD_SIZE) size += WORD_SIZE - (size % WORD_SIZE);
00410     sub = shared_mempool_entry_get(pool, size);
00411     if (sub)
00412         cp_mempool_destroy(sub);
00413 }
00414 #endif
00415 
00416 /* cp_shared_mempool_alloc */
00417 CPROPS_DLL
00418 void *cp_shared_mempool_alloc(cp_shared_mempool *pool, size_t size)
00419 {
00420     size_t actual;
00421     cp_mempool *mempool = NULL;
00422 
00423     if (size % WORD_SIZE) size += WORD_SIZE - (size % WORD_SIZE);
00424     
00425     if ((mempool = shared_mempool_entry_get(pool, size)))
00426         return cp_mempool_alloc(mempool);
00427 
00428     if ((pool->gm_mode & CP_SHARED_MEMPOOL_TYPE_2))
00429         actual = size;
00430     else
00431     {
00432         actual = WORD_SIZE;
00433         while (actual < size) actual <<= 1;
00434     }
00435     if ((mempool = cp_shared_mempool_register(pool, actual)))
00436         return cp_mempool_alloc(mempool);
00437 
00438     return NULL;
00439 }
00440 
00441 /* cp_shared_mempool_calloc */
00442 CPROPS_DLL
00443 void *cp_shared_mempool_calloc(cp_shared_mempool *pool, size_t size)
00444 {
00445     size_t actual;
00446     cp_mempool *mempool = NULL;
00447 
00448     if (size % WORD_SIZE) size += WORD_SIZE - (size % WORD_SIZE);
00449     
00450     if ((mempool = shared_mempool_entry_get(pool, size)))
00451         return cp_mempool_calloc(mempool);
00452 
00453     if ((pool->gm_mode & CP_SHARED_MEMPOOL_TYPE_2))
00454         actual = size;
00455     else
00456     {
00457         actual = WORD_SIZE;
00458         while (actual < size) actual <<= 1;
00459     }
00460     if ((mempool = cp_shared_mempool_register(pool, actual)))
00461         return cp_mempool_calloc(mempool);
00462 
00463     return NULL;
00464 }
00465 
00466 
00467 /* cp_shared_mempool_free */
00468 CPROPS_DLL
00469 void cp_shared_mempool_free(cp_shared_mempool *pool, void *p)
00470 {
00471     cp_mempool *mempool;
00472     chunk_track ct;
00473     memset(&ct, 0, sizeof(chunk_track));
00474     ct.mem = p;
00475 
00476     if ((mempool = cp_rbtree_get(pool->chunk_tracker, &ct)))
00477         cp_mempool_free(mempool, p);
00478 }
00479 

Generated on Mon Dec 5 23:00:22 2011 for cprops by  doxygen 1.4.7