//line:229~238
// Round the request up to 16-byte units (including the IB in-band header
// bytes), then convert that unit count into an index into size_classes[];
// this is how requests are classified into groups.
static inline int size_to_class(size_t n)
{
    n = (n+IB-1)>>4;
    if (n<10) return n;
    n++;
    int i = (28-a_clz_32(n))*4 + 8;
    if (n>size_classes[i+1]) i+=2;
    if (n>size_classes[i]) i++;
    return i;
}
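To make the mapping concrete, here is a self-contained sketch of the same classifier. It assumes IB == 4 and the stock size_classes table from mallocng's meta.h (values are strides in 16-byte units), and substitutes the compiler builtin for a_clz_32; treat it as an illustration, not the canonical source.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define IB 4
static const uint16_t size_classes[] = {
    1, 2, 3, 4, 5, 6, 7, 8,
    9, 10, 12, 15,
    18, 20, 25, 31,
    36, 42, 50, 63,
    72, 84, 102, 127,
    146, 170, 204, 255,
    292, 340, 409, 511,
    584, 682, 818, 1023,
    1169, 1364, 1637, 2047,
    2340, 2730, 3276, 4095,
    4680, 5460, 6552, 8191,
};

static int size_to_class(size_t n)
{
    n = (n+IB-1)>>4;        /* 16-byte units, rounded up, IB included */
    if (n<10) return n;     /* classes 0..9 are exact unit counts */
    n++;
    int i = (28-__builtin_clz((uint32_t)n))*4 + 8; /* 4 classes per log2 bucket */
    if (n>size_classes[i+1]) i+=2;
    if (n>size_classes[i]) i++;
    return i;
}

int main(void)
{
    size_t tests[] = { 1, 100, 200, 1000, 5000 };
    for (int i=0; i<5; i++) {
        int sc = size_to_class(tests[i]);
        printf("%5zu bytes -> class %2d (stride %u bytes)\n",
               tests[i], sc, size_classes[sc]*16);
    }
    return 0;
}

For example, 200 bytes rounds up to 12 units, is bumped to 13 by the n++ adjustment, and lands in class 11 (15 units, a 240-byte stride).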
//line:174~284
static struct meta *alloc_group(int sc, size_t req)
{
    ...
    } else {
        // The brk path: nest the new group inside a slot of a
        // larger size class instead of giving it its own mmap.
        int j = size_to_class(UNIT+cnt*size-IB);
        int idx = alloc_slot(j, UNIT+cnt*size-IB);
        if (idx < 0) {
            free_meta(m);
            return 0;
        }
        struct meta *g = ctx.active[j];
        p = enframe(g, idx, UNIT*size_classes[j]-IB, ctx.mmap_counter);
        m->maplen = 0;
        p[-3] = (p[-3]&31) | (6<<5);
        // Zero the byte just below each nested slot boundary,
        // stepping by the slot size.
        for (int i=0; i<=cnt; i++)
            p[UNIT+i*size-4] = 0;
        active_idx = cnt-1;
    }
    ...
}
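The sizing of the nested request follows from the slot geometry seen in free() below: a slot of stride S exposes S - IB nominal bytes, and a group of cnt slots of stride size occupies UNIT header bytes plus cnt*size of storage. A small worked example, assuming UNIT == 16, IB == 4, and the stock class table:

    req = UNIT + cnt*size - IB
    e.g. cnt = 10 slots of size = 48 (class 2):
         req = 16 + 10*48 - 4 = 492 bytes
         size_to_class(492) = 15  -> stride 31*16 = 496 bytes
         and 496 - IB = 492, exactly the nested group's footprint.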
//line:300~381
// The implementation of malloc(); lite_malloc ends up calling here.
void *malloc(size_t n)
{
    if (size_overflows(n)) return 0;
    struct meta *g;
    uint32_t mask, first;
    int sc;
    int idx;
    int ctr;
    // Requests at or above a threshold are served by a dedicated mmap.
    // (Observed in gdb: p MMAP_THRESHOLD  =>  $10 = 0x1ffec)
    if (n >= MMAP_THRESHOLD) {
        size_t needed = n + IB + UNIT;
        void *p = mmap(0, needed, PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANON, -1, 0);
        if (p==MAP_FAILED) return 0;
        wrlock();
        step_seq();
        g = alloc_meta();
        if (!g) {
            unlock();
            munmap(p, needed);
            return 0;
        }
        g->mem = p;
        g->mem->meta = g;
        g->last_idx = 0;
        g->freeable = 1;
        g->sizeclass = 63;
        g->maplen = (needed+4095)/4096;
        g->avail_mask = g->freed_mask = 0;
        // use a global counter to cycle offset in
        // individually-mmapped allocations.
        ctx.mmap_counter++;
        idx = 0;
        goto success;
    }

    // Otherwise allocate from a group (the brk path): convert the
    // requested size into a size_classes index, then take a chunk
    // from a group of that class.
    sc = size_to_class(n);
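A quick sketch of the sizing arithmetic on this path, assuming IB == 4, UNIT == 16, a 4096-byte page, and the MMAP_THRESHOLD value observed in gdb above; the names simply mirror the code:

#include <stdio.h>

int main(void)
{
    size_t n = 0x20000;                      /* 128 KiB, above 0x1ffec */
    size_t needed = n + 4 + 16;              /* n + IB + UNIT */
    size_t maplen = (needed + 4095) / 4096;  /* pages recorded in g->maplen */
    printf("needed = %zu bytes, maplen = %zu pages\n", needed, maplen);
    return 0;
}

Note that sizeclass = 63 acts as the sentinel for individually-mmapped allocations, and last_idx = 0 makes this a single-slot group.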
    rdlock();
    // Look up the active group (meta) for this size class; if no group
    // of this class has been allocated yet, ctx.active[sc] is 0.
    g = ctx.active[sc];
    // use coarse size classes initially when there are not yet
    // any groups of desired size. this allows counts of 2 or 3
    // to be allocated at first rather than having to start with
    // 7 or 5, the min counts for even size classes.
    if (!g && sc>=4 && sc<32 && sc!=6 && !(sc&1) && !ctx.usage_by_class[sc]) {
        size_t usage = ctx.usage_by_class[sc|1];
        // if a new group may be allocated, count it toward
        // usage in deciding if we can use coarse class.
        if (!ctx.active[sc|1]
            || (!ctx.active[sc|1]->avail_mask
                && !ctx.active[sc|1]->freed_mask))
            usage += 3;
        if (usage <= 12)
            sc |= 1;
        g = ctx.active[sc];
    }
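A worked trace of this heuristic (numbers assume the stock class table): the first allocation of 160 bytes classifies to sc = 10, an even class with a relatively large minimum slot count, and gets redirected to the coarse odd class above it:

    sc = size_to_class(160) = 10      // even, >=4, <32, != 6, no usage yet
    usage = ctx.usage_by_class[11]    // 0: class 11 never used either
    no active[11] group exists        -> usage += 3   // usage = 3
    usage (3) <= 12                   -> sc |= 1      // use class 11

Once class 10 accumulates usage of its own, the !ctx.usage_by_class[sc] test fails and requests go back to the exact class.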
    for (;;) {
        mask = g ? g->avail_mask : 0;
        first = mask&-mask;
        if (!first) break;
        if (RDLOCK_IS_EXCLUSIVE || !MT)
            g->avail_mask = mask-first;
        else if (a_cas(&g->avail_mask, mask, mask-first)!=mask)
            continue;
        idx = a_ctz_32(first);
        goto success;
    }
    upgradelock();
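Two bit tricks do the work here: mask & -mask isolates the lowest set bit of the availability bitmap, and a_ctz_32 (count trailing zeros) converts that bit into a slot index. A minimal standalone demo, using the GCC/Clang builtin in place of a_ctz_32:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t avail_mask = 0x2c;                /* slots 2, 3 and 5 free */
    uint32_t first = avail_mask & -avail_mask; /* 0x04: lowest free slot */
    int idx = __builtin_ctz(first);            /* -> 2 */
    avail_mask -= first;                       /* claim slot 2 */
    printf("claimed slot %d, mask now %#x\n", idx, avail_mask);
    return 0;
}

In the multithreaded case the same subtraction is simply retried through a_cas until no other thread has raced the update.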
    // Ask for a slot of class sc large enough for an n-byte payload.
    idx = alloc_slot(sc, n);
    if (idx < 0) {
        unlock();
        return 0;
    }
    g = ctx.active[sc];
//line:101~143
void free(void *p)
{
    if (!p) return; // freeing a null pointer is a no-op
    // Recover the meta struct for p (get_meta also performs sanity
    // checks), then derive the slot index and the slot's bounds.
    struct meta *g = get_meta(p);
    int idx = get_slot_index(p);
    size_t stride = get_stride(g);
    unsigned char *start = g->mem->storage + stride*idx;
    unsigned char *end = start + stride - IB;
    get_nominal_size(p, end);
    uint32_t self = 1u<<idx, all = (2u<<g->last_idx)-1;
    // Poison this chunk's in-band index byte (set it to 0xff).
    ((unsigned char *)p)[-3] = 255;
    // invalidate offset to group header, and cycle offset of
    // used region within slot if current offset is zero.
    // (i.e. clear the chunk's 16-bit offset field to 0)
    *(uint16_t *)((char *)p-2) = 0;
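For reference, the in-band header bytes being poisoned here sit just below the user pointer p; this is the layout that get_meta()/get_slot_index() read (a sketch from the mallocng layout, worth double-checking against meta.h):

    p[-4]                nonzero => a 32-bit offset is stored at p-8 instead
    p[-3] & 31           slot index within the group (what get_slot_index reads)
    *(uint16_t *)(p-2)   offset back to the group header, in 16-byte units

So writing 255 into p[-3] and zeroing the 16-bit offset makes a later double free of the same pointer trip the sanity checks in get_meta().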
    // release any whole pages contained in the slot to be freed
    // unless it's a single-slot group that will be unmapped.
    if (((uintptr_t)(start-1) ^ (uintptr_t)end) >= 2*PGSZ && g->last_idx) {
        unsigned char *base = start + (-(uintptr_t)start & (PGSZ-1));
        size_t len = (end-base) & -PGSZ;
        if (len) madvise(base, len, MADV_FREE);
    }
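A worked example of the page math, assuming PGSZ == 4096:

    start = 0x10000100, stride = 3*4096, end = start + stride - IB
    base  = start + (-start & (PGSZ-1)) = 0x10001000   (next page boundary)
    len   = (end - base) & -PGSZ        = 0x2000       (two whole pages)

madvise(MADV_FREE) lets the kernel reclaim those pages lazily while the slot stays accounted for; the xor test up front is just a cheap way to rule out slots too small to contain a whole page.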
    // atomic free without locking if this is neither first or last slot
    // (publish the slot in the meta's freed_mask, alongside avail_mask)
    for (;;) {
        uint32_t freed = g->freed_mask;
        uint32_t avail = g->avail_mask;
        uint32_t mask = freed | avail;
        assert(!(mask&self));
        if (!freed || mask+self==all) break;
        if (!MT)
            g->freed_mask = freed+self;
        else if (a_cas(&g->freed_mask, freed, freed+self)!=freed)
            continue;
        return;
    }
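The two break conditions select exactly the cases that need the lock: !freed means this is the first slot freed in this group (presumably so the group can be queued for reuse), and mask+self==all means every slot would now be available or freed (so the group may be dissolved and its mapping returned). Both fall through to nontrivial_free() below.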
    wrlock();
    struct mapinfo mi = nontrivial_free(g, idx);
    unlock();
    if (mi.len) munmap(mi.base, mi.len);
}
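A quick way to poke at both paths from userspace on a musl system (sizes assume the MMAP_THRESHOLD value noted earlier; running under strace should show the dedicated mmap/munmap pair for the large allocation):

#include <stdlib.h>

int main(void)
{
    void *small = malloc(200);      /* group/slot path */
    void *big   = malloc(0x20000);  /* >= MMAP_THRESHOLD: individual mmap */
    free(small);                    /* usually the lock-free bitmask path */
    free(big);                      /* nontrivial_free -> munmap */
    return 0;
}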