/*
 * Slow-path allocation for one tiny size class.
 *
 * Called when the per-thread fast paths are empty. Under the per-class
 * mutex it pops a partially-free slab (or maps a new one), drains any
 * cross-thread ("remote") frees into the slab, carves one block to
 * return to the caller, and then opportunistically pre-fills the
 * thread's fast cache and TLS free list from the same slab so the next
 * allocations can skip the lock. The slab is finally re-filed on the
 * free or full list depending on its remaining capacity.
 *
 * Returns a pointer to one block of g_tiny_class_sizes[class_idx]
 * bytes, or NULL if no slab could be obtained or the chosen slab had
 * no free block. Marked cold/noinline to keep it out of the hot path.
 */
static __attribute__((cold, noinline, unused)) void* tiny_slow_alloc_fast(int class_idx) {
    /* Snapshot the TLS-list enable flag once so the decision is
     * consistent across the whole call. */
    int tls_enabled = g_tls_list_enable;
    TinyTLSList* tls = &g_tls_lists[class_idx];
    pthread_mutex_t* lock = &g_tiny_class_locks[class_idx].m;
    pthread_mutex_lock(lock);

    /* Pop a slab with free blocks from the per-class free list, or
     * allocate a fresh one if the list is empty. */
    TinySlab* slab = g_tiny_pool.free_slabs[class_idx];
    if (slab) {
        g_tiny_pool.free_slabs[class_idx] = slab->next;
    } else {
        slab = allocate_new_slab(class_idx);
        if (!slab) {
            /* Out of memory: release the lock before failing. */
            pthread_mutex_unlock(lock);
            return NULL;
        }
    }
    /* Detach from whatever list the slab was on; it is re-filed below. */
    slab->next = NULL;

    /* Fold in blocks freed by other threads. The acquire load
     * presumably pairs with a release store in the remote-free path —
     * TODO(review): confirm against the producer side. */
    if (atomic_load_explicit(&slab->remote_head, memory_order_acquire)) {
        tiny_remote_drain_locked(slab);
    }

    int block_idx = hak_tiny_find_free_block(slab);
    if (block_idx < 0) {
        /* Slab turned out to have no free block; put it back on the
         * free list unchanged and report failure. */
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
        pthread_mutex_unlock(lock);
        return NULL;
    }

    /* Claim the block that will be returned to the caller. */
    hak_tiny_set_used(slab, block_idx);
    slab->free_count--;
    size_t block_size = g_tiny_class_sizes[class_idx];
    uint8_t* base = (uint8_t*)slab->base;
    void* ret = (void*)(base + ((size_t)block_idx * block_size));
    g_tiny_pool.alloc_count[class_idx]++;

    /* Fast-cache refill budget: how many blocks the per-thread fast
     * cache is short of its configured capacity, clamped to what this
     * slab can supply. */
    uint16_t cap = g_fast_cap_defaults[class_idx];
    uint16_t count = g_fast_count[class_idx];
    uint16_t fast_need = (cap > count) ? (uint16_t)(cap - count) : 0;
    if (fast_need > slab->free_count) fast_need = (uint16_t)slab->free_count;

    /* TLS-list refill budget, only when the TLS list reports it wants
     * a refill. */
    uint32_t tls_need = 0;
    if (tls_enabled && tls_list_needs_refill(tls)) {
        uint32_t target = tls_list_refill_threshold(tls);
        if (tls->count < target) {
            tls_need = target - tls->count;
        }
    }
    /* Split the slab's remaining blocks between the two budgets; the
     * fast cache is served first. (The fast_need clamp here repeats
     * the one above — free_count has not changed in between.) */
    uint32_t remaining = slab->free_count;
    if (fast_need > remaining) fast_need = (uint16_t)remaining;
    remaining -= fast_need;
    if (tls_need > remaining) tls_need = remaining;

    /* Refill the fast cache: pop blocks from the slab and push them
     * into the fast cache, falling back to the TLS list on a full
     * push. */
    while (fast_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        int pushed = 0;
        /* Classes 0..3 use the dedicated fastcache when enabled;
         * larger classes use the generic tiny fast path. */
        if (__builtin_expect(g_fastcache_enable && class_idx <= 3, 1)) {
            pushed = fastcache_push(class_idx, extra);
        } else {
            pushed = tiny_fast_push(class_idx, extra);
        }
        if (!pushed) {
            if (tls_enabled) {
                tiny_tls_list_guard_push(class_idx, tls, extra);
                tls_list_push(tls, extra, class_idx);
            }
            /* NOTE(review): if the push fails AND tls_enabled is 0,
             * 'extra' has been marked used but is stored nowhere —
             * the block appears to leak until the slab is recycled.
             * Confirm whether this combination is reachable; fixing
             * it needs the bitmap-clear counterpart of
             * hak_tiny_set_used, which is not visible here. */
        }
        fast_need--;
    }

    /* Refill the TLS free list with its share of the slab's blocks. */
    while (tls_enabled && tls_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        tiny_tls_list_guard_push(class_idx, tls, extra);
        tls_list_push(tls, extra, class_idx);
        tls_need--;
    }

    /* Re-file the slab: exhausted slabs go to the full list, anything
     * with capacity left returns to the head of the free list. */
    if (slab->free_count == 0) {
        move_to_full_list(class_idx, slab);
    } else {
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
    }

    pthread_mutex_unlock(lock);
    return ret;
}
|