Files
hakmem/core/hakmem_tiny_legacy_slow_box.inc
Moe Charm (CI) 1bbfb53925 Implement Phantom typing for Tiny FastCache layer
Refactor FastCache and TLS cache APIs to use Phantom types (hak_base_ptr_t)
for compile-time type safety, preventing BASE/USER pointer confusion.
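
A minimal sketch of what the phantom type might look like is below. Only
hak_base_ptr_t and HAK_BASE_FROM_RAW appear in this commit; the HAK_DEBUG
guard, the struct layout, and HAK_BASE_TO_RAW are illustrative assumptions
(the commit only states that release builds typedef the type to void*).

    /* Sketch only -- not the actual header. */
    #ifdef HAK_DEBUG
    /* Debug: a distinct struct type, so mixing BASE and USER pointers
     * becomes a compile-time error. */
    typedef struct { void* p; } hak_base_ptr_t;
    #define HAK_BASE_FROM_RAW(raw) ((hak_base_ptr_t){ .p = (raw) })
    #define HAK_BASE_TO_RAW(bp)    ((bp).p)
    #else
    /* Release: plain void*, so the wrapper compiles away entirely. */
    typedef void* hak_base_ptr_t;
    #define HAK_BASE_FROM_RAW(raw) (raw)
    #define HAK_BASE_TO_RAW(bp)    (bp)
    #endif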

Changes:
1. core/hakmem_tiny_fastcache.inc.h:
   - fastcache_pop() returns hak_base_ptr_t instead of void*
   - fastcache_push() accepts hak_base_ptr_t instead of void*

2. core/hakmem_tiny.c:
   - Updated forward declarations to match new signatures

3. core/tiny_alloc_fast.inc.h, core/hakmem_tiny_alloc.inc:
   - Alloc paths now use hak_base_ptr_t for cache operations
   - BASE->USER conversion via HAK_RET_ALLOC macro (sketched after this list)

4. core/hakmem_tiny_refill.inc.h, core/refill/ss_refill_fc.h:
   - Refill paths properly handle BASE pointer types
   - Fixed: Removed unnecessary HAK_BASE_FROM_RAW() in ss_refill_fc.h line 176

5. core/hakmem_tiny_free.inc, core/tiny_free_magazine.inc.h:
   - Free paths convert USER->BASE before cache push
   - USER->BASE conversion via HAK_USER_TO_BASE or ptr_user_to_base() (sketched after this list)

6. core/hakmem_tiny_legacy_slow_box.inc:
   - Legacy path properly wraps pointers for cache API
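
Taken together, items 1, 3 and 5 describe one discipline: the caches trade
only in BASE pointers, and each conversion happens exactly once at the
boundary. A minimal sketch follows; the prototypes and macro expansions are
assumptions for illustration, only the names come from this commit, and
tiny_free_slow_path is a hypothetical fallback.

    /* Assumed prototypes, matching the signature changes in item 1. */
    hak_base_ptr_t fastcache_pop(int class_idx);
    int            fastcache_push(int class_idx, hak_base_ptr_t base);

    /* Alloc path (item 3): the cache hands back a BASE pointer and the
     * BASE->USER conversion happens once, at the return site. */
    static void* tiny_alloc_sketch(int class_idx) {
        hak_base_ptr_t base = fastcache_pop(class_idx);   /* BASE-typed */
        if (!HAK_BASE_TO_RAW(base)) return NULL;          /* cache empty */
        HAK_RET_ALLOC(class_idx, base);  /* assumed: converts BASE -> USER and returns it */
    }

    /* Free path (item 5): the application's USER pointer is converted
     * back to BASE before it is pushed into the cache. */
    static void tiny_free_sketch(int class_idx, void* user_ptr) {
        hak_base_ptr_t base = HAK_USER_TO_BASE(user_ptr); /* USER -> BASE */
        if (!fastcache_push(class_idx, base)) {
            tiny_free_slow_path(class_idx, base);         /* cache full: hypothetical fallback */
        }
    }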

Benefits:
- Type safety at compile time (in debug builds)
- Zero runtime overhead (the wrapper type exists only in debug builds; release builds typedef hak_base_ptr_t to void*)
- All BASE->USER conversions verified via Task analysis
- Prevents pointer type confusion bugs
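
To make the compile-time benefit concrete: under the debug-build struct
definition sketched above, handing a raw void* to the cache API no longer
compiles, so every crossing of the BASE/USER boundary has to be written out
(fragment for illustration only):

    void* raw = malloc(64);
    fastcache_push(3, raw);                     /* debug build: compile error (incompatible type) */
    fastcache_push(3, HAK_BASE_FROM_RAW(raw));  /* OK: the conversion is explicit and auditable */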

Testing:
- Build: SUCCESS (all 9 files)
- Smoke test: PASS (sh8bench runs to completion)
- Conversion path verification: 3/3 paths correct

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-04 11:05:06 +09:00


static __attribute__((cold, noinline, unused)) void* tiny_slow_alloc_fast(int class_idx) {
    int tls_enabled = g_tls_list_enable;
    TinyTLSList* tls = &g_tls_lists[class_idx];
    pthread_mutex_t* lock = &g_tiny_class_locks[class_idx].m;
    pthread_mutex_lock(lock);

    /* Pop a slab with free blocks, or allocate a fresh one. */
    TinySlab* slab = g_tiny_pool.free_slabs[class_idx];
    if (slab) {
        g_tiny_pool.free_slabs[class_idx] = slab->next;
    } else {
        slab = allocate_new_slab(class_idx);
        if (!slab) {
            pthread_mutex_unlock(lock);
            return NULL;
        }
    }
    slab->next = NULL;

    /* Reclaim any blocks freed remotely by other threads. */
    if (atomic_load_explicit(&slab->remote_head, memory_order_acquire)) {
        tiny_remote_drain_locked(slab);
    }

    /* Carve out the block that satisfies this allocation. */
    int block_idx = hak_tiny_find_free_block(slab);
    if (block_idx < 0) {
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
        pthread_mutex_unlock(lock);
        return NULL;
    }
    hak_tiny_set_used(slab, block_idx);
    slab->free_count--;
    size_t block_size = g_tiny_class_sizes[class_idx];
    uint8_t* base = (uint8_t*)slab->base;
    void* ret = (void*)(base + ((size_t)block_idx * block_size));
    g_tiny_pool.alloc_count[class_idx]++;

    /* Decide how many extra blocks to pre-carve for the fast cache and
     * the TLS list while the class lock is still held. */
    uint16_t cap = g_fast_cap_defaults[class_idx];
    uint16_t count = g_fast_count[class_idx];
    uint16_t fast_need = (cap > count) ? (uint16_t)(cap - count) : 0;
    if (fast_need > slab->free_count) fast_need = (uint16_t)slab->free_count;
    uint32_t tls_need = 0;
    if (tls_enabled && tls_list_needs_refill(tls)) {
        uint32_t target = tls_list_refill_threshold(tls);
        if (tls->count < target) {
            tls_need = target - tls->count;
        }
    }
    uint32_t remaining = slab->free_count;
    if (fast_need > remaining) fast_need = (uint16_t)remaining;
    remaining -= fast_need;
    if (tls_need > remaining) tls_need = remaining;

    /* Batch-refill the fast cache; raw block pointers are wrapped as
     * BASE via HAK_BASE_FROM_RAW before entering the cache API. */
    while (fast_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        int pushed = 0;
        if (__builtin_expect(g_fastcache_enable && class_idx <= 3, 1)) {
            pushed = fastcache_push(class_idx, HAK_BASE_FROM_RAW(extra));
        } else {
            pushed = tiny_fast_push(class_idx, HAK_BASE_FROM_RAW(extra));
        }
        if (!pushed) {
            /* Cache full: spill the block into the TLS list instead. */
            if (tls_enabled) {
                tiny_tls_list_guard_push(class_idx, tls, extra);
                tls_list_push(tls, extra, class_idx);
            }
        }
        fast_need--;
    }

    /* Batch-refill the TLS free list. */
    while (tls_enabled && tls_need > 0) {
        int extra_idx = hak_tiny_find_free_block(slab);
        if (extra_idx < 0) break;
        hak_tiny_set_used(slab, extra_idx);
        slab->free_count--;
        void* extra = (void*)(base + ((size_t)extra_idx * block_size));
        tiny_tls_list_guard_push(class_idx, tls, extra);
        tls_list_push(tls, extra, class_idx);
        tls_need--;
    }

    /* Return the slab to the appropriate list before unlocking. */
    if (slab->free_count == 0) {
        move_to_full_list(class_idx, slab);
    } else {
        slab->next = g_tiny_pool.free_slabs[class_idx];
        g_tiny_pool.free_slabs[class_idx] = slab;
    }
    pthread_mutex_unlock(lock);
    return ret;
}