static TinySlab* registry_lookup(uintptr_t slab_base) {
    // Lock-free read with atomic owner access (MT-safe)
    int hash = registry_hash(slab_base);

    // Linear probing search
    for (int i = 0; i < SLAB_REGISTRY_MAX_PROBE; i++) {
        int idx = (hash + i) & SLAB_REGISTRY_MASK;
        SlabRegistryEntry* entry = &g_slab_registry[idx];

        if (entry->slab_base == slab_base) {
            // Atomic load to prevent TOCTOU race with registry_unregister()
            TinySlab* owner = atomic_load_explicit(&entry->owner, memory_order_acquire);
            if (!owner) return NULL;  // Entry cleared by unregister
            return owner;
        }

        if (entry->slab_base == 0) {
            return NULL;  // Empty slot - not found
        }
    }

    return NULL;  // Not found after max probes
}

// ============================================================================
// EXTRACTED TO hakmem_tiny_slab_mgmt.inc (Phase 2D-4 FINAL)
// ============================================================================
// Function: allocate_new_slab() - 79 lines (lines 952-1030)
//   Allocate new slab for a class
// Function: release_slab() - 23 lines (lines 1033-1055)
//   Release a slab back to system

// Step 2: Find slab owner by pointer (O(1) via hash table registry, or O(N) fallback)
TinySlab* hak_tiny_owner_slab(void* ptr) {
    if (!ptr || !g_tiny_initialized) return NULL;

    // Phase 6.14: Runtime toggle between Registry (O(1)) and List (O(N))
    if (g_use_registry) {
        // O(1) lookup via hash table
        uintptr_t slab_base = (uintptr_t)ptr & ~(TINY_SLAB_SIZE - 1);
        TinySlab* slab = registry_lookup(slab_base);
        if (!slab) return NULL;

        // SAFETY: validate membership (ptr must be inside [base, base+64KB))
        uintptr_t start = (uintptr_t)slab->base;
        uintptr_t end = start + TINY_SLAB_SIZE;
        if ((uintptr_t)ptr < start || (uintptr_t)ptr >= end) {
            return NULL;  // false positive from registry → treat as non-Tiny
        }
        return slab;
    } else {
        // O(N) fallback: linear search through all slab lists (lock per class)
        for (int class_idx = 0; class_idx < TINY_NUM_CLASSES; class_idx++) {
            pthread_mutex_t* lock = &g_tiny_class_locks[class_idx].m;
            pthread_mutex_lock(lock);

            // Search free slabs
            for (TinySlab* slab = g_tiny_pool.free_slabs[class_idx]; slab; slab = slab->next) {
                uintptr_t slab_start = (uintptr_t)slab->base;
                uintptr_t slab_end = slab_start + TINY_SLAB_SIZE;
                if ((uintptr_t)ptr >= slab_start && (uintptr_t)ptr < slab_end) {
                    pthread_mutex_unlock(lock);
                    return slab;
                }
            }

            // Search full slabs
            for (TinySlab* slab = g_tiny_pool.full_slabs[class_idx]; slab; slab = slab->next) {
                uintptr_t slab_start = (uintptr_t)slab->base;
                uintptr_t slab_end = slab_start + TINY_SLAB_SIZE;
                if ((uintptr_t)ptr >= slab_start && (uintptr_t)ptr < slab_end) {
                    pthread_mutex_unlock(lock);
                    return slab;
                }
            }

            pthread_mutex_unlock(lock);
        }
        return NULL;  // Not found
    }
}
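
// ----------------------------------------------------------------------------
// Illustrative sketch only (compiled out so it cannot clash with the real
// declarations elsewhere in the allocator): the registry shape that
// registry_lookup() above assumes. The table size, probe limit, and hash
// function below are hypothetical placeholders, not the project's values.
// The point being illustrated is that `owner` is an atomic pointer, so the
// acquire load in registry_lookup() observes either a valid owner or NULL
// (presumably paired with an atomic clear in registry_unregister()).
// ----------------------------------------------------------------------------
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    uintptr_t          slab_base;  // 64KB-aligned slab base address (0 = empty slot)
    _Atomic(TinySlab*) owner;      // owning TinySlab, or NULL once unregistered
} SlabRegistryEntry;

#define SLAB_REGISTRY_SIZE      4096                        // power of two (example value)
#define SLAB_REGISTRY_MASK      (SLAB_REGISTRY_SIZE - 1)
#define SLAB_REGISTRY_MAX_PROBE 16                          // example probe cap

static SlabRegistryEntry g_slab_registry[SLAB_REGISTRY_SIZE];

// Example hash: drop the in-slab offset bits (64KB slabs), then mix
// with a Fibonacci-style multiplier and mask down to a table index.
static inline int registry_hash(uintptr_t slab_base) {
    return (int)(((slab_base >> 16) * 2654435761u) & SLAB_REGISTRY_MASK);
}
#endif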