Upstream-Status: Backport (from upstream WebKit)

https://bugs.webkit.org/show_bug.cgi?id=129370
https://trac.webkit.org/changeset?old=164727&old_path=webkit%2Ftrunk%2FSource%2FWTF%2Fwtf%2FFastMalloc.cpp&new=164742&new_path=webkit%2Ftrunk%2FSource%2FWTF%2Fwtf%2FFastMalloc.cpp

Index: trunk/Source/WTF/wtf/FastMalloc.cpp
===================================================================
--- Source/WTF/wtf/FastMalloc.cpp (revision 164727)
+++ Source/WTF/wtf/FastMalloc.cpp (revision 164742)
@@ -92,4 +92,5 @@
 
 #if OS(DARWIN)
+#include 
 #include 
 #endif
@@ -630,26 +631,20 @@
 //-------------------------------------------------------------------
 
+// Type that can hold the length of a run of pages
+typedef uintptr_t Length;
+
 // Not all possible combinations of the following parameters make
 // sense. In particular, if kMaxSize increases, you may have to
 // increase kNumClasses as well.
-#if OS(DARWIN)
-# define K_PAGE_SHIFT PAGE_SHIFT
-# if (K_PAGE_SHIFT == 12)
-# define K_NUM_CLASSES 68
-# elif (K_PAGE_SHIFT == 14)
-# define K_NUM_CLASSES 77
-# else
-# error "Unsupported PAGE_SHIFT amount"
-# endif
-#else
-# define K_PAGE_SHIFT 12
-# define K_NUM_CLASSES 68
-#endif
-static const size_t kPageShift = K_PAGE_SHIFT;
-static const size_t kPageSize = 1 << kPageShift;
+#define K_PAGE_SHIFT_MIN 12
+#define K_PAGE_SHIFT_MAX 14
+#define K_NUM_CLASSES_MAX 77
+static size_t kPageShift = 0;
+static size_t kNumClasses = 0;
+static size_t kPageSize = 0;
+static Length kMaxValidPages = 0;
 static const size_t kMaxSize = 32u * 1024;
 static const size_t kAlignShift = 3;
 static const size_t kAlignment = 1 << kAlignShift;
-static const size_t kNumClasses = K_NUM_CLASSES;
 
 // Allocates a big block of memory for the pagemap once we reach more than
@@ -663,5 +658,5 @@
 // have small limits on the number of mmap() regions per
 // address-space.
-static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
+static const size_t kMinSystemAlloc = 1 << (20 - K_PAGE_SHIFT_MAX);
 
 // Number of objects to move between a per-thread list and a central
@@ -670,5 +665,5 @@
 // it too big may temporarily cause unnecessary memory wastage in the
 // per-thread free list until the scavenger cleans up the list.
-static int num_objects_to_move[kNumClasses];
+static int num_objects_to_move[K_NUM_CLASSES_MAX];
 
 // Maximum length we allow a per-thread free-list to have before we
@@ -766,8 +761,8 @@
 
 // Mapping from size class to max size storable in that class
-static size_t class_to_size[kNumClasses];
+static size_t class_to_size[K_NUM_CLASSES_MAX];
 
 // Mapping from size class to number of pages to allocate at a time
-static size_t class_to_pages[kNumClasses];
+static size_t class_to_pages[K_NUM_CLASSES_MAX];
 
 // Hardened singly linked list. We make this a class to allow compiler to
@@ -814,5 +809,6 @@
 // class is initially given one TCEntry which also means that the maximum any
 // one class can have is kNumClasses.
-static const int kNumTransferEntries = kNumClasses;
+#define K_NUM_TRANSFER_ENTRIES_MAX static_cast<int>(K_NUM_CLASSES_MAX)
+#define kNumTransferEntries static_cast<int>(kNumClasses)
 
 // Note: the following only works for "n"s that fit in 32-bits, but
@@ -918,4 +914,23 @@
 // Initialize the mapping arrays
 static void InitSizeClasses() {
+#if OS(DARWIN)
+  kPageShift = vm_page_shift;
+  switch (kPageShift) {
+  case 12:
+    kNumClasses = 68;
+    break;
+  case 14:
+    kNumClasses = 77;
+    break;
+  default:
+    CRASH();
+  };
+#else
+  kPageShift = 12;
+  kNumClasses = 68;
+#endif
+  kPageSize = 1 << kPageShift;
+  kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
+
   // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
   if (ClassIndex(0) < 0) {
@@ -1145,12 +1160,8 @@
 typedef uintptr_t PageID;
 
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
-
 // Convert byte size into pages. This won't overflow, but may return
 // an unreasonably large value if bytes is huge enough.
 static inline Length pages(size_t bytes) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   return (bytes >> kPageShift) +
       ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
@@ -1160,4 +1171,5 @@
 // allocated
 static size_t AllocationSize(size_t bytes) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   if (bytes > kMaxSize) {
     // Large object: we allocate an integral number of pages
@@ -1432,5 +1444,5 @@
   // end up getting all the TCEntries quota in the system we just preallocate
   // sufficient number of entries here.
-  TCEntry tc_slots_[kNumTransferEntries];
+  TCEntry tc_slots_[K_NUM_TRANSFER_ENTRIES_MAX];
 
   // Number of currently used cached entries in tc_slots_. This variable is
@@ -1654,5 +1666,5 @@
 template <int BITS> class MapSelector {
  public:
-  typedef TCMalloc_PageMap3<BITS - kPageShift> Type;
+  typedef TCMalloc_PageMap3<BITS - K_PAGE_SHIFT_MIN> Type;
   typedef PackedCache<BITS, uint64_t> CacheType;
 };
@@ -1672,5 +1684,5 @@
 template <> class MapSelector<64> {
  public:
-  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
+  typedef TCMalloc_PageMap3<64 - K_PAGE_SHIFT_MIN - kBitsUnusedOn64Bit> Type;
   typedef PackedCache<64, uint64_t> CacheType;
 };
@@ -1680,6 +1692,6 @@
 
 template <> class MapSelector<32> {
  public:
-  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
-  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
+  typedef TCMalloc_PageMap2<32 - K_PAGE_SHIFT_MIN> Type;
+  typedef PackedCache<32 - K_PAGE_SHIFT_MIN, uint16_t> CacheType;
 };
@@ -1778,4 +1790,5 @@
   // Return number of free bytes in heap
   uint64_t FreeBytes() const {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     return (static_cast<uint64_t>(free_pages_) << kPageShift);
   }
@@ -1913,4 +1926,6 @@
 void TCMalloc_PageHeap::init()
 {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
+
   pagemap_.init(MetaDataAlloc);
   pagemap_cache_ = PageMapCache(0);
@@ -1927,5 +1942,5 @@
   // Start scavenging at kMaxPages list
   scavenge_index_ = kMaxPages-1;
-  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
+  ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits));
   DLL_Init(&large_.normal, entropy_);
   DLL_Init(&large_.returned, entropy_);
@@ -2068,4 +2083,5 @@
 void TCMalloc_PageHeap::scavenge()
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
     size_t targetPageCount = std::max(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
@@ -2229,4 +2245,5 @@
 
 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT(n > 0);
   DLL_Remove(span, entropy_);
@@ -2265,4 +2282,5 @@
 static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     if (destination->decommitted && !other->decommitted) {
         TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
@@ -2368,4 +2386,5 @@
 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
 void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Fast path; not yet time to release memory
   scavenge_counter_ -= n;
@@ -2429,4 +2448,5 @@
 #ifdef WTF_CHANGES
 size_t TCMalloc_PageHeap::ReturnedBytes() const {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     size_t result = 0;
     for (unsigned s = 0; s < kMaxPages; s++) {
@@ -2444,4 +2464,5 @@
 #ifndef WTF_CHANGES
 static double PagesToMB(uint64_t pages) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   return (pages << kPageShift) / 1048576.0;
 }
@@ -2510,4 +2531,5 @@
 
 bool TCMalloc_PageHeap::GrowHeap(Length n) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT(kMaxPages >= kMinSystemAlloc);
   if (n > kMaxValidPages) return false;
@@ -2606,4 +2628,5 @@
 
 void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Walk backwards through list so that when we push these
   // spans on the "returned" list, we preserve the order.
@@ -2739,5 +2762,5 @@
   ThreadIdentifier tid_; // Which thread owns it
   bool in_setspecific_; // Called pthread_setspecific?
-  FreeList list_[kNumClasses]; // Array indexed by size-class
+  FreeList list_[K_NUM_CLASSES_MAX]; // Array indexed by size-class
 
   // We sample allocations, biased by the size of the allocation
@@ -2795,4 +2818,5 @@
   void enumerateFreeObjects(Finder& finder, const Reader& reader)
   {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
       list_[sizeClass].enumerateFreeObjects(finder, reader);
@@ -2807,5 +2831,5 @@
 // Central cache -- a collection of free-lists, one per size-class.
 // We have a separate lock per free-list to reduce contention.
-static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
+static TCMalloc_Central_FreeListPadded central_cache[K_NUM_CLASSES_MAX];
 
 // Page-level allocator
@@ -2963,4 +2987,5 @@
 
 void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   lock_.Init();
   size_class_ = cl;
@@ -2987,4 +3012,5 @@
 
 ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
   Span* span = pageheap->GetDescriptor(p);
@@ -3033,4 +3059,5 @@
 ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
     size_t locked_size_class, bool force) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   static int race_counter = 0;
   int t = race_counter++;  // Updated without a lock, but who cares.
@@ -3048,4 +3075,5 @@
 
 bool TCMalloc_Central_FreeList::MakeCacheSpace() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Is there room in the cache?
   if (used_slots_ < cache_size_) return true;
@@ -3102,4 +3130,5 @@
 
 void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   SpinLockHolder h(&lock_);
   if (N == num_objects_to_move[size_class_] &&
@@ -3184,4 +3213,5 @@
 // Fetch memory from the system and add to the central cache freelist.
 ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Release central list lock while operating on pageheap
   lock_.Unlock();
@@ -3270,4 +3300,5 @@
 
 void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   size_ = 0;
   next_ = NULL;
@@ -3292,4 +3323,5 @@
 
 void TCMalloc_ThreadCache::Cleanup() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // Put unused memory back into central cache
   for (size_t cl = 0; cl < kNumClasses; ++cl) {
@@ -3366,4 +3398,5 @@
 // Release idle memory to the central cache
 inline void TCMalloc_ThreadCache::Scavenge() {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   // If the low-water mark for the free list is L, it means we would
   // not have had to allocate anything from the central cache even if
@@ -3658,4 +3691,5 @@
 
 void TCMalloc_ThreadCache::Print() const {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   for (size_t cl = 0; cl < kNumClasses; ++cl) {
     MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
@@ -3679,4 +3713,5 @@
 // Get stats into "r". Also get per-size-class counts if class_count != NULL
 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   r->central_bytes = 0;
   r->transfer_bytes = 0;
@@ -3716,4 +3751,5 @@
 // WRITE stats to "out"
 static void DumpStats(TCMalloc_Printer* out, int level) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   TCMallocStats stats;
   uint64_t class_count[kNumClasses];
@@ -4004,4 +4040,5 @@
 #if !ASSERT_DISABLED
 static inline bool CheckCachedSizeClass(void *ptr) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   size_t cached_value = pageheap->GetSizeClassIfCached(p);
@@ -4018,4 +4055,5 @@
 
 static inline void* SpanToMallocResult(Span *span) {
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   ASSERT_SPAN_COMMITTED(span);
   pageheap->CacheSizeClass(span->start, 0);
@@ -4071,4 +4109,5 @@
   if (ptr == NULL) return;
   ASSERT(pageheap != NULL); // Should not call free() before malloc()
+  ASSERT(kPageShift && kNumClasses && kPageSize);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
   Span* span = pageheap->GetDescriptor(p);
@@ -4122,4 +4161,5 @@
   ASSERT(align > 0);
   if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+  ASSERT(kPageShift && kNumClasses && kPageSize);
 
   // Allocate at least one byte to avoid boundary conditions below
@@ -4441,4 +4481,7 @@
         new_size += Internal::ValidationBufferSize;
 #endif
+
+    ASSERT(pageheap != NULL); // Should not call realloc() before malloc()
+    ASSERT(kPageShift && kNumClasses && kPageSize);
 
     // Get the size of the old entry
@@ -4660,4 +4703,5 @@
 
 FastMallocStatistics fastMallocStatistics()
 {
+    ASSERT(kPageShift && kNumClasses && kPageSize);
     FastMallocStatistics statistics;
@@ -4681,4 +4725,7 @@
 size_t fastMallocSize(const void* ptr)
 {
+    if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+    ASSERT(kPageShift && kNumClasses && kPageSize);
+
 #if ENABLE(WTF_MALLOC_VALIDATION)
     return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
@@ -4792,4 +4839,5 @@
     int visit(void* ptr) const
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
         if (!ptr)
             return 1;
@@ -4839,4 +4887,6 @@
     void recordPendingRegions()
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
+
         bool recordRegionsContainingPointers = m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE;
         bool recordAllocations = m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE;
@@ -4887,4 +4937,5 @@
     int visit(void* ptr)
     {
+        ASSERT(kPageShift && kNumClasses && kPageSize);
         if (!ptr)
             return 1;