// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Only including the -inl.h file directly makes the linter complain.
#include "src/objects/swiss-name-dictionary.h"

#include "src/heap/heap-inl.h"
#include "src/objects/swiss-name-dictionary-inl.h"

namespace v8 {
namespace internal {

// static
Handle<SwissNameDictionary> SwissNameDictionary::DeleteEntry(
    Isolate* isolate, Handle<SwissNameDictionary> table, InternalIndex entry) {
  …
}

// static
template <typename IsolateT>
Handle<SwissNameDictionary> SwissNameDictionary::Rehash(
    IsolateT* isolate, DirectHandle<SwissNameDictionary> table,
    int new_capacity) {
  …
}

bool SwissNameDictionary::EqualsForTesting(Tagged<SwissNameDictionary> other) {
  …
}

// static
Handle<SwissNameDictionary> SwissNameDictionary::ShallowCopy(
    Isolate* isolate, Handle<SwissNameDictionary> table) {
  …
}

// static
Handle<SwissNameDictionary> SwissNameDictionary::Shrink(
    Isolate* isolate, Handle<SwissNameDictionary> table) {
  …
}

// TODO(v8:11388) Copying all data into a std::vector and then re-adding into
// the table doesn't seem like a good algorithm. Abseil's Swiss Tables come
// with a clever algorithm for re-hashing in place: It first changes the
// control table, effectively changing the roles of full, empty and deleted
// buckets. It then moves each entry to its new bucket by swapping entries
// (see drop_deletes_without_resize in Abseil's raw_hash_set.h). This
// algorithm could generally be adapted to work on our insertion order
// preserving implementation, too. However, it would require a mapping from
// hash table buckets back to enumeration indices (see the illustrative
// sketch further below). This could either be created in this function
// (requiring a vector with Capacity() entries and a separate pass over the
// enumeration table) or by creating this backwards mapping ahead of time and
// storing it somewhere in the main table or the meta table, for those
// SwissNameDictionaries that we know will be in-place rehashed, most notably
// those stored in the snapshot.
template <typename IsolateT>
void SwissNameDictionary::Rehash(IsolateT* isolate) {
  …
}

// TODO(emrich,v8:11388): This is almost an identical copy of
// HashTable<..>::NumberOfEnumerableProperties. Consolidate both versions
// elsewhere (e.g., hash-table-utils)?
int SwissNameDictionary::NumberOfEnumerableProperties() {
  …
}

// TODO(emrich, v8:11388): This is almost an identical copy of
// Dictionary<..>::SlowReverseLookup. Consolidate both versions elsewhere
// (e.g., hash-table-utils)?
Tagged<Object> SwissNameDictionary::SlowReverseLookup(Isolate* isolate,
                                                      Tagged<Object> value) {
  …
}
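// The following helper is only an illustrative sketch of the
// bucket-to-enumeration-index mapping mentioned in the TODO above the
// in-place Rehash() overload; it is not part of the implementation. It
// assumes accessors along the lines of Capacity(), UsedCapacity() and
// EntryForEnumerationIndex(), that <vector> is available, and the helper
// name and the kNoEnumerationIndex marker are made up for illustration. The
// idea is a single extra pass over the enumeration table that records, for
// each bucket referenced there, which enumeration table slot points at it,
// so entries could later be swapped in place without losing insertion order.
namespace {

std::vector<int> BucketToEnumerationIndexMappingSketch(
    Tagged<SwissNameDictionary> table) {
  constexpr int kNoEnumerationIndex = -1;
  // One slot per hash table bucket, as described in the TODO.
  std::vector<int> bucket_to_enum(table->Capacity(), kNoEnumerationIndex);
  for (int enum_index = 0; enum_index < table->UsedCapacity(); ++enum_index) {
    int bucket = table->EntryForEnumerationIndex(enum_index);
    // Buckets not referenced by the enumeration table keep
    // kNoEnumerationIndex; all others remember which enumeration table slot
    // must be updated when their entry is moved to a new bucket.
    bucket_to_enum[bucket] = enum_index;
  }
  return bucket_to_enum;
}

}  // namespace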
// The largest value we ever have to store in the enumeration table is
// Capacity() - 1. The largest value we ever have to store for the present or
// deleted element count is MaxUsableCapacity(Capacity()). All data in the
// meta table is unsigned. Using this, we verify the values of the constants
// |kMax1ByteMetaTableCapacity| and |kMax2ByteMetaTableCapacity|.
static_assert …;
static_assert …;
static_assert …;
static_assert …;

template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
    Isolate* isolate, Tagged<ByteArray> meta_table, int capacity);
template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
    LocalIsolate* isolate, Tagged<ByteArray> meta_table, int capacity);

template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Rehash(LocalIsolate* isolate,
                            DirectHandle<SwissNameDictionary> table,
                            int new_capacity);
template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
SwissNameDictionary::Rehash(Isolate* isolate,
                            DirectHandle<SwissNameDictionary> table,
                            int new_capacity);

template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(
    LocalIsolate* isolate);
template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(Isolate* isolate);

constexpr int SwissNameDictionary::kInitialCapacity;
constexpr int SwissNameDictionary::kGroupWidth;

}  // namespace internal
}  // namespace v8