#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    // So we use the MIN_HASH_LEN constant to handle this.
    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
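// A quick numeric sketch of the shift above, assuming a 64-bit platform
// where MIN_HASH_LEN == 8 (it is defined as the smaller of
// size_of::<usize>() and size_of::<u64>()). `h2_demo` and the test module
// are hypothetical names for illustration, not part of this file:
#[cfg(test)]
mod h2_sketch {
    fn h2_demo(hash: u64) -> u8 {
        const MIN_HASH_LEN: usize = 8;
        ((hash >> (MIN_HASH_LEN * 8 - 7)) & 0x7f) as u8
    }

    #[test]
    fn keeps_only_the_top_seven_bits() {
        assert_eq!(h2_demo(1 << 63), 0x40); // highest bit -> highest h2 bit
        assert_eq!(h2_demo(u64::MAX), 0x7f); // all bits set -> full 7-bit mask
        assert_eq!(h2_demo((1 << 57) - 1), 0x00); // the low 57 bits are ignored
    }
}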
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
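// A quick check of the mapping above, as a hypothetical test module (not
// part of the original file). Since `bucket_mask` is always a power of two
// minus one, the two branches give "one slot free" and "12.5% free"
// respectively, i.e. a maximum load factor of 87.5% for large tables:
#[cfg(test)]
mod capacity_sketch {
    use super::bucket_mask_to_capacity;

    #[test]
    fn reserved_slots() {
        assert_eq!(bucket_mask_to_capacity(3), 3); // 4 buckets, 1 slot free
        assert_eq!(bucket_mask_to_capacity(7), 7); // 8 buckets, 1 slot free
        assert_eq!(bucket_mask_to_capacity(15), 14); // 16 buckets, 2 free
        assert_eq!(bucket_mask_to_capacity(63), 56); // 64 buckets, 8 free
    }
}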
unsafe fn resize(
    &mut self,
    capacity: usize,
    hasher: impl Fn(&T) -> u64,
    fallibility: Fallibility,
) -> Result<(), TryReserveError> {
    // SAFETY:
    // 1. The caller of this function guarantees that `capacity >= self.table.items`.
    // 2. We know for sure that `alloc` and `layout` match the [`Allocator`] and
    //    [`TableLayout`] that were used to allocate this table.
    // 3. The caller ensures that the control bytes of the `RawTableInner`
    //    are already initialized.
    self.table.resize_inner(
        &self.alloc,
        capacity,
        &|table, index| hasher(table.bucket::<T>(index).as_ref()),
        fallibility,
        Self::TABLE_LAYOUT,
    )
}
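// Note the closure's shape: `resize_inner` below lives on the non-generic
// inner table, so `resize` erases the element type by wrapping the element
// hasher `Fn(&T) -> u64` into an index hasher before crossing the `dyn`
// boundary. A minimal standalone sketch of that erasure pattern, under
// hypothetical names (`RawStore`, `Store`, `sum_hashes`), not hashbrown's
// actual API:
struct RawStore {
    len: usize,
}

impl RawStore {
    // Type-erased core: compiled once, knows nothing about `T`.
    fn sum_hashes_inner(&mut self, hasher: &dyn Fn(&mut Self, usize) -> u64) -> u64 {
        let mut sum = 0u64;
        for index in 0..self.len {
            sum = sum.wrapping_add(hasher(self, index));
        }
        sum
    }
}

struct Store<T> {
    raw: RawStore,
    items: Vec<T>,
}

impl<T> Store<T> {
    // Thin generic shim, mirroring `resize` above: only this closure is
    // monomorphized per element type.
    fn sum_hashes(&mut self, hasher: impl Fn(&T) -> u64) -> u64 {
        let items = &self.items;
        self.raw.sum_hashes_inner(&|_raw, index| hasher(&items[index]))
    }
}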
#[allow(clippy::inline_always)]
#[inline(always)]
unsafe fn resize_inner<A>(
    &mut self,
    alloc: &A,
    capacity: usize,
    hasher: &dyn Fn(&mut Self, usize) -> u64,
    fallibility: Fallibility,
    layout: TableLayout,
) -> Result<(), TryReserveError>
where
    A: Allocator,
{
    // SAFETY: We know for sure that `alloc` and `layout` match the [`Allocator`]
    // and [`TableLayout`] that were used to allocate this table.
    let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
    // SAFETY: We know for sure that `RawTableInner` will outlive the
    // returned `FullBucketsIndices` iterator, and the caller of this
    // function ensures that the control bytes are properly initialized.
    for full_byte_index in self.full_buckets_indices() {
        // This may panic.
        let hash = hasher(self, full_byte_index);
        // SAFETY:
        // We can use a simpler version of insert() here since:
        // 1. There are no DELETED entries.
        // 2. We know there is enough space in the table.
        // 3. All elements are unique.
        // 4. The caller of this function guarantees that `capacity > 0`,
        //    so `new_table` must already have some allocated memory.
        // 5. We set the `growth_left` and `items` fields of the new table
        //    after the loop.
        // 6. We insert into the table, at the returned index, the data
        //    matching the given hash immediately after calling this function.
        let (new_index, _) = new_table.prepare_insert_slot(hash);
        // SAFETY:
        //
        // * `src` is valid for reads of `layout.size` bytes, since the
        //   table is alive and the `full_byte_index` is guaranteed to be
        //   within bounds (see `FullBucketsIndices::next_impl`);
        //
        // * `dst` is valid for writes of `layout.size` bytes, since the
        //   caller ensures that `table_layout` matches the [`TableLayout`]
        //   that was used to allocate the old table and we have the `new_index`
        //   returned by `prepare_insert_slot`.
        //
        // * Both `src` and `dst` are properly aligned.
        //
        // * Both `src` and `dst` point to different regions of memory.
        ptr::copy_nonoverlapping(
            self.bucket_ptr(full_byte_index, layout.size),
            new_table.bucket_ptr(new_index, layout.size),
            layout.size,
        );
    }
    // The hash function didn't panic, so we can safely set the
    // `growth_left` and `items` fields of the new table.
    new_table.growth_left -= self.items;
    new_table.items = self.items;
    // We successfully copied all elements without panicking. Now replace
    // self with the new table. The old table will have its memory freed, but
    // the items will not be dropped (since they have been moved into the
    // new table).
    // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
    // that was used to allocate this table.
    mem::swap(self, &mut new_table);

    Ok(())
}
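// The swap-and-drop trick above can be illustrated with plain `Vec`s: move
// the payload into the new storage first, swap the handles, and let the
// local variable carry the old, now element-free allocation out of scope,
// where it is deallocated without running element destructors. A hedged
// sketch with a hypothetical `grow` helper, not hashbrown's code:
fn grow(buf: &mut Vec<u64>) {
    let mut bigger = Vec::with_capacity(buf.capacity().max(1) * 2);
    bigger.extend(buf.drain(..)); // move the payload, don't drop it
    std::mem::swap(buf, &mut bigger);
    // `bigger` now owns the old, empty buffer and frees it here.
}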