use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
#[cfg(feature = "nightly")]
use crate::UnavailableMutError;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
#[cfg(feature = "nightly")]
use core::mem::MaybeUninit;
use core::ptr::NonNull;
cfg_if! {
if #[cfg(all(
target_feature = "sse2",
any(target_arch = "x86", target_arch = "x86_64"),
not(miri)
))] {
mod sse2;
use sse2 as imp;
} else {
#[path = "generic.rs"]
mod generic;
use generic as imp;
}
}
mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
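// On stable we emulate the `likely`/`unlikely` intrinsics with a `#[cold]`
// helper: calling a cold function in the improbable branch hints the
// optimizer about the expected path.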
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
if !b {
cold()
}
b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
if b {
cold()
}
b
}
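// Returns the distance in units of `T` between two pointers, using the
// `offset_from` intrinsic on nightly and manual pointer arithmetic on stable.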
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
(to as usize - from as usize) / mem::size_of::<T>()
}
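/// Whether memory allocation errors should return an error or panic/abort
/// (via `handle_alloc_error`).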
#[derive(Copy, Clone)]
enum Fallibility {
Fallible,
Infallible,
}
impl Fallibility {
#[cfg_attr(feature = "inline-more", inline)]
fn capacity_overflow(self) -> TryReserveError {
match self {
Fallibility::Fallible => TryReserveError::CapacityOverflow,
Fallibility::Infallible => panic!("Hash table capacity overflow"),
}
}
#[cfg_attr(feature = "inline-more", inline)]
fn alloc_err(self, layout: Layout) -> TryReserveError {
match self {
Fallibility::Fallible => TryReserveError::AllocError { layout },
Fallibility::Infallible => handle_alloc_error(layout),
}
}
}
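/// Control byte value for an empty bucket.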
const EMPTY: u8 = 0b1111_1111;
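/// Control byte value for a deleted bucket (tombstone). Note that EMPTY has
/// its low bit set while DELETED does not, which is what `special_is_empty`
/// relies on.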
const DELETED: u8 = 0b1000_0000;
#[inline]
fn is_full(ctrl: u8) -> bool {
ctrl & 0x80 == 0
}
#[inline]
fn is_special(ctrl: u8) -> bool {
ctrl & 0x80 != 0
}
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
debug_assert!(is_special(ctrl));
ctrl & 0x01 != 0
}
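/// Primary hash function, used to select the initial bucket to probe from.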
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
hash as usize
}
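/// Secondary hash function, saved in the low 7 bits of the control byte.
///
/// This takes the top 7 bits of the hash. `hash_len` accounts for hash
/// functions that only fill a `usize` worth of bits, where the top 32 bits of
/// the `u64` would otherwise be zero on 32-bit platforms.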
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
let top7 = hash >> (hash_len * 8 - 7);
(top7 & 0x7f) as u8
}
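/// Probe sequence based on triangular numbers, which (for a power-of-two
/// table size) is guaranteed to visit every group of buckets exactly once.
/// Each step advances by an increasing multiple of `Group::WIDTH`, wrapping
/// around via `bucket_mask`.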
struct ProbeSeq {
pos: usize,
stride: usize,
}
impl ProbeSeq {
#[inline]
fn move_next(&mut self, bucket_mask: usize) {
debug_assert!(
self.stride <= bucket_mask,
"Went past end of probe sequence"
);
self.stride += Group::WIDTH;
self.pos += self.stride;
self.pos &= bucket_mask;
}
}
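/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor (7/8) into account.
///
/// Returns `None` if an overflow occurs.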
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
debug_assert_ne!(cap, 0);
if cap < 8 {
return Some(if cap < 4 { 4 } else { 8 });
}
let adjusted_cap = cap.checked_mul(8)? / 7;
Some(adjusted_cap.next_power_of_two())
}
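/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.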
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
if bucket_mask < 8 {
bucket_mask
} else {
((bucket_mask + 1) / 8) * 7
}
}
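/// Non-generic part of the table layout calculation: the element size and the
/// alignment required for the control bytes. The allocation stores the
/// elements first, followed by the control bytes aligned to `ctrl_align`.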
#[derive(Copy, Clone)]
struct TableLayout {
size: usize,
ctrl_align: usize,
}
impl TableLayout {
#[inline]
fn new<T>() -> Self {
let layout = Layout::new::<T>();
Self {
size: layout.size(),
ctrl_align: usize::max(layout.align(), Group::WIDTH),
}
}
#[inline]
fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
debug_assert!(buckets.is_power_of_two());
let TableLayout { size, ctrl_align } = self;
let ctrl_offset =
size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
Some((
unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
ctrl_offset,
))
}
}
#[cfg_attr(feature = "inline-more", inline)]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
TableLayout::new::<T>().calculate_layout_for(buckets)
}
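/// A reference to a hash table bucket containing a `T`.
///
/// For types with a non-zero size, `ptr` points one past the element it
/// refers to, since the element array grows downwards from the control bytes.
/// For zero-sized types, `ptr` instead encodes the bucket index (plus one) so
/// that `bucket_index` and `erase` still work.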
pub struct Bucket<T> {
ptr: NonNull<T>,
}
unsafe impl<T> Send for Bucket<T> {}
impl<T> Clone for Bucket<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self { ptr: self.ptr }
}
}
impl<T> Bucket<T> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
let ptr = if mem::size_of::<T>() == 0 {
(index + 1) as *mut T
} else {
base.as_ptr().sub(index)
};
Self {
ptr: NonNull::new_unchecked(ptr),
}
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
if mem::size_of::<T>() == 0 {
self.ptr.as_ptr() as usize - 1
} else {
offset_from(base.as_ptr(), self.ptr.as_ptr())
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn as_ptr(&self) -> *mut T {
if mem::size_of::<T>() == 0 {
mem::align_of::<T>() as *mut T
} else {
unsafe { self.ptr.as_ptr().sub(1) }
}
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn next_n(&self, offset: usize) -> Self {
let ptr = if mem::size_of::<T>() == 0 {
(self.ptr.as_ptr() as usize + offset) as *mut T
} else {
self.ptr.as_ptr().sub(offset)
};
Self {
ptr: NonNull::new_unchecked(ptr),
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn read(&self) -> T {
self.as_ptr().read()
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn as_ref<'a>(&self) -> &'a T {
&*self.as_ptr()
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
&mut *self.as_ptr()
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
}
}
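/// A raw hash table with an unsafe API.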
pub struct RawTable<T, A: Allocator + Clone = Global> {
table: RawTableInner<A>,
marker: PhantomData<T>,
}
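/// Non-generic part of `RawTable`, which allows most of the table logic to be
/// instantiated only once regardless of the element type.
///
/// `ctrl` points at the start of the control-byte array; the elements are
/// stored immediately before it in the same allocation:
/// `[padding], T1, T2, ..., Tlast, C1, C2, ...` with `ctrl` pointing at `C1`.
/// `bucket_mask` is one less than the (power-of-two) number of buckets.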
struct RawTableInner<A> {
bucket_mask: usize,
ctrl: NonNull<u8>,
growth_left: usize,
items: usize,
alloc: A,
}
impl<T> RawTable<T, Global> {
#[cfg_attr(feature = "inline-more", inline)]
pub const fn new() -> Self {
Self {
table: RawTableInner::new_in(Global),
marker: PhantomData,
}
}
#[cfg(feature = "raw")]
pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
Self::try_with_capacity_in(capacity, Global)
}
pub fn with_capacity(capacity: usize) -> Self {
Self::with_capacity_in(capacity, Global)
}
}
impl<T, A: Allocator + Clone> RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn new_in(alloc: A) -> Self {
Self {
table: RawTableInner::new_in(alloc),
marker: PhantomData,
}
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new_uninitialized(
alloc: A,
buckets: usize,
fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
debug_assert!(buckets.is_power_of_two());
Ok(Self {
table: RawTableInner::new_uninitialized(
alloc,
TableLayout::new::<T>(),
buckets,
fallibility,
)?,
marker: PhantomData,
})
}
fn fallible_with_capacity(
alloc: A,
capacity: usize,
fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
Ok(Self {
table: RawTableInner::fallible_with_capacity(
alloc,
TableLayout::new::<T>(),
capacity,
fallibility,
)?,
marker: PhantomData,
})
}
#[cfg(feature = "raw")]
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
}
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        // With `Fallibility::Infallible`, capacity overflow panics and
        // allocation failure goes through `handle_alloc_error`, so the error
        // branch below can never be taken.
        match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
            Ok(table) => table,
            Err(_) => unsafe { hint::unreachable_unchecked() },
        }
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn free_buckets(&mut self) {
self.table.free_buckets(TableLayout::new::<T>())
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn data_end(&self) -> NonNull<T> {
NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
}
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
self.data_end().as_ptr().wrapping_sub(self.buckets())
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
bucket.to_base_index(self.data_end())
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.table.bucket_mask, 0);
debug_assert!(index < self.buckets());
Bucket::from_base_index(self.data_end(), index)
}
#[cfg_attr(feature = "inline-more", inline)]
#[deprecated(since = "0.8.1", note = "use erase or remove instead")]
pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
let index = self.bucket_index(item);
self.table.erase(index)
}
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
#[allow(deprecated)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
self.erase_no_drop(&item);
item.drop();
}
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
if let Some(bucket) = self.find(hash, eq) {
unsafe { self.erase(bucket) };
true
} else {
false
}
}
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
#[allow(deprecated)]
pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
self.erase_no_drop(&item);
item.read()
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
match self.find(hash, eq) {
Some(bucket) => Some(unsafe { self.remove(bucket) }),
None => None,
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear_no_drop(&mut self) {
self.table.clear_no_drop()
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
let mut self_ = guard(self, |self_| self_.clear_no_drop());
unsafe {
self_.drop_elements();
}
}
unsafe fn drop_elements(&mut self) {
if mem::needs_drop::<T>() && self.len() != 0 {
for item in self.iter() {
item.drop();
}
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
let min_size = usize::max(self.table.items, min_size);
if min_size == 0 {
*self = Self::new_in(self.table.alloc.clone());
return;
}
let min_buckets = match capacity_to_buckets(min_size) {
Some(buckets) => buckets,
None => return,
};
if min_buckets < self.buckets() {
if self.table.items == 0 {
*self = Self::with_capacity_in(min_size, self.table.alloc.clone())
} else {
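                // With `Fallibility::Infallible` the resize never returns
                // `Err`, so this branch is unreachable.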
if self
.resize(min_size, hasher, Fallibility::Infallible)
.is_err()
{
unsafe { hint::unreachable_unchecked() }
}
}
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
if additional > self.table.growth_left {
if self
.reserve_rehash(additional, hasher, Fallibility::Infallible)
.is_err()
{
unsafe { hint::unreachable_unchecked() }
}
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
if additional > self.table.growth_left {
self.reserve_rehash(additional, hasher, Fallibility::Fallible)
} else {
Ok(())
}
}
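    /// Out-of-line slow path for `reserve` and `try_reserve`: either rehashes
    /// in place (when the required capacity is at most half of the full
    /// capacity, so reclaiming DELETED slots is enough) or grows the table.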
#[cold]
#[inline(never)]
fn reserve_rehash(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
let new_items = match self.table.items.checked_add(additional) {
Some(new_items) => new_items,
None => return Err(fallibility.capacity_overflow()),
};
let full_capacity = bucket_mask_to_capacity(self.table.bucket_mask);
if new_items <= full_capacity / 2 {
self.rehash_in_place(hasher);
Ok(())
} else {
self.resize(
usize::max(new_items, full_capacity + 1),
hasher,
fallibility,
)
}
}
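    /// Rehashes the contents of the table in place, i.e. without changing the
    /// allocation, converting all DELETED slots back into usable capacity.
    ///
    /// If `hasher` panics, elements that have not been rehashed yet are
    /// dropped, but the table is left in a consistent state.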
fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
unsafe {
self.table.prepare_rehash_in_place();
let mut guard = guard(&mut self.table, move |self_| {
if mem::needs_drop::<T>() {
for i in 0..self_.buckets() {
if *self_.ctrl(i) == DELETED {
self_.set_ctrl(i, EMPTY);
self_.bucket::<T>(i).drop();
self_.items -= 1;
}
}
}
self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
});
'outer: for i in 0..guard.buckets() {
if *guard.ctrl(i) != DELETED {
continue;
}
'inner: loop {
let item = guard.bucket(i);
let hash = hasher(item.as_ref());
let new_i = guard.find_insert_slot(hash);
if likely(guard.is_in_same_group(i, new_i, hash)) {
guard.set_ctrl_h2(i, hash);
continue 'outer;
}
let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
if prev_ctrl == EMPTY {
guard.set_ctrl(i, EMPTY);
guard.bucket(new_i).copy_from_nonoverlapping(&item);
continue 'outer;
} else {
debug_assert_eq!(prev_ctrl, DELETED);
mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
continue 'inner;
}
}
}
guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
mem::forget(guard);
}
}
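    /// Allocates a new table with the given capacity, moves the contents of
    /// the current table into it, and swaps the two.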
fn resize(
&mut self,
capacity: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
unsafe {
let mut new_table =
self.table
.prepare_resize(TableLayout::new::<T>(), capacity, fallibility)?;
for item in self.iter() {
let hash = hasher(item.as_ref());
let (index, _) = new_table.prepare_insert_slot(hash);
new_table.bucket(index).copy_from_nonoverlapping(&item);
}
mem::swap(&mut self.table, &mut new_table);
Ok(())
}
}
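    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.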
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
unsafe {
let mut index = self.table.find_insert_slot(hash);
let old_ctrl = *self.table.ctrl(index);
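            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone: the number of EMPTY
            // slots does not change in that case.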
if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
self.reserve(1, hasher);
index = self.table.find_insert_slot(hash);
}
self.table.record_item_insert_at(index, old_ctrl, hash);
let bucket = self.bucket(index);
bucket.write(value);
bucket
}
}
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
unsafe {
match self.table.prepare_insert_no_grow(hash) {
Ok(index) => {
let bucket = self.bucket(index);
bucket.write(value);
Ok(bucket)
}
Err(()) => Err(value),
}
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
unsafe { self.insert(hash, value, hasher).as_mut() }
}
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
unsafe {
let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
let bucket = self.table.bucket(index);
self.table.growth_left -= special_is_empty(old_ctrl) as usize;
bucket.write(value);
self.table.items += 1;
bucket
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
where
F: FnOnce(T) -> Option<T>,
{
let index = self.bucket_index(&bucket);
let old_ctrl = *self.table.ctrl(index);
debug_assert!(is_full(old_ctrl));
let old_growth_left = self.table.growth_left;
let item = self.remove(bucket);
if let Some(new_item) = f(item) {
self.table.growth_left = old_growth_left;
self.table.set_ctrl(index, old_ctrl);
self.table.items += 1;
self.bucket(index).write(new_item);
true
} else {
false
}
}
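    /// Searches for an element in the table that matches the given hash and
    /// for which `eq` returns true.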
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
unsafe {
for bucket in self.iter_hash(hash) {
let elm = bucket.as_ref();
if likely(eq(elm)) {
return Some(bucket);
}
}
None
}
}
#[inline]
pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
match self.find(hash, eq) {
Some(bucket) => Some(unsafe { bucket.as_ref() }),
None => None,
}
}
#[inline]
pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
match self.find(hash, eq) {
Some(bucket) => Some(unsafe { bucket.as_mut() }),
None => None,
}
}
#[cfg(feature = "nightly")]
pub fn get_each_mut<const N: usize>(
&mut self,
hashes: [u64; N],
mut eq: impl FnMut(usize, &T) -> bool,
) -> [Result<&'_ mut T, UnavailableMutError>; N] {
let mut buckets: [MaybeUninit<Option<Bucket<T>>>; N] =
unsafe { MaybeUninit::uninit().assume_init() };
for i in 0..N {
buckets[i] = MaybeUninit::new(self.find(hashes[i], |k| eq(i, k)));
}
let buckets: [Option<Bucket<T>>; N] = unsafe { MaybeUninit::array_assume_init(buckets) };
let mut out: [MaybeUninit<Result<&'_ mut T, UnavailableMutError>>; N] =
unsafe { MaybeUninit::uninit().assume_init() };
for i in 0..N {
out[i] = MaybeUninit::new(
#[allow(clippy::never_loop)]
'outer: loop {
for j in 0..i {
match (&buckets[j], &buckets[i]) {
(Some(prev), Some(cur)) if prev.as_ptr() == cur.as_ptr() => {
break 'outer Err(UnavailableMutError::Duplicate(j));
}
_ => {}
}
}
break match &buckets[i] {
None => Err(UnavailableMutError::Absent),
Some(bkt) => unsafe { Ok(bkt.as_mut()) },
};
},
)
}
unsafe { MaybeUninit::array_assume_init(out) }
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn capacity(&self) -> usize {
self.table.items + self.table.growth_left
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn len(&self) -> usize {
self.table.items
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn buckets(&self) -> usize {
self.table.bucket_mask + 1
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter(&self) -> RawIter<T> {
let data = Bucket::from_base_index(self.data_end(), 0);
RawIter {
iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
items: self.table.items,
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
RawIterHash::new(self, hash)
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn drain(&mut self) -> RawDrain<'_, T, A> {
unsafe {
let iter = self.iter();
self.drain_iter_from(iter)
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
debug_assert_eq!(iter.len(), self.len());
RawDrain {
iter,
table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
orig_table: NonNull::from(self),
marker: PhantomData,
}
}
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
debug_assert_eq!(iter.len(), self.len());
let alloc = self.table.alloc.clone();
let allocation = self.into_allocation();
RawIntoIter {
iter,
allocation,
marker: PhantomData,
alloc,
}
}
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
let alloc = if self.table.is_empty_singleton() {
None
} else {
let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
Some(lco) => lco,
None => unsafe { hint::unreachable_unchecked() },
};
Some((
unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
))
};
mem::forget(self);
alloc
}
}
unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}
impl<A> RawTableInner<A> {
#[cfg_attr(feature = "inline-more", inline)]
const fn new_in(alloc: A) -> Self {
Self {
ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
bucket_mask: 0,
items: 0,
growth_left: 0,
alloc,
}
}
}
impl<A: Allocator + Clone> RawTableInner<A> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new_uninitialized(
alloc: A,
table_layout: TableLayout,
buckets: usize,
fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
debug_assert!(buckets.is_power_of_two());
let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
Some(lco) => lco,
None => return Err(fallibility.capacity_overflow()),
};
let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
Ok(block) => block.cast(),
Err(_) => return Err(fallibility.alloc_err(layout)),
};
let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
Ok(Self {
ctrl,
bucket_mask: buckets - 1,
items: 0,
growth_left: bucket_mask_to_capacity(buckets - 1),
alloc,
})
}
#[inline]
fn fallible_with_capacity(
alloc: A,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
) -> Result<Self, TryReserveError> {
if capacity == 0 {
Ok(Self::new_in(alloc))
} else {
unsafe {
let buckets =
capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
Ok(result)
}
}
}
#[inline]
unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
let index = self.find_insert_slot(hash);
let old_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
(index, old_ctrl)
}
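    /// Searches for an empty or deleted bucket which is suitable for
    /// inserting a new element.
    ///
    /// There must be at least 1 empty bucket in the table.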
#[inline]
fn find_insert_slot(&self, hash: u64) -> usize {
let mut probe_seq = self.probe_seq(hash);
loop {
unsafe {
let group = Group::load(self.ctrl(probe_seq.pos));
if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
let result = (probe_seq.pos + bit) & self.bucket_mask;
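                    // In tables smaller than the group width, the trailing
                    // control bytes outside the range of the table are filled
                    // with EMPTY entries. These will trigger a match here, but
                    // once masked may point to a full bucket that is already
                    // occupied. We detect this situation and perform a second
                    // scan starting at the beginning of the table, which is
                    // guaranteed (by the load factor) to find an empty slot
                    // before reaching the trailing control bytes.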
if unlikely(is_full(*self.ctrl(result))) {
debug_assert!(self.bucket_mask < Group::WIDTH);
debug_assert_ne!(probe_seq.pos, 0);
return Group::load_aligned(self.ctrl(0))
.match_empty_or_deleted()
.lowest_set_bit_nonzero();
}
return result;
}
}
probe_seq.move_next(self.bucket_mask);
}
}
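    /// Prepares for rehashing in place: converts all full control bytes to
    /// DELETED and all special (EMPTY/DELETED) control bytes to EMPTY, then
    /// fixes up the replicated trailing control bytes.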
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_rehash_in_place(&mut self) {
for i in (0..self.buckets()).step_by(Group::WIDTH) {
let group = Group::load_aligned(self.ctrl(i));
let group = group.convert_special_to_empty_and_full_to_deleted();
group.store_aligned(self.ctrl(i));
}
if self.buckets() < Group::WIDTH {
self.ctrl(0)
.copy_to(self.ctrl(Group::WIDTH), self.buckets());
} else {
self.ctrl(0)
.copy_to(self.ctrl(self.buckets()), Group::WIDTH);
}
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.bucket_mask, 0);
debug_assert!(index < self.buckets());
Bucket::from_base_index(self.data_end(), index)
}
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn data_end<T>(&self) -> NonNull<T> {
NonNull::new_unchecked(self.ctrl.as_ptr().cast())
}
#[inline]
fn probe_seq(&self, hash: u64) -> ProbeSeq {
ProbeSeq {
pos: h1(hash) & self.bucket_mask,
stride: 0,
}
}
#[cfg(feature = "raw")]
#[inline]
unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
let index = self.find_insert_slot(hash);
let old_ctrl = *self.ctrl(index);
if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
Err(())
} else {
self.record_item_insert_at(index, old_ctrl, hash);
Ok(index)
}
}
#[inline]
unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
self.growth_left -= special_is_empty(old_ctrl) as usize;
self.set_ctrl_h2(index, hash);
self.items += 1;
}
#[inline]
fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
let probe_seq_pos = self.probe_seq(hash).pos;
let probe_index =
|pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
probe_index(i) == probe_index(new_i)
}
#[inline]
unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
self.set_ctrl(index, h2(hash))
}
#[inline]
unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
let prev_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
prev_ctrl
}
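    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.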
#[inline]
unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
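        // Replicate the first Group::WIDTH control bytes at the end of the
        // array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // This mirroring lets a group load that starts near the end of the
        // table see the control bytes that logically wrap around to the
        // beginning.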
let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
*self.ctrl(index) = ctrl;
*self.ctrl(index2) = ctrl;
}
#[inline]
unsafe fn ctrl(&self, index: usize) -> *mut u8 {
debug_assert!(index < self.num_ctrl_bytes());
self.ctrl.as_ptr().add(index)
}
#[inline]
fn buckets(&self) -> usize {
self.bucket_mask + 1
}
#[inline]
fn num_ctrl_bytes(&self) -> usize {
self.bucket_mask + 1 + Group::WIDTH
}
#[inline]
fn is_empty_singleton(&self) -> bool {
self.bucket_mask == 0
}
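    /// Allocates a new table with the given capacity and returns it wrapped
    /// in a scope guard that frees the new allocation if a panic occurs while
    /// the caller is moving elements into it.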
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_resize(
&self,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
debug_assert!(self.items <= capacity);
let mut new_table = RawTableInner::fallible_with_capacity(
self.alloc.clone(),
table_layout,
capacity,
fallibility,
)?;
new_table.growth_left -= self.items;
new_table.items = self.items;
Ok(guard(new_table, move |self_| {
if !self_.is_empty_singleton() {
self_.free_buckets(table_layout);
}
}))
}
#[inline]
unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
Some(lco) => lco,
None => hint::unreachable_unchecked(),
};
self.alloc.deallocate(
NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
layout,
);
}
#[inline]
fn clear_no_drop(&mut self) {
if !self.is_empty_singleton() {
unsafe {
self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
}
}
self.items = 0;
self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
}
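    /// Erases the element at the given index, marking its slot as either
    /// EMPTY or DELETED depending on whether other probe sequences may have
    /// passed through it.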
#[inline]
unsafe fn erase(&mut self, index: usize) {
debug_assert!(is_full(*self.ctrl(index)));
let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
let empty_before = Group::load(self.ctrl(index_before)).match_empty();
let empty_after = Group::load(self.ctrl(index)).match_empty();
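        // If this slot sits inside a run of at least Group::WIDTH non-empty
        // slots (checked by combining the empty positions just before and
        // just after it), a probe window may have seen the whole group as
        // full and moved on, so we must leave a DELETED tombstone to keep
        // those probe sequences intact. Otherwise the slot can become EMPTY
        // again and count towards growth_left.
        //
        // Note that `leading_zeros` counts bytes at the end of the
        // `empty_before` group while `trailing_zeros` counts bytes at the
        // start of the `empty_after` group.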
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
DELETED
} else {
self.growth_left += 1;
EMPTY
};
self.set_ctrl(index, ctrl);
self.items -= 1;
}
}
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
fn clone(&self) -> Self {
if self.table.is_empty_singleton() {
Self::new_in(self.table.alloc.clone())
} else {
unsafe {
let mut new_table = ManuallyDrop::new(
match Self::new_uninitialized(
self.table.alloc.clone(),
self.table.buckets(),
Fallibility::Infallible,
) {
Ok(table) => table,
Err(_) => hint::unreachable_unchecked(),
},
);
new_table.clone_from_spec(self, |new_table| {
new_table.free_buckets();
});
ManuallyDrop::into_inner(new_table)
}
}
}
fn clone_from(&mut self, source: &Self) {
if source.table.is_empty_singleton() {
*self = Self::new_in(self.table.alloc.clone());
} else {
unsafe {
self.drop_elements();
if self.buckets() != source.buckets() {
if !self.table.is_empty_singleton() {
self.free_buckets();
}
(self as *mut Self).write(
match Self::new_uninitialized(
self.table.alloc.clone(),
source.buckets(),
Fallibility::Infallible,
) {
Ok(table) => table,
Err(_) => hint::unreachable_unchecked(),
},
);
}
self.clone_from_spec(source, |self_| {
self_.clear_no_drop()
});
}
}
}
}
trait RawTableClone {
unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
default_fn! {
unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
self.clone_from_impl(source, on_panic);
}
}
}
#[cfg(feature = "nightly")]
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
source
.table
.ctrl(0)
.copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
source
.data_start()
.copy_to_nonoverlapping(self.data_start(), self.table.buckets());
self.table.items = source.table.items;
self.table.growth_left = source.table.growth_left;
}
}
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
source
.table
.ctrl(0)
.copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
let mut guard = guard((0, &mut *self), |(index, self_)| {
if mem::needs_drop::<T>() && self_.len() != 0 {
for i in 0..=*index {
if is_full(*self_.table.ctrl(i)) {
self_.bucket(i).drop();
}
}
}
on_panic(self_);
});
for from in source.iter() {
let index = source.bucket_index(&from);
let to = guard.1.bucket(index);
to.write(from.as_ref().clone());
guard.0 = index;
}
mem::forget(guard);
self.table.items = source.table.items;
self.table.growth_left = source.table.growth_left;
}
#[cfg(feature = "raw")]
pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
if self.table.buckets() != source.table.buckets()
&& bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
{
self.clear();
let guard_self = guard(&mut *self, |self_| {
self_.clear();
});
unsafe {
for item in source.iter() {
let item = item.as_ref().clone();
let hash = hasher(&item);
let (index, _) = guard_self.table.prepare_insert_slot(hash);
guard_self.bucket(index).write(item);
}
}
mem::forget(guard_self);
self.table.items = source.table.items;
self.table.growth_left -= source.table.items;
} else {
self.clone_from(source);
}
}
}
impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn default() -> Self {
Self::new_in(Default::default())
}
}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
if !self.table.is_empty_singleton() {
unsafe {
self.drop_elements();
self.free_buckets();
}
}
}
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
if !self.table.is_empty_singleton() {
unsafe {
self.drop_elements();
self.free_buckets();
}
}
}
}
impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
type Item = T;
type IntoIter = RawIntoIter<T, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_iter(self) -> RawIntoIter<T, A> {
unsafe {
let iter = self.iter();
self.into_iter_from(iter)
}
}
}
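/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track the number of remaining items, so its `size_hint` only provides
/// an upper bound.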
pub(crate) struct RawIterRange<T> {
current_group: BitMask,
data: Bucket<T>,
next_ctrl: *const u8,
end: *const u8,
}
impl<T> RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
debug_assert_ne!(len, 0);
debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
let end = ctrl.add(len);
let current_group = Group::load_aligned(ctrl).match_full();
let next_ctrl = ctrl.add(Group::WIDTH);
Self {
current_group,
data,
next_ctrl,
end,
}
}
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "rayon")]
pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
unsafe {
if self.end <= self.next_ctrl {
(self, None)
} else {
let len = offset_from(self.end, self.next_ctrl);
debug_assert_eq!(len % Group::WIDTH, 0);
let mid = (len / 2) & !(Group::WIDTH - 1);
let tail = Self::new(
self.next_ctrl.add(mid),
self.data.next_n(Group::WIDTH).next_n(mid),
len - mid,
);
debug_assert_eq!(
self.data.next_n(Group::WIDTH).next_n(mid).ptr,
tail.data.ptr
);
debug_assert_eq!(self.end, tail.end);
self.end = self.next_ctrl.add(mid);
debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
(self, Some(tail))
}
}
}
}
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}
impl<T> Clone for RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
next_ctrl: self.next_ctrl,
current_group: self.current_group,
end: self.end,
}
}
}
impl<T> Iterator for RawIterRange<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
loop {
if let Some(index) = self.current_group.lowest_set_bit() {
self.current_group = self.current_group.remove_lowest_bit();
return Some(self.data.next_n(index));
}
if self.next_ctrl >= self.end {
return None;
}
self.current_group = Group::load_aligned(self.next_ctrl).match_full();
self.data = self.data.next_n(Group::WIDTH);
self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
}
}
}
#[cfg_attr(feature = "inline-more", inline)]
fn size_hint(&self) -> (usize, Option<usize>) {
(
0,
Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
)
}
}
impl<T> FusedIterator for RawIterRange<T> {}
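/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, so the
/// table must not be freed (or grown/shrunk) while iterating. If buckets are
/// erased or inserted during iteration, the iterator must be kept in sync via
/// `reflect_remove`/`reflect_insert` (available with the `raw` feature).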
pub struct RawIter<T> {
pub(crate) iter: RawIterRange<T>,
items: usize,
}
impl<T> RawIter<T> {
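    /// Refresh the iterator so that it reflects a removal from the given
    /// bucket.
    ///
    /// For the iterator to remain valid, this method must be called once for
    /// each removed bucket before `next` is called again.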
#[cfg(feature = "raw")]
pub fn reflect_remove(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, false);
}
#[cfg(feature = "raw")]
pub fn reflect_insert(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, true);
}
#[cfg(feature = "raw")]
fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
unsafe {
if b.as_ptr() > self.iter.data.as_ptr() {
return;
}
if self.iter.next_ctrl < self.iter.end
&& b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
{
if cfg!(debug_assertions) {
let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
assert!(is_full(*ctrl));
}
if is_insert {
self.items += 1;
} else {
self.items -= 1;
}
return;
}
if let Some(index) = self.iter.current_group.lowest_set_bit() {
let next_bucket = self.iter.data.next_n(index);
if b.as_ptr() > next_bucket.as_ptr() {
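                    // The toggled bucket is "before" the bucket the iterator
                    // would yield next. The iterator has already passed it,
                    // so there is nothing to do.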
} else {
let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
let was_full = self.iter.current_group.flip(our_bit);
debug_assert_ne!(was_full, is_insert);
if is_insert {
self.items += 1;
} else {
self.items -= 1;
}
if cfg!(debug_assertions) {
if b.as_ptr() == next_bucket.as_ptr() {
debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
} else {
debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
}
}
}
} else {
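                // The current group has no remaining full buckets to yield,
                // so we must have already iterated past the toggled bucket.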
}
}
}
unsafe fn drop_elements(&mut self) {
if mem::needs_drop::<T>() && self.len() != 0 {
for item in self {
item.drop();
}
}
}
}
impl<T> Clone for RawIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
items: self.items,
}
}
}
impl<T> Iterator for RawIter<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Bucket<T>> {
if let Some(b) = self.iter.next() {
self.items -= 1;
Some(b)
} else {
debug_assert_eq!(self.items, 0);
None
}
}
#[cfg_attr(feature = "inline-more", inline)]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.items, Some(self.items))
}
}
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
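/// Iterator which consumes a table and returns elements by value, freeing the
/// allocation when dropped.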
pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
iter: RawIter<T>,
allocation: Option<(NonNull<u8>, Layout)>,
marker: PhantomData<T>,
alloc: A,
}
impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
self.iter.drop_elements();
if let Some((ptr, layout)) = self.allocation {
self.alloc.deallocate(ptr, layout);
}
}
}
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
self.iter.drop_elements();
if let Some((ptr, layout)) = self.allocation {
self.alloc.deallocate(ptr, layout);
}
}
}
}
impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<T> {
unsafe { Some(self.iter.next()?.read()) }
}
#[cfg_attr(feature = "inline-more", inline)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
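/// Iterator which consumes elements from a table without freeing its
/// allocation. When the drain is dropped, any remaining elements are dropped
/// and the now-empty table, with its capacity intact, is moved back into the
/// original `RawTable`.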
pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
iter: RawIter<T>,
table: ManuallyDrop<RawTable<T, A>>,
orig_table: NonNull<RawTable<T, A>>,
marker: PhantomData<&'a RawTable<T, A>>,
}
impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> where T: Send {}
unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> where T: Sync {}
impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
self.iter.drop_elements();
self.table.clear_no_drop();
self.orig_table
.as_ptr()
.copy_from_nonoverlapping(&*self.table, 1);
}
}
}
impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<T> {
unsafe {
let item = self.iter.next()?;
Some(item.read())
}
}
#[cfg_attr(feature = "inline-more", inline)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
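/// Iterator over occupied buckets that could match a given hash: it walks the
/// probe sequence for the hash and yields every bucket whose control byte
/// matches `h2(hash)`. The caller still needs to compare keys, since `h2`
/// collisions are possible.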
pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
inner: RawIterHashInner<'a, A>,
_marker: PhantomData<T>,
}
struct RawIterHashInner<'a, A: Allocator + Clone> {
table: &'a RawTableInner<A>,
h2_hash: u8,
probe_seq: ProbeSeq,
group: Group,
bitmask: BitMaskIter,
}
impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
RawIterHash {
inner: RawIterHashInner::new(&table.table, hash),
_marker: PhantomData,
}
}
}
impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
unsafe {
let h2_hash = h2(hash);
let probe_seq = table.probe_seq(hash);
let group = Group::load(table.ctrl(probe_seq.pos));
let bitmask = group.match_byte(h2_hash).into_iter();
RawIterHashInner {
table,
h2_hash,
probe_seq,
group,
bitmask,
}
}
}
}
impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
type Item = Bucket<T>;
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
match self.inner.next() {
Some(index) => Some(self.inner.table.bucket(index)),
None => None,
}
}
}
}
impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
loop {
if let Some(bit) = self.bitmask.next() {
let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
return Some(index);
}
if likely(self.group.match_empty().any_bit_set()) {
return None;
}
self.probe_seq.move_next(self.table.bucket_mask);
self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
}
}
}
}
#[cfg(test)]
mod test_map {
use super::*;
#[test]
fn rehash() {
let mut table = RawTable::new();
let hasher = |i: &u64| *i;
for i in 0..100 {
table.insert(i, i, hasher);
}
for i in 0..100 {
unsafe {
assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
}
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
}
table.rehash_in_place(hasher);
for i in 0..100 {
unsafe {
assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
}
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
}
}
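    #[test]
    fn insert_remove_len() {
        // Basic sanity check of insert/remove_entry bookkeeping, using the
        // identity function as the hasher (sufficient here since the keys are
        // small and unique).
        let mut table = RawTable::new();
        let hasher = |i: &u64| *i;
        for i in 0..32 {
            table.insert(i, i, hasher);
        }
        assert_eq!(table.len(), 32);
        assert!(table.capacity() >= 32);
        for i in 0..16 {
            assert_eq!(table.remove_entry(i, |x| *x == i), Some(i));
        }
        assert_eq!(table.len(), 16);
        for i in 16..32 {
            assert_eq!(
                table.find(i, |x| *x == i).map(|b| unsafe { b.read() }),
                Some(i)
            );
        }
    }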
}