1mod raw;
11
12use hashbrown::raw::RawTable;
13
14use crate::vec::{Drain, Vec};
15use core::cmp;
16use core::fmt;
17use core::mem::replace;
18use core::ops::RangeBounds;
19
20use crate::equivalent::Equivalent;
21use crate::util::simplify_range;
22use crate::{Bucket, Entries, HashValue};
23
/// Core of the map that does not depend on the hasher state `S`.
///
/// Invariant (from the code below): `indices` stores positions into
/// `entries`, keyed by each entry's stored hash, so `indices.len()`
/// always equals `entries.len()`.
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: RawTable<usize>,
    /// entries in their insertion order, each bucket carrying (hash, key, value).
    entries: Vec<Bucket<K, V>>,
}
31
32#[inline(always)]
33fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
34 move |&i| entries[i].hash.get()
35}
36
37#[inline]
38fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
39 key: &'a Q,
40 entries: &'a [Bucket<K, V>],
41) -> impl Fn(&usize) -> bool + 'a {
42 move |&i| Q::equivalent(key, &entries[i].key)
43}
44
45#[inline]
46fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
47 let erased = table.erase_entry(hash.get(), move |&i| i == index);
48 debug_assert!(erased);
49}
50
51#[inline]
52fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
53 let index = table
54 .get_mut(hash.get(), move |&i| i == old)
55 .expect("index not found");
56 *index = new;
57}
58
59impl<K, V> Clone for IndexMapCore<K, V>
60where
61 K: Clone,
62 V: Clone,
63{
64 fn clone(&self) -> Self {
65 let indices = self.indices.clone();
66 let mut entries = Vec::with_capacity(indices.capacity());
67 entries.clone_from(&self.entries);
68 IndexMapCore { indices, entries }
69 }
70
71 fn clone_from(&mut self, other: &Self) {
72 let hasher = get_hash(&other.entries);
73 self.indices.clone_from_with_hasher(&other.indices, hasher);
74 if self.entries.capacity() < other.entries.len() {
75 self.reserve_entries();
77 }
78 self.entries.clone_from(&other.entries);
79 }
80}
81
82impl<K, V> fmt::Debug for IndexMapCore<K, V>
83where
84 K: fmt::Debug,
85 V: fmt::Debug,
86{
87 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
88 f.debug_struct("IndexMapCore")
89 .field("indices", &raw::DebugIndices(&self.indices))
90 .field("entries", &self.entries)
91 .finish()
92 }
93}
94
// Crate-internal accessor trait giving views of the ordered entry storage.
impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    /// Consumes the map, returning the ordered entries.
    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    /// Borrows the ordered entries.
    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    /// Mutably borrows the ordered entries (positions are unchanged,
    /// so the index table stays valid).
    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    /// Runs `f` over the entries, then rebuilds the index table from
    /// scratch — `f` may reorder entries arbitrarily.
    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}
121
impl<K, V> IndexMapCore<K, V> {
    /// Creates an empty map core without allocating.
    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    /// Creates a map core with room for `n` entries in both storages.
    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    /// Number of entries (the index table and entry vector stay in sync).
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    /// Usable capacity: the smaller of the two storages' capacities.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        cmp::min(self.indices.capacity(), self.entries.capacity())
    }

    /// Removes all entries, keeping allocated capacity.
    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    /// Truncates the map to its first `len` entries; no-op if already shorter.
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            // Drop the tail's index slots first, while the entries are
            // still in place for hash lookups.
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    /// Removes the entries in `range`, returning a draining iterator.
    /// Indices after the range are shifted down by `erase_indices`.
    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    /// Parallel version of `drain`, available with the `rayon` feature.
    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    /// Splits off entries `at..` into a new map; `self` keeps `..at`.
    ///
    /// Panics if `at` is out of bounds.
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        // Remove the tail from our index table before moving the entries out.
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        // Build a fresh index table for the split-off entries (indices 0..len).
        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    /// Reserves capacity for `additional` more entries.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Match the entry vector's capacity to whatever the table grew to.
        self.reserve_entries();
    }

    /// Reserves entry capacity to match the index table's capacity.
    fn reserve_entries(&mut self) {
        // `indices.capacity() >= indices.len() == entries.len()`, so this
        // subtraction cannot underflow.
        let additional = self.indices.capacity() - self.entries.len();
        self.entries.reserve_exact(additional);
    }

    /// Shrinks both storages toward `min_capacity`.
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Removes and returns the last entry, or `None` if empty.
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            // After `pop`, `entries.len()` is the index the entry occupied.
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Appends a key-value pair *without* checking for an existing key,
    /// and returns its new index.
    fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.entries.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        if i == self.entries.capacity() {
            // Grow the entry vector in step with the table, which just
            // reserved for at least one more item.
            self.reserve_entries();
        }
        self.entries.push(Bucket { hash, key, value });
        i
    }

    /// Returns the index of the entry whose key is equivalent to `key`.
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    /// Inserts `key`/`value`. If the key already exists, replaces its value
    /// (keeping the old key and position) and returns `(index, Some(old))`;
    /// otherwise appends and returns `(new_index, None)`.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.get_index_of(hash, &key) {
            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
            None => (self.push(hash, key, value), None),
        }
    }

    /// Removes the entry for `key` while preserving the order of the
    /// remaining entries (later entries shift down by one).
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Order-preserving removal by index; `None` if out of bounds.
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Second half of a shift-removal: `index` has already been erased
    /// from the table; renumber the tail, then remove the entry itself.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Every stored index after `index` decreases by one.
        self.decrement_indices(index + 1, self.entries.len());

        // `Vec::remove` shifts the entries themselves.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Decrements all stored indices in the range `start..end`.
    ///
    /// The index `start - 1` must not currently exist in `self.indices`
    /// (decrementing would collide with it), and all entries must still be
    /// at their original positions so hash lookups find them.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shifting a large portion: one full sweep over the raw table
            // is cheaper than a hash lookup per shifted entry.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Shifting a small portion: look up each affected entry by hash.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increments all stored indices in the range `start..end`.
    ///
    /// The index `end` must not currently exist in `self.indices`, and all
    /// entries must still be at their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Large shift: sweep the whole table once.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Small shift: update per entry. Walk in reverse so each
            // increment lands on an index value that is not yet taken.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    /// Moves the entry at position `from` to position `to`, rotating the
    /// entries in between and renumbering their indices.
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            // Park the moving entry's slot at a sentinel (`usize::MAX`)
            // so the shifts below cannot collide with it.
            update_index(&mut self.indices, from_hash, from, usize::MAX);

            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Point the sentinel slot at the final position.
            update_index(&mut self.indices, from_hash, usize::MAX, to);
        }
    }

    /// Removes the entry for `key` by swapping in the last entry
    /// (O(1), but the last entry changes position).
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Swap-removal by index; `None` if out of bounds.
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Second half of a swap-removal: `index` has already been erased from
    /// the table; swap-remove the entry and fix up the moved entry's index.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        let entry = self.entries.swap_remove(index);

        // If an entry was swapped into `index`, it came from the old last
        // position, which is the current `entries.len()`.
        if let Some(entry) = self.entries.get(index) {
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erases `start..end` from `indices`, and shifts `end..` indices down
    /// to `start..`. All entries must still be at their original positions
    /// in `entries` — the caller (e.g. `drain`, `truncate`, `split_off`)
    /// mutates `entries` afterwards.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Pick the cheapest strategy for the mix of erased/shifted entries.
        if erased == 0 {
            // Nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Surviving entries are few: rebuild the table from scratch.
            self.indices.clear();

            raw::insert_bulk_no_grow(&mut self.indices, start_entries);
            raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Affected entries are few: erase and renumber individually.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Otherwise, one linear sweep over the raw table.
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    /// Retains, in order, the entries for which `keep` returns true; the
    /// index table is rebuilt afterwards if anything was removed.
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        let len = self.entries.len();
        // Compact survivors toward the front with swaps, counting deletions.
        let mut n_deleted = 0;
        for i in 0..len {
            let will_keep = {
                let entry = &mut self.entries[i];
                keep(&mut entry.key, &mut entry.value)
            };
            if !will_keep {
                n_deleted += 1;
            } else if n_deleted > 0 {
                self.entries.swap(i - n_deleted, i);
            }
        }
        if n_deleted > 0 {
            self.entries.truncate(len - n_deleted);
            self.rebuild_hash_table();
        }
    }

    /// Rebuilds the index table to match the current entry order.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    /// Reverses the entry order in place and remaps every stored index.
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to rebuild: each index i maps to len - i - 1.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}
489
/// Entry for an existing key-value pair or a vacant location to insert one.
pub enum Entry<'a, K, V> {
    /// Existing slot with an equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}
498
499impl<'a, K, V> Entry<'a, K, V> {
500 pub fn or_insert(self, default: V) -> &'a mut V {
505 match self {
506 Entry::Occupied(entry) => entry.into_mut(),
507 Entry::Vacant(entry) => entry.insert(default),
508 }
509 }
510
511 pub fn or_insert_with<F>(self, call: F) -> &'a mut V
516 where
517 F: FnOnce() -> V,
518 {
519 match self {
520 Entry::Occupied(entry) => entry.into_mut(),
521 Entry::Vacant(entry) => entry.insert(call()),
522 }
523 }
524
525 pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
531 where
532 F: FnOnce(&K) -> V,
533 {
534 match self {
535 Entry::Occupied(entry) => entry.into_mut(),
536 Entry::Vacant(entry) => {
537 let value = call(&entry.key);
538 entry.insert(value)
539 }
540 }
541 }
542
543 pub fn key(&self) -> &K {
546 match *self {
547 Entry::Occupied(ref entry) => entry.key(),
548 Entry::Vacant(ref entry) => entry.key(),
549 }
550 }
551
552 pub fn index(&self) -> usize {
554 match *self {
555 Entry::Occupied(ref entry) => entry.index(),
556 Entry::Vacant(ref entry) => entry.index(),
557 }
558 }
559
560 pub fn and_modify<F>(self, f: F) -> Self
562 where
563 F: FnOnce(&mut V),
564 {
565 match self {
566 Entry::Occupied(mut o) => {
567 f(o.get_mut());
568 Entry::Occupied(o)
569 }
570 x => x,
571 }
572 }
573
574 pub fn or_default(self) -> &'a mut V
579 where
580 V: Default,
581 {
582 match self {
583 Entry::Occupied(entry) => entry.into_mut(),
584 Entry::Vacant(entry) => entry.insert(V::default()),
585 }
586 }
587}
588
589impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
590 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
591 match *self {
592 Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
593 Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
594 }
595 }
596}
597
598pub use self::raw::OccupiedEntry;
599
impl<K, V> OccupiedEntry<'_, K, V> {
    /// Replaces the entry's value, returning the previous one.
    /// The key and the entry's position are unchanged.
    pub fn insert(&mut self, value: V) -> V {
        replace(self.get_mut(), value)
    }

    /// Removes the entry's value — alias for [`Self::swap_remove`]
    /// (i.e. it does NOT preserve the order of remaining entries).
    pub fn remove(self) -> V {
        self.swap_remove()
    }

    /// Removes the value by swapping in the last entry (O(1); the last
    /// entry changes position).
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

    /// Removes the value while preserving the order of remaining entries
    /// (later entries shift down).
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

    /// Removes the key-value pair — alias for swap-removal
    /// (order of remaining entries is NOT preserved).
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }
}
643
644impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
645 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
646 f.debug_struct(stringify!(OccupiedEntry))
647 .field("key", self.key())
648 .field("value", self.get())
649 .finish()
650 }
651}
652
/// A view into a vacant entry in the map — part of the [`Entry`] enum.
pub struct VacantEntry<'a, K, V> {
    // The map this entry would insert into.
    map: &'a mut IndexMapCore<K, V>,
    // Precomputed hash of `key`.
    hash: HashValue,
    // The key that was looked up and would be inserted.
    key: K,
}
662
663impl<'a, K, V> VacantEntry<'a, K, V> {
664 pub fn key(&self) -> &K {
666 &self.key
667 }
668
669 pub fn into_key(self) -> K {
671 self.key
672 }
673
674 pub fn index(&self) -> usize {
676 self.map.len()
677 }
678
679 pub fn insert(self, value: V) -> &'a mut V {
682 let i = self.map.push(self.hash, self.key, value);
683 &mut self.map.entries[i].value
684 }
685}
686
687impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
688 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
689 f.debug_tuple(stringify!(VacantEntry))
690 .field(self.key())
691 .finish()
692 }
693}
694
#[test]
fn assert_send_sync() {
    // Compile-time check: these types must be thread-safe to share/send.
    fn check<T: Send + Sync>() {}
    check::<IndexMapCore<i32, i32>>();
    check::<Entry<'_, i32, i32>>();
}