Kokkos Core Kernels Package  Version of the Day
Kokkos_UnorderedMap.hpp
Go to the documentation of this file.
1 /*
2 //@HEADER
3 // ************************************************************************
4 //
5 // Kokkos v. 3.0
6 // Copyright (2020) National Technology & Engineering
7 // Solutions of Sandia, LLC (NTESS).
8 //
9 // Under the terms of Contract DE-NA0003525 with NTESS,
10 // the U.S. Government retains certain rights in this software.
11 //
12 // Redistribution and use in source and binary forms, with or without
13 // modification, are permitted provided that the following conditions are
14 // met:
15 //
16 // 1. Redistributions of source code must retain the above copyright
17 // notice, this list of conditions and the following disclaimer.
18 //
19 // 2. Redistributions in binary form must reproduce the above copyright
20 // notice, this list of conditions and the following disclaimer in the
21 // documentation and/or other materials provided with the distribution.
22 //
23 // 3. Neither the name of the Corporation nor the names of the
24 // contributors may be used to endorse or promote products derived from
25 // this software without specific prior written permission.
26 //
27 // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
28 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
31 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 //
39 // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
40 //
41 // ************************************************************************
42 //@HEADER
43 */
44 
50 
51 #ifndef KOKKOS_UNORDERED_MAP_HPP
52 #define KOKKOS_UNORDERED_MAP_HPP
53 
54 #include <Kokkos_Core.hpp>
55 #include <Kokkos_Functional.hpp>
56 
57 #include <Kokkos_Bitset.hpp>
58 
59 #include <impl/Kokkos_Traits.hpp>
60 #include <impl/Kokkos_UnorderedMap_impl.hpp>
61 
62 #include <iostream>
63 
64 #include <cstdint>
65 #include <stdexcept>
66 
67 namespace Kokkos {
68 
// Sentinel index returned by lookup/insert operations when no valid slot
// exists (all bits set).
enum : unsigned { UnorderedMapInvalidIndex = ~0u };
70 
84 
 private:
  // Bit-packed status word: the top three bits are outcome flags, the low
  // 29 bits count how far down a hash list the insert probe walked.
  enum Status : uint32_t {
    SUCCESS = 1u << 31,          // claimed and appended a brand-new entry
    EXISTING = 1u << 30,         // key was already present in the map
    FREED_EXISTING = 1u << 29,   // released a speculatively claimed slot
    LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)  // low 29 bits
  };
93 
 public:
  /// Did the insert claim a brand-new entry (SUCCESS bit set)?
  KOKKOS_FORCEINLINE_FUNCTION
  bool success() const { return (m_status & SUCCESS); }
98 
  /// Was the key already present in the map (EXISTING bit set)?
  KOKKOS_FORCEINLINE_FUNCTION
  bool existing() const { return (m_status & EXISTING); }
102 
  /// Did the insert fail, i.e. was no index ever assigned to this key?
  KOKKOS_FORCEINLINE_FUNCTION
  bool failed() const { return m_index == UnorderedMapInvalidIndex; }
106 
  /// Did this thread claim a free entry and then release it again because
  /// another thread inserted the same key first (FREED_EXISTING bit set)?
  KOKKOS_FORCEINLINE_FUNCTION
  bool freed_existing() const { return (m_status & FREED_EXISTING); }
111 
  /// Number of hash-list links traversed while probing (low 29 bits of
  /// the status word).
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
116 
  /// Index where the key can be found, valid as long as failed() is false.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t index() const { return m_index; }
120 
  /// Default state: invalid index, no status flags — i.e. a failed result.
  KOKKOS_FORCEINLINE_FUNCTION
  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}
123 
124  KOKKOS_FORCEINLINE_FUNCTION
125  void increment_list_position() {
126  m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
127  }
128 
  /// Record that the key was found at index i. arg_freed_existing notes
  /// whether a speculatively claimed slot had to be released; the
  /// accumulated list position is preserved.
  KOKKOS_FORCEINLINE_FUNCTION
  void set_existing(uint32_t i, bool arg_freed_existing) {
    m_index = i;
    m_status =
        EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
  }
135 
  /// Record a successful insert of a new entry at index i, preserving the
  /// accumulated list position.
  KOKKOS_FORCEINLINE_FUNCTION
  void set_success(uint32_t i) {
    m_index = i;
    m_status = SUCCESS | list_position();
  }

 private:
  uint32_t m_index;   // slot index, or UnorderedMapInvalidIndex on failure
  uint32_t m_status;  // outcome flags | probe count (see enum Status)
};
146 
202 template <typename Key, typename Value,
203  typename Device = Kokkos::DefaultExecutionSpace,
204  typename Hasher = pod_hash<typename std::remove_const<Key>::type>,
205  typename EqualTo =
206  pod_equal_to<typename std::remove_const<Key>::type> >
208  private:
209  using host_mirror_space =
210  typename ViewTraits<Key, Device, void, void>::host_mirror_space;
211 
212  public:
214 
215 
216  // key_types
217  using declared_key_type = Key;
218  using key_type = typename std::remove_const<declared_key_type>::type;
219  using const_key_type = typename std::add_const<key_type>::type;
220 
221  // value_types
222  using declared_value_type = Value;
223  using value_type = typename std::remove_const<declared_value_type>::type;
224  using const_value_type = typename std::add_const<value_type>::type;
225 
226  using device_type = Device;
227  using execution_space = typename Device::execution_space;
228  using hasher_type = Hasher;
229  using equal_to_type = EqualTo;
230  using size_type = uint32_t;
231 
232  // map_types
233  using declared_map_type =
234  UnorderedMap<declared_key_type, declared_value_type, device_type,
235  hasher_type, equal_to_type>;
236  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
237  hasher_type, equal_to_type>;
238  using modifiable_map_type =
239  UnorderedMap<const_key_type, value_type, device_type, hasher_type,
240  equal_to_type>;
241  using const_map_type = UnorderedMap<const_key_type, const_value_type,
242  device_type, hasher_type, equal_to_type>;
243 
244  static const bool is_set = std::is_same<void, value_type>::value;
245  static const bool has_const_key =
246  std::is_same<const_key_type, declared_key_type>::value;
247  static const bool has_const_value =
248  is_set || std::is_same<const_value_type, declared_value_type>::value;
249 
250  static const bool is_insertable_map =
251  !has_const_key && (is_set || !has_const_value);
252  static const bool is_modifiable_map = has_const_key && !has_const_value;
253  static const bool is_const_map = has_const_key && has_const_value;
254 
256 
257  using HostMirror =
259 
260  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
261 
263 
264  private:
265  enum : size_type { invalid_index = ~static_cast<size_type>(0) };
266 
267  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
268 
269  using key_type_view = std::conditional_t<
270  is_insertable_map, View<key_type *, device_type>,
272 
273  using value_type_view = std::conditional_t<
274  is_insertable_map || is_modifiable_map,
277 
278  using size_type_view = std::conditional_t<
279  is_insertable_map, View<size_type *, device_type>,
281 
282  using bitset_type =
283  std::conditional_t<is_insertable_map, Bitset<execution_space>,
285 
286  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
287  enum { num_scalars = 3 };
289 
290  public:
292 
293 
  /// Constructor.
  ///
  /// \param capacity_hint requested maximum number of entries; rounded up
  ///        by calculate_capacity() (a hint of 0 still allocates).
  /// \param hasher   function object: key -> uint32_t hash.
  /// \param equal_to function object comparing two keys for equality.
  ///
  /// Throws std::runtime_error when instantiated for a non-insertable
  /// (const key) specialization.
  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : m_bounded_insert(true),
        m_hasher(hasher),
        m_equal_to(equal_to),
        m_size(),
        m_available_indexes(calculate_capacity(capacity_hint)),
        m_hash_lists(view_alloc(WithoutInitializing, "UnorderedMap hash list"),
                     Impl::find_hash_size(capacity())),
        m_next_index(view_alloc(WithoutInitializing, "UnorderedMap next index"),
                     capacity() + 1)  // +1 so that the *_at functions can
                                      // always return a valid reference
        ,
        m_keys("UnorderedMap keys", capacity() + 1),
        // a set stores no payload, so m_values stays a single dummy element
        m_values("UnorderedMap values", (is_set ? 1 : capacity() + 1)),
        m_scalars("UnorderedMap scalars") {
    if (!is_insertable_map) {
      throw std::runtime_error(
          "Cannot construct a non-insertable (i.e. const key_type) "
          "unordered_map");
    }

    // Mark every hash bucket head and every chain link as empty.
    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
  }
325 
  /// Clear the "an insert has failed" flag. Host-callable.
  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
327 
  /// Build a histogram of hash-list lengths over this map (diagnostics).
  histogram_type get_histogram() { return histogram_type(*this); }
329 
  /// Clear all entries in the table and re-enable bounded insertion.
  /// Host-callable only (performs deep copies).
  void clear() {
    m_bounded_insert = true;

    if (capacity() == 0) return;

    // Release every claimed slot.
    m_available_indexes.clear();

    // Reset every bucket head and chain link to "empty".
    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
    {
      const key_type tmp = key_type();
      Kokkos::deep_copy(m_keys, tmp);
    }
    // NOTE(review): this resets m_values only in the set case, where it is
    // a single dummy element; for a map the payloads are left stale. That
    // looks harmless (their indices are no longer valid) — confirm intent.
    if (is_set) {
      const impl_value_type tmp = impl_value_type();
      Kokkos::deep_copy(m_values, tmp);
    }
    { Kokkos::deep_copy(m_scalars, 0); }
  }
350 
351  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
352  return (m_keys.is_allocated() && m_values.is_allocated() &&
353  m_scalars.is_allocated());
354  }
355 
366  bool rehash(size_type requested_capacity = 0) {
367  const bool bounded_insert = (capacity() == 0) || (size() == 0u);
368  return rehash(requested_capacity, bounded_insert);
369  }
370 
  /// Change the capacity of the map, rebuilding the hash structure.
  /// Never shrinks below the current number of entries. Host-callable;
  /// returns false for non-insertable specializations.
  bool rehash(size_type requested_capacity, bool bounded_insert) {
    if (!is_insertable_map) return false;

    const size_type curr_size = size();
    // Never request less capacity than there are live entries.
    requested_capacity =
        (requested_capacity < curr_size) ? curr_size : requested_capacity;

    insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);

    if (curr_size) {
      // Disable the insert-attempt bound while re-inserting existing
      // entries so the rehash itself can never fail.
      tmp.m_bounded_insert = false;
      Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
      f.apply();
    }
    tmp.m_bounded_insert = bounded_insert;

    // Shallow-assign the rebuilt map into *this.
    *this = tmp;

    return true;
  }
391 
  /// The number of entries currently in the map. Host-callable only.
  /// When the map was modified since the last query this recounts the
  /// slot bitset and caches the result in the mutable m_size member.
  size_type size() const {
    if (capacity() == 0u) return 0u;
    if (modified()) {
      m_size = m_available_indexes.count();
      reset_flag(modified_idx);
    }
    return m_size;
  }
407 
  /// Has any insert() failed since the flag was last reset? Host-callable.
  bool failed_insert() const { return get_flag(failed_insert_idx); }
414 
415  bool erasable() const {
416  return is_insertable_map ? get_flag(erasable_idx) : false;
417  }
418 
  /// Enter the erasable state (required before calling erase()).
  /// Returns true when the state actually changed. Host-callable; fences
  /// so in-flight device work completes before the flag flips.
  bool begin_erase() {
    bool result = !erasable();
    if (is_insertable_map && result) {
      execution_space().fence();
      set_flag(erasable_idx);
      execution_space().fence();
    }
    return result;
  }
428 
  /// Leave the erasable state, unlinking all entries erased since
  /// begin_erase(). Returns true when the state actually changed.
  /// Host-callable.
  bool end_erase() {
    bool result = erasable();
    if (is_insertable_map && result) {
      execution_space().fence();
      // Unlink and reset every entry whose index bit was cleared.
      Impl::UnorderedMapErase<declared_map_type> f(*this);
      f.apply();
      execution_space().fence();
      reset_flag(erasable_idx);
    }
    return result;
  }
440 
  /// The maximum number of entries the map can hold (one bit per slot
  /// in the availability bitset).
  KOKKOS_FORCEINLINE_FUNCTION
  size_type capacity() const { return m_available_indexes.size(); }
447 
  /// The number of hash-table "buckets" (heads of the entry lists).
  KOKKOS_INLINE_FUNCTION
  size_type hash_capacity() const { return m_hash_lists.extent(0); }
460 
461  //---------------------------------------------------------------------------
462  //---------------------------------------------------------------------------
463 
  /// Insert the given key/value pair. Device-callable and thread-safe.
  ///
  /// The returned insert_result reports whether a new entry was created
  /// (success), the key already existed (existing), or the insert failed
  /// — because the map is full, non-insertable, or in the erasable state.
  /// \param k key to insert
  /// \param v value to store (ignored when the map is a set)
  KOKKOS_INLINE_FUNCTION
  insert_result insert(key_type const &k,
                       impl_value_type const &v = impl_value_type()) const {
    insert_result result;

    // Inserting is illegal for const-key maps, empty maps, and inside the
    // begin_erase()/end_erase() window.
    if (!is_insertable_map || capacity() == 0u ||
        m_scalars((int)erasable_idx)) {
      return result;
    }

    // Mark the map modified so size() recounts on its next call.
    if (!m_scalars((int)modified_idx)) {
      m_scalars((int)modified_idx) = true;
    }

    int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);

    const size_type hash_value = m_hasher(k);
    const size_type hash_list = hash_value % m_hash_lists.extent(0);

    size_type *curr_ptr = &m_hash_lists[hash_list];
    size_type new_index = invalid_index;

    // Start probing the free-slot bitset at a position proportional to
    // this bucket, spreading claimed slots across the whole capacity.
    // The double multiply avoids 32-bit overflow of hash_list * capacity.
    size_type index_hint = static_cast<size_type>(
        (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));

    size_type find_attempts = 0;

    // With bounded insertion, give up after a fixed number of failed
    // attempts to claim a free slot; otherwise scan the whole bitset.
    enum : unsigned { bounded_find_attempts = 32u };
    const size_type max_attempts =
        (m_bounded_insert &&
         (bounded_find_attempts < m_available_indexes.max_hint()))
            ? bounded_find_attempts
            : m_available_indexes.max_hint();

    bool not_done = true;

#if defined(__MIC__)
#pragma noprefetch
#endif
    while (not_done) {
      // Continue searching the unordered list for this key,
      // list will only be appended during insert phase.
      // Need volatile_load as other threads may be appending.
      size_type curr = volatile_load(curr_ptr);

      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
#if defined(__MIC__)
#pragma noprefetch
#endif
      // Walk the chain until the key matches or the chain ends.
      while (curr != invalid_index &&
             !m_equal_to(volatile_load(&m_keys[curr]), k)) {
        result.increment_list_position();
        index_hint = curr;
        curr_ptr = &m_next_index[curr];
        curr = volatile_load(curr_ptr);
        KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
            &m_keys[curr != invalid_index ? curr : 0]);
      }

      //------------------------------------------------------------
      // If key already present then return that index.
      if (curr != invalid_index) {
        const bool free_existing = new_index != invalid_index;
        if (free_existing) {
          // Previously claimed an unused entry that was not inserted.
          // Release this unused entry immediately.
          if (!m_available_indexes.reset(new_index)) {
            KOKKOS_IMPL_DO_NOT_USE_PRINTF("Unable to free existing\n");
          }
        }

        result.set_existing(curr, free_existing);
        not_done = false;
      }
      //------------------------------------------------------------
      // Key is not currently in the map.
      // If the thread has claimed an entry try to insert now.
      else {
        //------------------------------------------------------------
        // If have not already claimed an unused entry then do so now.
        if (new_index == invalid_index) {
          bool found = false;
          // use the hash_list as the flag for the search direction
          Kokkos::tie(found, index_hint) =
              m_available_indexes.find_any_unset_near(index_hint, hash_list);

          // Give up once the attempt budget is exhausted; otherwise try
          // to atomically claim the slot the search suggested.
          if (!found && ++find_attempts >= max_attempts) {
            failed_insert_ref = true;
            not_done = false;
          } else if (m_available_indexes.set(index_hint)) {
            new_index = index_hint;
            // Set key and value
            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
            m_keys[new_index] = k;

            if (!is_set) {
              KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
              m_values[new_index] = v;
            }

            // Do not proceed until key and value are updated in global memory
            memory_fence();
          }
        } else if (failed_insert_ref) {
          // Another thread reported the map full; stop retrying.
          not_done = false;
        }

        // Attempt to append claimed entry into the list.
        // Another thread may also be trying to append the same list so protect
        // with atomic.
        if (new_index != invalid_index &&
            curr == atomic_compare_exchange(
                        curr_ptr, static_cast<size_type>(invalid_index),
                        new_index)) {
          // Succeeded in appending
          result.set_success(new_index);
          not_done = false;
        }
      }
    }  // while ( not_done )

    return result;
  }
598 
  /// Erase key k. Only effective between begin_erase()/end_erase(): this
  /// clears the entry's availability bit, the actual unlinking happens in
  /// end_erase(). Returns true when the key was present.
  KOKKOS_INLINE_FUNCTION
  bool erase(key_type const &k) const {
    bool result = false;

    if (is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
      // Mark the map modified so size() recounts on its next call.
      if (!m_scalars((int)modified_idx)) {
        m_scalars((int)modified_idx) = true;
      }

      size_type index = find(k);
      if (valid_at(index)) {
        m_available_indexes.reset(index);
        result = true;
      }
    }

    return result;
  }
617 
  /// Find the given key k, if it exists in the table.
  /// Returns the entry's direct index, or invalid_index when absent.
  /// Walks the linked list hanging off k's hash bucket.
  KOKKOS_INLINE_FUNCTION
  size_type find(const key_type &k) const {
    size_type curr = 0u < capacity()
                         ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
                         : invalid_index;

    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
      curr = m_next_index[curr];
    }

    return curr;
  }
640 
645  KOKKOS_INLINE_FUNCTION
646  bool exists(const key_type &k) const { return valid_at(find(k)); }
647 
  /// Get the value with i as its direct index.
  /// For a set this always aliases the single dummy element; out-of-range
  /// indices are clamped to the sentinel slot at capacity(). Returns by
  /// value when the payload is const (or the map is a set), by reference
  /// otherwise.
  KOKKOS_FORCEINLINE_FUNCTION
  std::conditional_t<(is_set || has_const_value), impl_value_type,
                     impl_value_type &>
  value_at(size_type i) const {
    return m_values[is_set ? 0 : (i < capacity() ? i : capacity())];
  }
662 
  /// Get the key with i as its direct index; out-of-range indices are
  /// clamped to the sentinel slot at capacity().
  KOKKOS_FORCEINLINE_FUNCTION
  key_type key_at(size_type i) const {
    return m_keys[i < capacity() ? i : capacity()];
  }
673 
  /// Is there a live entry at direct index i?
  KOKKOS_FORCEINLINE_FUNCTION
  bool valid_at(size_type i) const { return m_available_indexes.test(i); }
676 
  /// Shallow converting copy constructor (e.g. non-const -> const map).
  /// Enabled only for assignable key/value combinations; every view
  /// aliases the source map's storage.
  template <typename SKey, typename SValue>
  UnorderedMap(
      UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
      typename std::enable_if<
          Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
                                      SKey, SValue>::value,
          int>::type = 0)
      : m_bounded_insert(src.m_bounded_insert),
        m_hasher(src.m_hasher),
        m_equal_to(src.m_equal_to),
        m_size(src.m_size),
        m_available_indexes(src.m_available_indexes),
        m_hash_lists(src.m_hash_lists),
        m_next_index(src.m_next_index),
        m_keys(src.m_keys),
        m_values(src.m_values),
        m_scalars(src.m_scalars) {}
694 
  /// Shallow assignment from a compatible map: every view aliases src's
  /// storage. Enabled only for assignable key/value combinations.
  template <typename SKey, typename SValue>
  typename std::enable_if<
      Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
                                  SValue>::value,
      declared_map_type &>::type
  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
    m_bounded_insert = src.m_bounded_insert;
    m_hasher = src.m_hasher;
    m_equal_to = src.m_equal_to;
    m_size = src.m_size;
    m_available_indexes = src.m_available_indexes;
    m_hash_lists = src.m_hash_lists;
    m_next_index = src.m_next_index;
    m_keys = src.m_keys;
    m_values = src.m_values;
    m_scalars = src.m_scalars;
    return *this;
  }
713 
  /// Deep-copy src (possibly living in a different device's memory space)
  /// into *this, allocating fresh storage. Enabled only when key/value
  /// types match up to const. No-op when both maps already share storage.
  template <typename SKey, typename SValue, typename SDevice>
  typename std::enable_if<
      std::is_same<typename std::remove_const<SKey>::type, key_type>::value &&
      std::is_same<typename std::remove_const<SValue>::type,
                   value_type>::value>::type
  create_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      insertable_map_type tmp;

      tmp.m_bounded_insert = src.m_bounded_insert;
      tmp.m_hasher = src.m_hasher;
      tmp.m_equal_to = src.m_equal_to;
      tmp.m_size = src.size();
      // Allocate destination storage mirroring src's extents.
      tmp.m_available_indexes = bitset_type(src.capacity());
      tmp.m_hash_lists = size_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap hash list"),
          src.m_hash_lists.extent(0));
      tmp.m_next_index = size_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap next index"),
          src.m_next_index.extent(0));
      tmp.m_keys =
          key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
                        src.m_keys.extent(0));
      tmp.m_values = value_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap values"),
          src.m_values.extent(0));
      tmp.m_scalars = scalars_view("UnorderedMap scalars");

      Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);

      // Raw byte-wise copies between the two memory spaces.
      using raw_deep_copy =
          Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                                 typename SDevice::memory_space>;

      raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
                    sizeof(size_type) * src.m_hash_lists.extent(0));
      raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
                    sizeof(size_type) * src.m_next_index.extent(0));
      raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
                    sizeof(key_type) * src.m_keys.extent(0));
      if (!is_set) {
        raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
                      sizeof(impl_value_type) * src.m_values.extent(0));
      }
      raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
                    sizeof(int) * num_scalars);

      // Shallow-assign the populated copy into *this.
      *this = tmp;
    }
  }
765 
 private:  // private member functions
  /// Has the map been modified (insert/erase) since size() last counted?
  bool modified() const { return get_flag(modified_idx); }
769 
  // Set one of the m_scalars flags to true from the host.
  // NOTE(review): the second DeepCopy template argument line is missing
  // here (extraction artifact) — the source side should presumably be
  // Kokkos::HostSpace, mirroring get_flag(); confirm against the original.
  void set_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
    const int true_ = true;
    raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
  }
777 
  // Reset one of the m_scalars flags to false from the host.
  // NOTE(review): the second DeepCopy template argument line is missing
  // here (extraction artifact) — the source side should presumably be
  // Kokkos::HostSpace, mirroring get_flag(); confirm against the original.
  void reset_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
    const int false_ = false;
    raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
  }
785 
  /// Read one of the m_scalars flags back to the host.
  bool get_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
                               typename device_type::memory_space>;
    int result = false;
    raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
    return result;
  }
794 
795  static uint32_t calculate_capacity(uint32_t capacity_hint) {
796  // increase by 16% and round to nears multiple of 128
797  return capacity_hint
798  ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
799  128u) *
800  128u
801  : 128u;
802  }
803 
 private:  // private members
  bool m_bounded_insert;            // limit probe attempts during insert
  hasher_type m_hasher;             // key -> uint32_t hash functor
  equal_to_type m_equal_to;         // key equality functor
  mutable size_type m_size;         // cached entry count (see size())
  bitset_type m_available_indexes;  // one bit per slot: claimed or free
  size_type_view m_hash_lists;      // bucket heads (index of first entry)
  size_type_view m_next_index;      // per-entry chain links
  key_type_view m_keys;             // slot -> key storage
  value_type_view m_values;         // slot -> value storage (1 elem for sets)
  scalars_view m_scalars;           // modified/erasable/failed-insert flags
815 
816  template <typename KKey, typename VValue, typename DDevice, typename HHash,
817  typename EEqualTo>
818  friend class UnorderedMap;
819 
820  template <typename UMap>
821  friend struct Impl::UnorderedMapErase;
822 
823  template <typename UMap>
824  friend struct Impl::UnorderedMapHistogram;
825 
826  template <typename UMap>
827  friend struct Impl::UnorderedMapPrint;
828 };
829 
// Specialization of deep_copy for two UnorderedMap objects.
// Copies all of src's state into dst, allocating fresh storage in dst's
// memory space unless the two maps already alias the same storage.
template <typename DKey, typename DT, typename DDevice, typename SKey,
          typename ST, typename SDevice, typename Hasher, typename EqualTo>
inline void deep_copy(
    UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
    const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
  dst.create_copy_view(src);
}
838 
839 } // namespace Kokkos
840 
841 #endif // KOKKOS_UNORDERED_MAP_HPP
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
bool failed_insert() const
Whether any insert() call has failed since the flag was last reset.
UnorderedMap(size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
void clear()
Clear all entries in the table.
View to an array of data.
First element of the return value of UnorderedMap::insert().
Memory management for host memory.
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
size_type size() const
The number of entries in the table.
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type()) const
KOKKOS_FORCEINLINE_FUNCTION std::conditional_t<(is_set||has_const_value), impl_value_type, impl_value_type & > value_at(size_type i) const
Get the value with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION pair< T1 &, T2 & > tie(T1 &x, T2 &y)
Return a pair of references to the input arguments.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets.".
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
Thread-safe, performance-portable lookup table.
Definition: dummy.cpp:3
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const