/*
	radix_sort_rank.hpp

	WORK IN PROGRESS: C++ implementation of an 8xW-bit rank radix sort.
	WARNING: DO NOT USE IN PRODUCTION CODE. #lol
	See https://github.com/eloj/radix-sorting#by-rank
	PERF: This is _much_ slower than the non-ranked implementation!
*/
#pragma once
#include <array>
#include <type_traits>
#include <cinttypes>
#include <cstring> // for std::memcpy
#include "radix_sort_basic_kdf.hpp"
#ifndef RESTRICT
#define RESTRICT __restrict__
#endif
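// Produces a ranking (sorting permutation) of src[0..n-1] without moving the elements:
// on return, ret[i] is the index in src of the i-th element in ascending key order.
// As the code below assumes, index_buffer must hold 2*n entries (the sort ping-pongs
// between its two halves) and histogram must be a zero-initialized array of
// 256*sizeof(KeyType) counters; the returned pointer is one of the two buffer halves.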
template<typename T, typename KeyFunc = decltype(basic_kdfs::kdf<T>), typename Hist, typename IdxType = size_t, typename KeyType = typename std::result_of_t<KeyFunc&&(T)>>
IdxType* rs_sort_rank(const T* RESTRICT src, IdxType* RESTRICT index_buffer, size_t n, Hist& histogram, KeyFunc && kf = basic_kdfs::kdf) {
	typedef typename Hist::value_type HVT;

	static_assert(sizeof(KeyType) <= 8, "KeyType must be 64-bits or less");
	static_assert(std::is_unsigned<KeyType>(), "KeyType must be unsigned");

	if (n < 2) {
		if (n != 0)
			index_buffer[0] = 0;
		return index_buffer;
	}

	constexpr size_t wc = sizeof(KeyType);
	constexpr std::array<uint8_t, 8> shift_table = { 0, 8, 16, 24, 32, 40, 48, 56 };
	constexpr unsigned int hist_len = 256;

	unsigned int cols[wc];
	unsigned int ncols = 0;
	KeyType key0;

	// Histograms
	size_t n_unsorted = n;
	for (size_t i = 0 ; i < n ; ++i) {
		// pre-sorted detection
		key0 = kf(src[i]);
		if ((i < n - 1) && (key0 <= kf(src[i+1]))) {
			--n_unsorted;
		}
		for (unsigned int j = 0 ; j < wc ; ++j) {
			++histogram[(hist_len*j) + ((key0 >> shift_table[j]) & 0xFF)];
		}
		index_buffer[i] = i;
	}

	if (n_unsorted < 2) {
		return index_buffer;
	}

	// Sample first key to determine if any columns can be skipped
	key0 = kf(*src);
	for (unsigned int i = 0 ; i < wc ; ++i) {
		if (histogram[(hist_len*i) + ((key0 >> shift_table[i]) & 0xFF)] != n) {
			cols[ncols++] = i;
		}
	}

	// Calculate offsets (exclusive scan)
	for (unsigned int i = 0 ; i < ncols ; ++i) {
		HVT a = 0;
		for (unsigned int j = 0 ; j < hist_len ; ++j) {
			HVT b = histogram[(hist_len*cols[i]) + j];
			histogram[(hist_len*cols[i]) + j] = a;
			a += b;
		}
	}

	auto index_buffer_src = index_buffer;
	auto index_buffer_dst = index_buffer + n;

	// Sort
	for (unsigned int i = 0 ; i < ncols ; ++i) {
		for (size_t j = 0 ; j < n ; ++j) {
			// Walk in the order established by the previous pass and bucket by the current column's byte.
			auto k = index_buffer_src[j];
			size_t dst = histogram[(hist_len*cols[i]) + ((kf(src[k]) >> shift_table[cols[i]]) & 0xFF)]++;
			// PERF: This incurs an extra memory read compared to the non-ranked version. Getting around
			// this would require us to rewrite the input such that the key and the index share a cache-line.
			index_buffer_dst[dst] = k;
		}
		std::swap(index_buffer_src, index_buffer_dst);
	}

	return index_buffer_src;
}
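
/*
	PERF note, illustrated: the permutation pass above does two dependent reads per element
	(first the index from index_buffer_src, then the key it points at in src). The workaround
	hinted at in the comment would pack key and index into one record so that a single load
	fetches both. A minimal sketch of such a record, assuming 32-bit keys and indices
	(names are illustrative, not part of this header):

		struct key_index32 {
			uint32_t key;	// kf(src[i]), precomputed once
			uint32_t idx;	// original position i
		};

	One would then radix sort an array of key_index32 by .key with a conventional
	(non-ranked) LSD sort and read the ranking out of the .idx fields, trading the
	indirect read for extra setup work and per-element working memory.
*/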
// This version automatically selects the smallest
// possible counter data-type for the histograms.
// Histograms are stored on the stack (2KiB-16KiB).
template<typename T, typename IdxType, typename KeyFunc = decltype(basic_kdfs::kdf<T>), int passes = sizeof(typename std::result_of_t<KeyFunc&&(T)>)>
IdxType* radix_sort_rank(const T* RESTRICT src, IdxType* RESTRICT index_buffer, size_t n, KeyFunc && kf = basic_kdfs::kdf) {
	if (n < 256) {
		std::array<uint8_t,256*passes> histogram{0};
		return rs_sort_rank(src, index_buffer, n, histogram, kf);
	} else if (n < (1ULL << 16ULL)) {
		std::array<uint16_t,256*passes> histogram{0};
		return rs_sort_rank(src, index_buffer, n, histogram, kf);
	} else if (n < (1ULL << 32ULL)) {
		std::array<uint32_t,256*passes> histogram{0};
		return rs_sort_rank(src, index_buffer, n, histogram, kf);
	} else {
		std::array<uint64_t,256*passes> histogram{0};
		return rs_sort_rank(src, index_buffer, n, histogram, kf);
	}
}
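
/*
	Usage sketch (illustrative only, not part of the original header). Assumes that
	basic_kdfs::kdf from radix_sort_basic_kdf.hpp maps unsigned integers to themselves,
	so the default KDF can be used directly on uint32_t data.

		#include <cstdio>
		#include <cstdint>
		#include <vector>
		#include "radix_sort_rank.hpp"

		int main() {
			std::vector<uint32_t> data = { 42, 7, 19, 7, 0 };

			// The sort needs 2*n index slots; it ping-pongs between the two halves.
			std::vector<size_t> index(data.size() * 2);

			size_t *ranked = radix_sort_rank(data.data(), index.data(), data.size());

			// ranked[i] is the position in 'data' of the i-th smallest element.
			for (size_t i = 0 ; i < data.size() ; ++i) {
				printf("%zu: data[%zu] = %u\n", i, ranked[i], data[ranked[i]]);
			}

			return 0;
		}
*/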