// cub/device/device_segmented_radix_sort.cuh
// File members: cub/device/device_segmented_radix_sort.cuh
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
#include <cub/config.cuh>
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
# pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
# pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
# pragma system_header
#endif // no system header
#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_radix_sort.cuh>
#include <cub/util_deprecated.cuh>
#include <iterator>
#include <stdio.h>
CUB_NAMESPACE_BEGIN
//! DeviceSegmentedRadixSort provides device-wide, parallel operations for
//! sorting segments of key (or key-value) data residing in device-accessible
//! memory, forwarding every call to DispatchSegmentedRadixSort.
//!
//! All entry points follow the usual CUB two-phase convention: presumably,
//! when `d_temp_storage` is `nullptr` only the required size is written to
//! `temp_storage_bytes` and no sorting work is launched (the size-query
//! behavior lives in DispatchSegmentedRadixSort — confirm there). Note that
//! the NVTX profiling range is explicitly skipped on the size-query pass
//! (`CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, ...)`).
//!
//! Two buffer styles are offered for each sort direction:
//!  * raw-pointer overloads: out-of-place sort — the dispatch layer is passed
//!    `false` for its is-overwrite-okay flag, so the input buffers are not
//!    clobbered;
//!  * DoubleBuffer overloads: the dispatch layer is passed `true`, allowing it
//!    to use either buffer of the DoubleBuffer as scratch (the buffers are
//!    taken by reference so the selector can be updated for the caller).
//!
//! Overloads taking a trailing `debug_synchronous` flag are deprecated shims:
//! the flag is ignored (a usage log is emitted) and the call forwards to the
//! corresponding stream-only overload.
struct DeviceSegmentedRadixSort
{
private:
// Name reported for NVTX ranges
_CCCL_HOST_DEVICE static constexpr auto GetName() -> const char*
{
return "cub::DeviceSegmentedRadixSort";
}
public:
//! Sorts segments of key-value pairs into ascending key order (out-of-place:
//! reads d_keys_in/d_values_in, writes d_keys_out/d_values_out).
//!
//! @param d_temp_storage     Device-accessible scratch; nullptr triggers the
//!                           size-query pass (see struct comment).
//! @param temp_storage_bytes In/out: size of d_temp_storage in bytes.
//! @param d_keys_in          Input keys.
//! @param d_keys_out         Output (sorted) keys.
//! @param d_values_in        Input values, paired with d_keys_in.
//! @param d_values_out       Output values, reordered to match d_keys_out.
//! @param num_items          Total number of items to sort.
//! @param num_segments       Number of segments.
//! @param d_begin_offsets    Iterator supplying each segment's begin offset.
//! @param d_end_offsets      Iterator supplying each segment's end offset.
//! @param begin_bit          First (least-significant) key bit to sort on.
//! @param end_bit            One past the last key bit; defaults to the full
//!                           key width (sizeof(KeyT) * 8).
//! @param stream             CUDA stream for the launched work (default 0).
//! @return                   cudaSuccess or the first error encountered.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortPairs(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
const ValueT* d_values_in,
ValueT* d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// Wrap the raw pointers in DoubleBuffers; const_cast is safe because the
// dispatch is invoked with is_overwrite_okay = false below, so the
// "current" (input) buffers are never written.
DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out);
return DispatchSegmentedRadixSort<false, KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
false,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only SortPairs overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
const ValueT* d_values_in,
ValueT* d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortPairs<KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
d_values_in,
d_values_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of key-value pairs into ascending key order using the
//! caller's DoubleBuffers. The dispatch layer is allowed to overwrite either
//! buffer (is_overwrite_okay = true); the buffers are taken by reference so
//! their selectors can be updated to indicate which buffer holds the result.
//! Parameters otherwise match the raw-pointer SortPairs overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortPairs(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
DoubleBuffer<ValueT>& d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
return DispatchSegmentedRadixSort<false, KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
true,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only DoubleBuffer SortPairs overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairs(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
DoubleBuffer<ValueT>& d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortPairs<KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of key-value pairs into descending key order (out-of-place:
//! reads d_keys_in/d_values_in, writes d_keys_out/d_values_out). Identical to
//! the ascending SortPairs raw-pointer overload except the dispatch is
//! instantiated with IS_DESCENDING = true.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
const ValueT* d_values_in,
ValueT* d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// const_cast is safe: is_overwrite_okay = false below keeps inputs intact.
DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out);
return DispatchSegmentedRadixSort<true, KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
false,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only SortPairsDescending overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
const ValueT* d_values_in,
ValueT* d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortPairsDescending<KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
d_values_in,
d_values_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of key-value pairs into descending key order using the
//! caller's DoubleBuffers (is_overwrite_okay = true; buffers taken by
//! reference so selectors can be updated). Parameters otherwise match the
//! DoubleBuffer SortPairs overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
DoubleBuffer<ValueT>& d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
return DispatchSegmentedRadixSort<true, KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
true,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only DoubleBuffer SortPairsDescending
//! overload.
template <typename KeyT, typename ValueT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
DoubleBuffer<ValueT>& d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortPairsDescending<KeyT, ValueT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of keys (no values) into ascending order, out-of-place:
//! reads d_keys_in, writes d_keys_out. Dispatch is instantiated with a
//! NullType value channel and an empty value DoubleBuffer. Parameters
//! otherwise match the raw-pointer SortPairs overload.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortKeys(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// Null value type
// const_cast is safe: is_overwrite_okay = false below keeps input intact.
DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
DoubleBuffer<NullType> d_values;
return DispatchSegmentedRadixSort<false, KeyT, NullType, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
false,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only SortKeys overload.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortKeys<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of keys into ascending order using the caller's key
//! DoubleBuffer (is_overwrite_okay = true; buffer taken by reference so its
//! selector can be updated). Value channel is NullType.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortKeys(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// Null value type
DoubleBuffer<NullType> d_values;
return DispatchSegmentedRadixSort<false, KeyT, NullType, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
true,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only DoubleBuffer SortKeys overload.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeys(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortKeys<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of keys (no values) into descending order, out-of-place:
//! reads d_keys_in, writes d_keys_out. Identical to the raw-pointer SortKeys
//! overload except the dispatch is instantiated with IS_DESCENDING = true.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// const_cast is safe: is_overwrite_okay = false below keeps input intact.
DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out);
DoubleBuffer<NullType> d_values;
return DispatchSegmentedRadixSort<true, KeyT, NullType, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
false,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only SortKeysDescending overload.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
const KeyT* d_keys_in,
KeyT* d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortKeysDescending<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//! Sorts segments of keys into descending order using the caller's key
//! DoubleBuffer (is_overwrite_okay = true; buffer taken by reference so its
//! selector can be updated). Value channel is NullType.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit = 0,
int end_bit = sizeof(KeyT) * 8,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, GetName());
// Signed integer type for global offsets (caps num_items at INT_MAX)
using OffsetT = int;
// Null value type
DoubleBuffer<NullType> d_values;
return DispatchSegmentedRadixSort<true, KeyT, NullType, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
true,
stream);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
//! Deprecated shim: `debug_synchronous` is ignored (a usage log is emitted)
//! and the call forwards to the stream-only DoubleBuffer SortKeysDescending
//! overload.
template <typename KeyT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending(
void* d_temp_storage,
size_t& temp_storage_bytes,
DoubleBuffer<KeyT>& d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
int begin_bit,
int end_bit,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return SortKeysDescending<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
begin_bit,
end_bit,
stream);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
};
CUB_NAMESPACE_END
CUB_NAMESPACE_END