/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
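//! @file
//! cub::DeviceSegmentedReduce provides device-wide, parallel operations for
//! computing reductions across multiple sequences of data items residing
//! within device-accessible memory.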
#pragma once
#include <cub/config.cuh>
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
# pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
# pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
# pragma system_header
#endif // no system header
#include <cub/detail/choose_offset.cuh>
#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_reduce.cuh>
#include <cub/device/dispatch/dispatch_reduce_by_key.cuh>
#include <cub/iterator/arg_index_input_iterator.cuh>
#include <cub/util_deprecated.cuh>
#include <cub/util_type.cuh>
#include <cuda/std/type_traits>
#include <iterator>
CUB_NAMESPACE_BEGIN
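//! DeviceSegmentedReduce provides device-wide, parallel operations for
//! computing reductions across multiple sequences of data items residing
//! within device-accessible memory. Segment i of the input is delimited by
//! the offset pair [d_begin_offsets[i], d_end_offsets[i]); a segment whose
//! begin and end offsets coincide is empty and receives the operation's
//! initial value.
//!
//! As with other CUB device-wide algorithms, every method uses a two-phase
//! protocol: when d_temp_storage is nullptr, the call only writes the
//! required allocation size to temp_storage_bytes and returns without
//! launching any work; a second call with the allocated buffer performs the
//! actual reduction.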
struct DeviceSegmentedReduce
{
private:
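// Tag-dispatched implementation: the ::cuda::std::false_type overload below
// is declared but intentionally left undefined. Callers static_assert that
// the segment offset type is integral, so only the ::cuda::std::true_type
// overload is ever instantiated, and DispatchSegmentedReduce is never
// instantiated with an unsupported, non-integral OffsetT.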
template <typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename OffsetT,
typename ReductionOpT,
typename InitT,
typename... Ts>
CUB_RUNTIME_FUNCTION static cudaError_t segmented_reduce(
::cuda::std::false_type,
void* d_temp_storage,
std::size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
ReductionOpT reduction_op,
InitT initial_value,
cudaStream_t stream);
template <typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename OffsetT,
typename ReductionOpT,
typename InitT,
typename... Ts>
CUB_RUNTIME_FUNCTION static cudaError_t segmented_reduce(
::cuda::std::true_type,
void* d_temp_storage,
std::size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
ReductionOpT reduction_op,
InitT initial_value,
cudaStream_t stream)
{
return DispatchSegmentedReduce<
InputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
OffsetT,
ReductionOpT,
Ts...>::Dispatch(d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
reduction_op,
initial_value,
stream);
}
public:
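//! Computes a device-wide segmented reduction using the specified binary
//! reduction_op functor; initial_value seeds the reduction of every segment,
//! so empty segments yield initial_value in d_out.
//!
//! A minimal sketch of the two-phase call pattern. The problem setup and the
//! CustomMin functor are illustrative assumptions, not part of CUB:
//!
//! @code
//! #include <cub/cub.cuh>
//! #include <climits> // INT_MAX
//!
//! // Hypothetical user-defined reduction operator
//! struct CustomMin
//! {
//!   template <typename T>
//!   __host__ __device__ T operator()(const T& a, const T& b) const
//!   {
//!     return (b < a) ? b : a;
//!   }
//! };
//!
//! // Assumed device allocations: 3 segments over 7 ints, where
//! // segment i spans [d_offsets[i], d_offsets[i + 1])
//! int  num_segments = 3;
//! int* d_in;      // e.g., [8, 6, 7, 5, 3, 0, 9]
//! int* d_out;     // 3 output slots
//! int* d_offsets; // e.g., [0, 3, 3, 7] (segment 1 is empty)
//!
//! // First call: query required temporary storage size
//! void* d_temp_storage      = nullptr;
//! size_t temp_storage_bytes = 0;
//! cub::DeviceSegmentedReduce::Reduce(
//!   d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments,
//!   d_offsets, d_offsets + 1, CustomMin{}, INT_MAX);
//!
//! // Allocate, then run the reduction
//! cudaMalloc(&d_temp_storage, temp_storage_bytes);
//! cub::DeviceSegmentedReduce::Reduce(
//!   d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments,
//!   d_offsets, d_offsets + 1, CustomMin{}, INT_MAX);
//! // d_out would be [5, INT_MAX, 0] for the example data
//! @endcode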
template <typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT,
typename T>
CUB_RUNTIME_FUNCTION static cudaError_t Reduce(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
ReductionOpT reduction_op,
T initial_value,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Reduce");
// Integer type for global offsets
using OffsetT = detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT, ReductionOpT>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
reduction_op,
initial_value,
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT,
typename T>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t Reduce(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
ReductionOpT reduction_op,
T initial_value,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return Reduce<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT, ReductionOpT, T>(
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
reduction_op,
initial_value,
stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
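//! Computes a device-wide segmented sum using the addition (+) operator,
//! zero-initializing the accumulator of each segment.
//!
//! A sketch of the same two-phase pattern, assuming the illustrative device
//! pointers from the Reduce example above:
//!
//! @code
//! void* d_temp_storage      = nullptr;
//! size_t temp_storage_bytes = 0;
//! cub::DeviceSegmentedReduce::Sum(
//!   d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments,
//!   d_offsets, d_offsets + 1);
//!
//! cudaMalloc(&d_temp_storage, temp_storage_bytes);
//! cub::DeviceSegmentedReduce::Sum(
//!   d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments,
//!   d_offsets, d_offsets + 1);
//! // d_out would be [21, 0, 17] for the example data
//! @endcode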
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
Sum(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Sum");
// Integer type for global offsets
using OffsetT = detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
// The output value type
using OutputT = cub::detail::non_void_value_t<OutputIteratorT, cub::detail::value_t<InputIteratorT>>;
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<InputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
OffsetT,
::cuda::std::plus<>>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
::cuda::std::plus<>{},
OutputT(), // zero-initialize
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t
Sum(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return Sum<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
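//! Computes a device-wide segmented minimum using the less-than (<)
//! operator. Each segment's reduction is seeded with Traits<InputT>::Max(),
//! the identity of the minimum operation, so empty segments yield that value
//! in d_out. The call pattern mirrors the Sum sketch above, with no functor
//! or initial-value arguments.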
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
Min(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Min");
// Integer type for global offsets
using OffsetT = detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
// The input value type
using InputT = cub::detail::value_t<InputIteratorT>;
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<InputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
OffsetT,
::cuda::minimum<>>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
::cuda::minimum<>{},
Traits<InputT>::Max(), // identity of the minimum operation
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t
Min(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return Min<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
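//! Finds the first device-wide minimum in each segment, also returning the
//! in-segment index of that item: d_out receives one
//! KeyValuePair<int, InputValueT> per segment, and an empty segment i is
//! assigned {1, Traits<InputValueT>::Max()}.
//!
//! A sketch assuming the illustrative inputs from the Reduce example, with a
//! hypothetical d_argmin output array:
//!
//! @code
//! cub::KeyValuePair<int, int>* d_argmin; // 3 output slots
//!
//! void* d_temp_storage      = nullptr;
//! size_t temp_storage_bytes = 0;
//! cub::DeviceSegmentedReduce::ArgMin(
//!   d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_segments,
//!   d_offsets, d_offsets + 1);
//!
//! cudaMalloc(&d_temp_storage, temp_storage_bytes);
//! cub::DeviceSegmentedReduce::ArgMin(
//!   d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_segments,
//!   d_offsets, d_offsets + 1);
//! // d_argmin would be [{1, 6}, {1, INT_MAX}, {2, 0}] for the example data
//! @endcode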
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t ArgMin(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::ArgMin");
// Integer type for global offsets
// Using common iterator value type is a breaking change, see:
// https://github.com/NVIDIA/cccl/pull/414#discussion_r1330632615
using OffsetT = int; // detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
// The input type
using InputValueT = cub::detail::value_t<InputIteratorT>;
// The output tuple type
using OutputTupleT = cub::detail::non_void_value_t<OutputIteratorT, KeyValuePair<OffsetT, InputValueT>>;
// The output value type
using OutputValueT = typename OutputTupleT::Value;
using AccumT = OutputTupleT;
using InitT = detail::reduce::empty_problem_init_t<AccumT>;
// Wrapped input iterator to produce index-value <OffsetT, InputT> tuples
using ArgIndexInputIteratorT = ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT>;
ArgIndexInputIteratorT d_indexed_in(d_in);
// Initial value
// TODO Address https://github.com/NVIDIA/cub/issues/651
InitT initial_value{AccumT(1, Traits<InputValueT>::Max())};
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<ArgIndexInputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
OffsetT,
cub::ArgMin,
InitT,
AccumT>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_indexed_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
cub::ArgMin(),
initial_value,
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t ArgMin(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return ArgMin<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
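//! Computes a device-wide segmented maximum using the greater-than (>)
//! operator. Each segment's reduction is seeded with
//! Traits<InputT>::Lowest(), the identity of the maximum operation, so empty
//! segments yield that value in d_out. The call pattern mirrors the Sum
//! sketch above.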
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
Max(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::Max");
// Integer type for global offsets
using OffsetT = detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
// The input value type
using InputT = cub::detail::value_t<InputIteratorT>;
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT, OffsetT, ::cuda::maximum<>>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
::cuda::maximum<>{},
Traits<InputT>::Lowest(), // identity of the maximum operation
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t
Max(void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return Max<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
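//! Finds the first device-wide maximum in each segment, also returning the
//! in-segment index of that item: d_out receives one
//! KeyValuePair<int, InputValueT> per segment, and an empty segment i is
//! assigned {1, Traits<InputValueT>::Lowest()}. The output type and call
//! pattern mirror the ArgMin sketch above.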
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t ArgMax(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0)
{
CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceSegmentedReduce::ArgMax");
// Integer type for global offsets
// Using common iterator value type is a breaking change, see:
// https://github.com/NVIDIA/cccl/pull/414#discussion_r1330632615
using OffsetT = int; // detail::common_iterator_value_t<BeginOffsetIteratorT, EndOffsetIteratorT>;
// The input type
using InputValueT = cub::detail::value_t<InputIteratorT>;
// The output tuple type
using OutputTupleT = cub::detail::non_void_value_t<OutputIteratorT, KeyValuePair<OffsetT, InputValueT>>;
using AccumT = OutputTupleT;
using InitT = detail::reduce::empty_problem_init_t<AccumT>;
// The output value type
using OutputValueT = typename OutputTupleT::Value;
// Wrapped input iterator to produce index-value <OffsetT, InputT> tuples
using ArgIndexInputIteratorT = ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT>;
ArgIndexInputIteratorT d_indexed_in(d_in);
// Initial value
// TODO Address https://github.com/NVIDIA/cub/issues/651
InitT initial_value{AccumT(1, Traits<InputValueT>::Lowest())};
using integral_offset_check = ::cuda::std::is_integral<OffsetT>;
static_assert(integral_offset_check::value, "Offset iterator value type should be integral.");
return segmented_reduce<ArgIndexInputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
OffsetT,
cub::ArgMax,
InitT,
AccumT>(
integral_offset_check{},
d_temp_storage,
temp_storage_bytes,
d_indexed_in,
d_out,
num_segments,
d_begin_offsets,
d_end_offsets,
cub::ArgMax(),
initial_value,
stream);
}
#ifndef _CCCL_DOXYGEN_INVOKED // Do not document
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION static cudaError_t ArgMax(
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream,
bool debug_synchronous)
{
CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG
return ArgMax<InputIteratorT, OutputIteratorT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, stream);
}
#endif // _CCCL_DOXYGEN_INVOKED
};
CUB_NAMESPACE_END