/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2022, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/
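
//! @file
//! cub::DeviceRunLengthEncode provides device-wide, parallel operations for computing a
//! run-length encoding across a sequence of data items residing within device-accessible memory.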

#pragma once

#include <cub/config.cuh>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cub/detail/nvtx.cuh>
#include <cub/device/dispatch/dispatch_reduce_by_key.cuh>
#include <cub/device/dispatch/dispatch_rle.cuh>
#include <cub/device/dispatch/tuning/tuning_run_length_encode.cuh>
#include <cub/util_deprecated.cuh>

#include <cuda/std/__functional/invoke.h>

#include <iterator>

CUB_NAMESPACE_BEGIN

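//! DeviceRunLengthEncode provides device-wide, parallel operations for demarcating "runs" of
//! consecutive, same-valued items within a sequence residing in device-accessible memory.
//!
//! A run-length encoding computes a compressed representation of an input sequence in which each
//! maximal run of consecutive, same-valued items is represented by a single value together with a
//! count of the items in that run. ``Encode`` emits one (value, length) pair per run, while
//! ``NonTrivialRuns`` reports only the offsets and lengths of runs longer than one item.
//!
//! As with other CUB device-wide algorithms, each method follows a two-phase protocol: a first
//! call with ``d_temp_storage == nullptr`` writes the required temporary storage size to
//! ``temp_storage_bytes``, and a second call with the allocated storage performs the work.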
struct DeviceRunLengthEncode
{
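  //! Computes a run-length encoding of the sequence ``d_in``: for each maximal run of
  //! consecutive, equal-valued items, the run's value is written to ``d_unique_out`` and its
  //! length to ``d_counts_out``; the total number of runs is written to ``d_num_runs_out``.
  //!
  //! - Equivalence is determined with ``operator==``; run lengths are summed with ``operator+``.
  //! - When ``d_temp_storage`` is ``nullptr``, no work is done and the required allocation size
  //!   is written to ``temp_storage_bytes``.
  //!
  //! A usage sketch (the device allocations and their contents are illustrative):
  //!
  //! .. code-block:: c++
  //!
  //!    // Input:    d_in            <-- [0, 2, 2, 9, 5, 5, 5, 8]   (num_items = 8)
  //!    // Expected: d_unique_out    <-- [0, 2, 9, 5, 8]
  //!    //           d_counts_out    <-- [1, 2, 1, 3, 1]
  //!    //           d_num_runs_out  <-- [5]
  //!
  //!    // First call: determine temporary device storage requirements
  //!    void*  d_temp_storage     = nullptr;
  //!    size_t temp_storage_bytes = 0;
  //!    cub::DeviceRunLengthEncode::Encode(
  //!      d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);
  //!
  //!    // Allocate temporary storage, then run the encoding
  //!    cudaMalloc(&d_temp_storage, temp_storage_bytes);
  //!    cub::DeviceRunLengthEncode::Encode(
  //!      d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);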
  template <typename InputIteratorT,
            typename UniqueOutputIteratorT,
            typename LengthsOutputIteratorT,
            typename NumRunsOutputIteratorT>
  CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t Encode(
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    UniqueOutputIteratorT d_unique_out,
    LengthsOutputIteratorT d_counts_out,
    NumRunsOutputIteratorT d_num_runs_out,
    int num_items,
    cudaStream_t stream = 0)
  {
    CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceRunLengthEncode::Encode");

    using offset_t     = int; // Signed integer type for global offsets
    using equality_op  = ::cuda::std::equal_to<>; // Default == operator
    using reduction_op = ::cuda::std::plus<>; // Value reduction operator

    // The lengths output value type
    using length_t = cub::detail::non_void_value_t<LengthsOutputIteratorT, offset_t>;

    // Generator type providing a constant stream of 1s for the run-length reduction
    using lengths_input_iterator_t = ConstantInputIterator<length_t, offset_t>;

    // The accumulator type used when summing run lengths
    using accum_t = ::cuda::std::__accumulator_t<reduction_op, length_t, length_t>;

    // The key type (the unique output value type)
    using key_t = cub::detail::non_void_value_t<UniqueOutputIteratorT, cub::detail::value_t<InputIteratorT>>;

    // Tuning policies for the underlying reduce-by-key dispatch
    using policy_t = detail::device_run_length_encode_policy_hub<accum_t, key_t>;

    return DispatchReduceByKey<
      InputIteratorT,
      UniqueOutputIteratorT,
      lengths_input_iterator_t,
      LengthsOutputIteratorT,
      NumRunsOutputIteratorT,
      equality_op,
      reduction_op,
      offset_t,
      accum_t,
      policy_t>::Dispatch(d_temp_storage,
                          temp_storage_bytes,
                          d_in,
                          d_unique_out,
                          lengths_input_iterator_t((length_t) 1),
                          d_counts_out,
                          d_num_runs_out,
                          equality_op(),
                          reduction_op(),
                          num_items,
                          stream);
  }

#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
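  // Deprecated overload: debug_synchronous is no longer supported and is ignored; this overload
  // simply forwards to the stream-only Encode above.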
  template <typename InputIteratorT,
            typename UniqueOutputIteratorT,
            typename LengthsOutputIteratorT,
            typename NumRunsOutputIteratorT>
  CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t Encode(
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    UniqueOutputIteratorT d_unique_out,
    LengthsOutputIteratorT d_counts_out,
    NumRunsOutputIteratorT d_num_runs_out,
    int num_items,
    cudaStream_t stream,
    bool debug_synchronous)
  {
    CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG

    return Encode<InputIteratorT, UniqueOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT>(
      d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);
  }
#endif // DOXYGEN_SHOULD_SKIP_THIS

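  //! Enumerates the starting offsets and lengths of all non-trivial runs (runs of length > 1) of
  //! same-valued items in the sequence ``d_in``: for the i-th non-trivial run encountered, the
  //! run's starting offset is written to ``d_offsets_out[i]`` and its length to
  //! ``d_lengths_out[i]``; the total number of such runs is written to ``d_num_runs_out``.
  //!
  //! - Equivalence is determined with ``operator==``.
  //! - When ``d_temp_storage`` is ``nullptr``, no work is done and the required allocation size
  //!   is written to ``temp_storage_bytes``.
  //!
  //! A usage sketch (the device allocations and their contents are illustrative):
  //!
  //! .. code-block:: c++
  //!
  //!    // Input:    d_in            <-- [0, 2, 2, 9, 5, 5, 5, 8]   (num_items = 8)
  //!    // Expected: d_offsets_out   <-- [1, 4]
  //!    //           d_lengths_out   <-- [2, 3]
  //!    //           d_num_runs_out  <-- [2]
  //!
  //!    // First call: determine temporary device storage requirements
  //!    void*  d_temp_storage     = nullptr;
  //!    size_t temp_storage_bytes = 0;
  //!    cub::DeviceRunLengthEncode::NonTrivialRuns(
  //!      d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
  //!
  //!    // Allocate temporary storage, then enumerate the runs
  //!    cudaMalloc(&d_temp_storage, temp_storage_bytes);
  //!    cub::DeviceRunLengthEncode::NonTrivialRuns(
  //!      d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);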
  template <typename InputIteratorT,
            typename OffsetsOutputIteratorT,
            typename LengthsOutputIteratorT,
            typename NumRunsOutputIteratorT>
  CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t NonTrivialRuns(
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OffsetsOutputIteratorT d_offsets_out,
    LengthsOutputIteratorT d_lengths_out,
    NumRunsOutputIteratorT d_num_runs_out,
    int num_items,
    cudaStream_t stream = 0)
  {
    CUB_DETAIL_NVTX_RANGE_SCOPE_IF(d_temp_storage, "cub::DeviceRunLengthEncode::NonTrivialRuns");

    using OffsetT    = int; // Signed integer type for global offsets
    using EqualityOp = ::cuda::std::equal_to<>; // Default == operator

    return DeviceRleDispatch<
      InputIteratorT,
      OffsetsOutputIteratorT,
      LengthsOutputIteratorT,
      NumRunsOutputIteratorT,
      EqualityOp,
      OffsetT>::Dispatch(d_temp_storage,
                         temp_storage_bytes,
                         d_in,
                         d_offsets_out,
                         d_lengths_out,
                         d_num_runs_out,
                         EqualityOp(),
                         num_items,
                         stream);
  }

#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
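  // Deprecated overload: debug_synchronous is no longer supported and is ignored; this overload
  // simply forwards to the stream-only NonTrivialRuns above.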
  template <typename InputIteratorT,
            typename OffsetsOutputIteratorT,
            typename LengthsOutputIteratorT,
            typename NumRunsOutputIteratorT>
  CUB_DETAIL_RUNTIME_DEBUG_SYNC_IS_NOT_SUPPORTED CUB_RUNTIME_FUNCTION _CCCL_FORCEINLINE static cudaError_t
  NonTrivialRuns(
    void* d_temp_storage,
    size_t& temp_storage_bytes,
    InputIteratorT d_in,
    OffsetsOutputIteratorT d_offsets_out,
    LengthsOutputIteratorT d_lengths_out,
    NumRunsOutputIteratorT d_num_runs_out,
    int num_items,
    cudaStream_t stream,
    bool debug_synchronous)
  {
    CUB_DETAIL_RUNTIME_DEBUG_SYNC_USAGE_LOG

    return NonTrivialRuns<InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT>(
      d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items, stream);
  }
#endif // DOXYGEN_SHOULD_SKIP_THIS
};

CUB_NAMESPACE_END