/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#pragma once

#include <cub/config.cuh>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cub/thread/thread_operators.cuh>
#include <cub/util_type.cuh>
#include <cub/warp/specializations/warp_reduce_shfl.cuh>
#include <cub/warp/specializations/warp_reduce_smem.cuh>

CUB_NAMESPACE_BEGIN

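//! The WarpReduce class provides collective methods for computing a parallel
//! reduction of items partitioned across a CUDA thread warp. A reduction (or
//! fold) uses a binary combining operator to compute a single aggregate from
//! a list of input elements.
//!
//! @tparam T
//!   The reduction input/output element type
//!
//! @tparam LOGICAL_WARP_THREADS
//!   The number of threads per "logical" warp (may be less than the number of
//!   hardware warp threads). Default is the warp size of the targeted CUDA
//!   compute capability (e.g., 32 threads).
//!
//! @tparam LEGACY_PTX_ARCH
//!   Unused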
template <typename T, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int LEGACY_PTX_ARCH = 0>
class WarpReduce
{
private:
  /******************************************************************************
   * Constants and type definitions
   ******************************************************************************/

  enum
  {
    /// Whether the logical warp size and the PTX warp size coincide
    IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0)),

    /// Whether the logical warp size is a power of two
    IS_POW_OF_TWO = PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE,
  };

public:
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document

  // Internal specialization: use the warp-shuffle implementation when
  // LOGICAL_WARP_THREADS is a power of two; otherwise fall back to the
  // shared-memory implementation.
  using InternalWarpReduce = cub::detail::
    conditional_t<IS_POW_OF_TWO, WarpReduceShfl<T, LOGICAL_WARP_THREADS>, WarpReduceSmem<T, LOGICAL_WARP_THREADS>>;

#endif // DOXYGEN_SHOULD_SKIP_THIS

private:
  /// Shared memory storage layout type for WarpReduce
  using _TempStorage = typename InternalWarpReduce::TempStorage;

  /******************************************************************************
   * Thread fields
   ******************************************************************************/

  /// Shared storage reference
  _TempStorage& temp_storage;

  /******************************************************************************
   * Collective constructors and reduction operations
   ******************************************************************************/

public:
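  /// The operations exposed by WarpReduce require a temporary memory allocation of this
  /// nested type for thread communication. This opaque storage can be allocated directly
  /// using the `__shared__` keyword.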
  struct TempStorage : Uninitialized<_TempStorage>
  {};

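  /// Collective constructor using the specified memory allocation as temporary storage.
  /// Logical warp and lane identifiers are constructed from `threadIdx.x`.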
  _CCCL_DEVICE _CCCL_FORCEINLINE WarpReduce(TempStorage& temp_storage)
      : temp_storage(temp_storage.Alias())
  {}

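  //! Computes a warp-wide sum in the calling warp. The output is valid in
  //! warp lane 0.
  //!
  //! A minimal usage sketch for a 128-thread block (the block size, kernel
  //! name, and `d_in`/`d_out` pointers are illustrative assumptions, not part
  //! of this interface):
  //!
  //!   __global__ void ExampleKernel(int* d_in, int* d_out)
  //!   {
  //!     // Specialize WarpReduce for type int (one instance per 32-thread warp)
  //!     using WarpReduce = cub::WarpReduce<int>;
  //!
  //!     // Allocate WarpReduce shared memory for the 4 warps of the block
  //!     __shared__ typename WarpReduce::TempStorage temp_storage[4];
  //!
  //!     // Obtain one input item per thread
  //!     int thread_data = d_in[threadIdx.x];
  //!
  //!     // Return the warp-wide sum to lane 0 of each warp
  //!     int warp_id = threadIdx.x / 32;
  //!     int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data);
  //!
  //!     // Only lane 0 of each warp holds the valid aggregate
  //!     if (threadIdx.x % 32 == 0)
  //!     {
  //!       d_out[warp_id] = aggregate;
  //!     }
  //!   }
  //!
  //! @param[in] input  Calling thread's input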
  _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input)
  {
    return InternalWarpReduce(temp_storage).template Reduce<true>(input, LOGICAL_WARP_THREADS, cub::Sum());
  }

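  //! Computes a partially-full warp-wide sum in the calling warp. The output
  //! is valid in warp lane 0. All lanes in the calling warp must supply the
  //! same value for `valid_items`; otherwise the result is undefined.
  //!
  //! @param[in] input        Calling thread's input
  //! @param[in] valid_items  Total number of valid items in the calling warp
  //!                         (may be less than LOGICAL_WARP_THREADS)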
  _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int valid_items)
  {
    // Not all lanes necessarily hold valid items, so bounds checking is
    // enabled (ALL_LANES_VALID = false)
    return InternalWarpReduce(temp_storage).template Reduce<false>(input, valid_items, cub::Sum());
  }

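  //! Computes a segmented sum in the calling warp, where segments are defined
  //! by head flags. The sum of each segment is returned to the first lane of
  //! that segment (which always includes lane 0).
  //!
  //! @tparam FlagT  [inferred] An integer type for marking segment heads
  //!
  //! @param[in] input      Calling thread's input
  //! @param[in] head_flag  Head flag denoting whether or not `input` is the
  //!                       start of a new segment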
  template <typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedSum(T input, FlagT head_flag)
  {
    return HeadSegmentedReduce(input, head_flag, cub::Sum());
  }

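  //! Computes a segmented sum in the calling warp, where segments are defined
  //! by tail flags. The sum of each segment is returned to the first lane of
  //! that segment.
  //!
  //! @tparam FlagT  [inferred] An integer type for marking segment tails
  //!
  //! @param[in] input      Calling thread's input
  //! @param[in] tail_flag  Tail flag denoting whether or not `input` is the
  //!                       last item in the current segment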
  template <typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedSum(T input, FlagT tail_flag)
  {
    return TailSegmentedReduce(input, tail_flag, cub::Sum());
  }

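  //! Computes a warp-wide reduction in the calling warp using the specified
  //! binary reduction functor. The output is valid in warp lane 0. Supports
  //! non-commutative reduction operators.
  //!
  //! A minimal usage sketch of a warp-wide max-reduction (the surrounding
  //! kernel and the `thread_data` value are illustrative assumptions):
  //!
  //!   // Specialize WarpReduce for type int
  //!   using WarpReduce = cub::WarpReduce<int>;
  //!
  //!   // Allocate WarpReduce shared memory for one warp
  //!   __shared__ typename WarpReduce::TempStorage temp_storage;
  //!
  //!   // Return the warp-wide max to lane 0
  //!   int aggregate = WarpReduce(temp_storage).Reduce(thread_data, cub::Max());
  //!
  //! @tparam ReductionOp  [inferred] Binary reduction functor type having
  //!                      member `T operator()(const T& a, const T& b)`
  //!
  //! @param[in] input         Calling thread's input
  //! @param[in] reduction_op  Binary reduction operator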
  template <typename ReductionOp>
  _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op)
  {
    return InternalWarpReduce(temp_storage).template Reduce<true>(input, LOGICAL_WARP_THREADS, reduction_op);
  }

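  //! Computes a partially-full warp-wide reduction in the calling warp using
  //! the specified binary reduction functor. The output is valid in warp
  //! lane 0. All lanes in the calling warp must supply the same value for
  //! `valid_items`; otherwise the result is undefined.
  //!
  //! @param[in] input         Calling thread's input
  //! @param[in] reduction_op  Binary reduction operator
  //! @param[in] valid_items   Total number of valid items in the calling warp
  //!                          (may be less than LOGICAL_WARP_THREADS)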
  template <typename ReductionOp>
  _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op, int valid_items)
  {
    return InternalWarpReduce(temp_storage).template Reduce<false>(input, valid_items, reduction_op);
  }

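  //! Computes a segmented reduction in the calling warp, where segments are
  //! defined by head flags. The reduction of each segment is returned to the
  //! first lane of that segment. Supports non-commutative reduction operators.
  //!
  //! @param[in] input         Calling thread's input
  //! @param[in] head_flag     Head flag denoting whether or not `input` is the
  //!                          start of a new segment
  //! @param[in] reduction_op  Binary reduction operator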
  template <typename ReductionOp, typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedReduce(T input, FlagT head_flag, ReductionOp reduction_op)
  {
    return InternalWarpReduce(temp_storage).template SegmentedReduce<true>(input, head_flag, reduction_op);
  }

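  //! Computes a segmented reduction in the calling warp, where segments are
  //! defined by tail flags. The reduction of each segment is returned to the
  //! first lane of that segment. Supports non-commutative reduction operators.
  //!
  //! @param[in] input         Calling thread's input
  //! @param[in] tail_flag     Tail flag denoting whether or not `input` is the
  //!                          last item in the current segment
  //! @param[in] reduction_op  Binary reduction operator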
  template <typename ReductionOp, typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedReduce(T input, FlagT tail_flag, ReductionOp reduction_op)
  {
    return InternalWarpReduce(temp_storage).template SegmentedReduce<false>(input, tail_flag, reduction_op);
  }
};

#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
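// Specialization for a single-thread logical "warp": every reduction and
// segmented reduction degenerates to a no-op that returns the calling
// thread's own input.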
template <typename T, int LEGACY_PTX_ARCH>
class WarpReduce<T, 1, LEGACY_PTX_ARCH>
{
private:
  using _TempStorage = cub::NullType;

public:
  struct InternalWarpReduce
  {
    struct TempStorage : Uninitialized<_TempStorage>
    {};

    _CCCL_DEVICE _CCCL_FORCEINLINE InternalWarpReduce(TempStorage& /*temp_storage */) {}

    template <bool ALL_LANES_VALID, typename ReductionOp>
    _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, int /* valid_items */, ReductionOp /* reduction_op */)
    {
      return input;
    }

    template <bool HEAD_SEGMENTED, typename FlagT, typename ReductionOp>
    _CCCL_DEVICE _CCCL_FORCEINLINE T SegmentedReduce(T input, FlagT /* flag */, ReductionOp /* reduction_op */)
    {
      return input;
    }
  };

  using TempStorage = typename InternalWarpReduce::TempStorage;

  _CCCL_DEVICE _CCCL_FORCEINLINE WarpReduce(TempStorage& /*temp_storage */) {}

  _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input)
  {
    return input;
  }

  _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int /* valid_items */)
  {
    return input;
  }

  template <typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedSum(T input, FlagT /* head_flag */)
  {
    return input;
  }

  template <typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedSum(T input, FlagT /* tail_flag */)
  {
    return input;
  }

  template <typename ReductionOp>
  _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp /* reduction_op */)
  {
    return input;
  }

  template <typename ReductionOp>
  _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp /* reduction_op */, int /* valid_items */)
  {
    return input;
  }

  template <typename ReductionOp, typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T HeadSegmentedReduce(T input, FlagT /* head_flag */, ReductionOp /* reduction_op */)
  {
    return input;
  }

  template <typename ReductionOp, typename FlagT>
  _CCCL_DEVICE _CCCL_FORCEINLINE T TailSegmentedReduce(T input, FlagT /* tail_flag */, ReductionOp /* reduction_op */)
  {
    return input;
  }
};
#endif // DOXYGEN_SHOULD_SKIP_THIS

CUB_NAMESPACE_END