cub/grid/grid_queue.cuh

/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#pragma once

#include <cub/config.cuh>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cub/util_debug.cuh>

#include <nv/target>

CUB_NAMESPACE_BEGIN

/**
 * @brief GridQueue is a descriptor utility for dynamic queue management.
 *
 * @par Overview
 * GridQueue descriptors provide abstractions for "filling" or "draining"
 * globally-shared vectors.
 *
 * @par
 * A "filling" GridQueue works by atomically-adding to a zero-initialized counter,
 * returning a unique offset for the calling thread to write its items. The
 * GridQueue maintains the total "fill-size". The fill counter must be reset using
 * GridQueue::ResetFill by the host or kernel instance prior to the kernel instance
 * that will be filling.
 *
 * @par
 * Similarly, a "draining" GridQueue works by atomically-incrementing a
 * zero-initialized counter, returning a unique offset for the calling thread to
 * read its items. Threads can safely drain until the array's logical fill-size is
 * exceeded. The drain counter must be reset using GridQueue::ResetDrain or
 * GridQueue::FillAndResetDrain by the host or kernel instance prior to the kernel
 * instance that will be draining.
 *
 * @par
 * Iterative work management can be implemented simply with a pair of flip-flopping
 * work buffers, each with an associated set of fill and drain GridQueue descriptors.
 *
 * @tparam OffsetT Signed integer type for global offsets
 */
template <typename OffsetT>
class GridQueue
{
private:
  /// Indices of the two counters in the backing device allocation
  enum
  {
    FILL  = 0,
    DRAIN = 1,
  };

  /// Pair of device counters (fill-size and drain offset)
  OffsetT* d_counters;

public:
  /// Returns the device allocation size in bytes needed to construct a GridQueue instance
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE static size_t AllocationSize()
  {
    return sizeof(OffsetT) * 2;
  }

  /// Constructs an invalid GridQueue descriptor
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE GridQueue()
      : d_counters(nullptr)
  {}

  /**
   * @brief Constructs a GridQueue descriptor around the device storage allocation
   *
   * @param d_storage
   *   Device allocation to back the GridQueue. Must be at least as big as
   *   `AllocationSize()`.
   */
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE GridQueue(void* d_storage)
      : d_counters((OffsetT*) d_storage)
  {}
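
  // Illustrative snippet (not part of the library): backing a GridQueue with a
  // device allocation. Using `int` as OffsetT and omitting error checking are
  // assumptions made for brevity.
  //
  //   void* d_storage = nullptr;
  //   cudaMalloc(&d_storage, GridQueue<int>::AllocationSize()); // room for both counters
  //   GridQueue<int> queue(d_storage);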

  /// This operation sets the fill-size and resets the drain counter, preparing the
  /// GridQueue for draining in the next kernel instance. To be called by the host or
  /// by a kernel prior to that which will be draining.
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t FillAndResetDrain(OffsetT fill_size, cudaStream_t stream = 0)
  {
    cudaError_t result = cudaErrorUnknown;

    NV_IF_TARGET(
      NV_IS_DEVICE,
      ((void) stream; d_counters[FILL] = fill_size; d_counters[DRAIN] = 0; result = cudaSuccess;),
      (OffsetT counters[2]; counters[FILL] = fill_size; counters[DRAIN] = 0;
       result = CubDebug(cudaMemcpyAsync(d_counters, counters, sizeof(OffsetT) * 2, cudaMemcpyHostToDevice, stream));));

    return result;
  }

  /// This operation resets the drain counter so that it may advance to meet the
  /// existing fill-size. To be called by the host or by a kernel prior to that
  /// which will be draining.
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t ResetDrain(cudaStream_t stream = 0)
  {
    cudaError_t result = cudaErrorUnknown;

    NV_IF_TARGET(NV_IS_DEVICE,
                 ((void) stream; d_counters[DRAIN] = 0; result = cudaSuccess;),
                 (result = CubDebug(cudaMemsetAsync(d_counters + DRAIN, 0, sizeof(OffsetT), stream));));

    return result;
  }

  /// This operation resets the fill counter. To be called by the host or by a kernel
  /// prior to that which will be filling.
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t ResetFill(cudaStream_t stream = 0)
  {
    cudaError_t result = cudaErrorUnknown;

    NV_IF_TARGET(NV_IS_DEVICE,
                 ((void) stream; d_counters[FILL] = 0; result = cudaSuccess;),
                 (result = CubDebug(cudaMemsetAsync(d_counters + FILL, 0, sizeof(OffsetT), stream));));

    return result;
  }

  /// Returns the fill-size established by the parent or by the previous kernel.
  _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t FillSize(OffsetT& fill_size, cudaStream_t stream = 0)
  {
    cudaError_t result = cudaErrorUnknown;

    NV_IF_TARGET(NV_IS_DEVICE,
                 ((void) stream; fill_size = d_counters[FILL]; result = cudaSuccess;),
                 (result = CubDebug(
                    cudaMemcpyAsync(&fill_size, d_counters + FILL, sizeof(OffsetT), cudaMemcpyDeviceToHost, stream));));

    return result;
  }

  /// Drain `num_items` from the queue. Returns the offset from which to read items.
  /// To be called from CUDA kernels.
  _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT Drain(OffsetT num_items)
  {
    return atomicAdd(d_counters + DRAIN, num_items);
  }

  /// Fill `num_items` into the queue. Returns the offset from which to write items.
  /// To be called from CUDA kernels.
  _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT Fill(OffsetT num_items)
  {
    return atomicAdd(d_counters + FILL, num_items);
  }
};
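
// Usage sketch (illustrative, not part of the library): a kernel whose threads
// cooperatively drain a previously filled queue. `DrainKernel` and `d_in` are
// hypothetical names introduced for this example.
//
//   template <typename OffsetT>
//   __global__ void DrainKernel(GridQueue<OffsetT> queue, const int* d_in, OffsetT fill_size)
//   {
//     while (true)
//     {
//       // Each call reserves one item and returns a unique read offset
//       OffsetT offset = queue.Drain(1);
//       if (offset >= fill_size)
//       {
//         break; // All items have been claimed
//       }
//       int item = d_in[offset];
//       // ... process item ...
//     }
//   }
//
// The host (or a preceding kernel) would first call
// queue.FillAndResetDrain(fill_size, stream) so that the drain counter starts at zero.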

#ifndef _CCCL_DOXYGEN_INVOKED // Do not document

/**
 * Reset grid queue (call with 1 block of 1 thread)
 */
template <typename OffsetT>
__global__ void FillAndResetDrainKernel(GridQueue<OffsetT> grid_queue, OffsetT num_items)
{
  grid_queue.FillAndResetDrain(num_items);
}
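
// Launch sketch (illustrative): the kernel resets the queue with a single thread,
// so a one-block, one-thread configuration is sufficient. `queue`, `num_items`,
// and `stream` are assumed to be defined by the caller.
//
//   FillAndResetDrainKernel<<<1, 1, 0, stream>>>(queue, num_items);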

#endif // _CCCL_DOXYGEN_INVOKED

CUB_NAMESPACE_END