CUTLASS
CUDA Templates for Linear Algebra Subroutines and Solvers
device/kernel/tensor_foreach.h
/***************************************************************************************************
 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted
 * provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright notice, this list of
 *       conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright notice, this list of
 *       conditions and the following disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
 *       to endorse or promote products derived from this software without specific prior written
 *       permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/coord.h"

namespace cutlass {
namespace reference {
namespace device {
namespace kernel {

namespace detail {

/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {

  /// Constructor for general rank
  __inline__ __device__
  TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {

    int64_t product = 1;

    CUTLASS_PRAGMA_UNROLL
    for (int i = Rank - RankRemaining; i < Rank; ++i) {
      product *= size[i];
    }

    coord[Rank - 1 - RankRemaining] = index / product;
    int64_t remaining = index % product;

    TensorForEachHelper<Func, Rank, RankRemaining-1>(func, size, coord, remaining);
  }
};

template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {

  /// Constructor for fastest changing rank
  __inline__ __device__
  TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {

    coord[Rank - 1] = index;

    if (coord < size) {
      func(coord);
    }
  }
};

} // namespace detail

/// Kernel calls a functor for each element in a tensor's index space
template <typename Func, int Rank, typename Params>
__global__ void TensorForEach(Coord<Rank> size, Params params = Params()) {

  Func func(params);

  int64_t index = threadIdx.x + blockIdx.x * blockDim.x;
  int64_t max_index = 1;

  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < Rank; ++i) {
    max_index *= size[i];
  }

  CUTLASS_PRAGMA_NO_UNROLL
  while (index < max_index) {
    Coord<Rank> coord;

    detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, size, coord, index);
    index += blockDim.x * gridDim.x;
  }
}

/// Kernel calls a functor for each element along a tensor's diagonal
template <typename Func, int Rank, typename Params>
__global__ void TensorDiagonalForEach(Coord<Rank> size, Params params, int start, int end) {

  Func func(params);

  int64_t index = threadIdx.x + blockIdx.x * blockDim.x + start;

  if (index < end) {
    Coord<Rank> coord;

    CUTLASS_PRAGMA_UNROLL
    for (int i = 0; i < Rank; ++i) {
      coord[i] = index;
    }

    func(coord);
  }
}

/// Kernel writes the result of a nullary functor to each element of a linear block of memory
template <typename Element, typename Func>
__global__ void BlockForEach(
  Element *ptr,
  size_t capacity,
  typename Func::Params params) {

  Func func(params);

  size_t index = threadIdx.x + blockIdx.x * blockDim.x;

  for (; index < capacity; index += blockDim.x * gridDim.x) {
    ptr[index] = func();
  }
}

} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
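
For context, the sketch below shows how one of these kernels might be launched from host code. It is a minimal illustration under stated assumptions, not part of the header: the functor CountVisitsFunc, its Params struct, the include path, and the <<<8, 128>>> launch configuration are hypothetical choices made for the example; only the TensorForEach kernel template itself comes from this file.

// Minimal usage sketch (illustrative only): launch TensorForEach over a rank-2 index space.
#include <cstdio>
#include <cuda_runtime.h>

#include "cutlass/coord.h"
#include "device/kernel/tensor_foreach.h"   // include path may differ by CUTLASS source layout

// Hypothetical functor: counts how many coordinates the kernel visits.
struct CountVisitsFunc {
  struct Params {
    int *counter;
  };

  Params params;

  __device__ CountVisitsFunc(Params const &params): params(params) { }

  // Called once per coordinate of the index space.
  __device__ void operator()(cutlass::Coord<2> const &coord) {
    atomicAdd(params.counter, 1);
  }
};

int main() {
  int *counter = nullptr;
  cudaMalloc(&counter, sizeof(int));
  cudaMemset(counter, 0, sizeof(int));

  cutlass::Coord<2> extent = cutlass::make_Coord(16, 32);
  CountVisitsFunc::Params params{counter};

  // Grid-stride launch: each thread walks multiple linearized indices of the extent.
  cutlass::reference::device::kernel::TensorForEach<
      CountVisitsFunc, 2, CountVisitsFunc::Params><<<8, 128>>>(extent, params);
  cudaDeviceSynchronize();

  int visits = 0;
  cudaMemcpy(&visits, counter, sizeof(int), cudaMemcpyDeviceToHost);
  printf("visited %d of %d elements\n", visits, 16 * 32);   // expect 512 visits

  cudaFree(counter);
  return 0;
}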