CUTLASS
CUDA Templates for Linear Algebra Subroutines and Solvers
gemm/thread/mma_sm61.h
Go to the documentation of this file.
1 /***************************************************************************************************
2  * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without modification, are permitted
5  * provided that the following conditions are met:
6  * * Redistributions of source code must retain the above copyright notice, this list of
7  * conditions and the following disclaimer.
8  * * Redistributions in binary form must reproduce the above copyright notice, this list of
9  * conditions and the following disclaimer in the documentation and/or other materials
10  * provided with the distribution.
11  * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
12  * to endorse or promote products derived from this software without specific prior written
13  * permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
21  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
22  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  *
24  **************************************************************************************************/
29 #pragma once
30 
31 #include "cutlass/cutlass.h"
32 #include "cutlass/tensor_ref.h"
33 #include "cutlass/layout/matrix.h"
34 #include "cutlass/gemm/gemm.h"
36 
38 
39 namespace cutlass {
40 namespace gemm {
41 namespace thread {
42 
44 
46 template <
48  typename Shape_,
50  typename LayoutC_
51 >
52 struct Mma<
53  Shape_,
54  int8_t,
55  layout::RowMajor,
56  int8_t,
58  int32_t,
59  LayoutC_,
60  arch::OpMultiplyAdd,
61  bool> {
62 
64  using Shape = Shape_;
65 
67  using ElementA = int8_t;
68 
71 
73  using ElementB = int8_t;
74 
77 
79  using ElementC = int32_t;
80 
82  using LayoutC = LayoutC_;
83 
85  using Operator = arch::OpMultiplyAdd;
86 
88  using FragmentA = Array<ElementA, Shape::kMK>;
89 
91  using FragmentB = Array<ElementB, Shape::kKN>;
92 
94  using FragmentC = Array<ElementC, Shape::kMN>;
95 
96  //
97  // Methods
98  //
99 
103  FragmentC & D,
104  FragmentA const & A,
105  FragmentB const & B,
106  FragmentC const & C) {
107 
109  reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
110 
111  // Copy accumulators
112  D = C;
113 
115  using Mma = arch::Mma<
117  1,
118  ElementA,
119  LayoutA,
120  ElementB,
121  LayoutB,
122  ElementC,
123  LayoutC,
124  arch::OpMultiplyAdd>;
125 
126  Mma mma;
127 
128  // Compute matrix product
130  for (int k = 0; k < Shape::kK / Mma::Shape::kK; ++k) {
131 
133  for (int n = 0; n < Shape::kN; ++n) {
134 
136  for (int m = 0; m < Shape::kM; ++m) {
137  MatrixCoord mn(m, n);
138 
139  Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
140  Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
141 
142  Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
143 
144  mma(
145  tmp,
146  ptr_A[m * Shape::kK / Mma::Shape::kK + k],
147  ptr_B[n * Shape::kK / Mma::Shape::kK + k],
148  tmp);
149 
150  d.at(mn) = reinterpret_cast<int32_t &>(tmp);
151  }
152  }
153  }
154  }
155 };
156 
159 template <
161  typename Shape_,
163  typename LayoutC_
164 >
165 struct Mma<
166  Shape_,
167  int8_t,
168  layout::ColumnMajor,
169  int8_t,
171  int32_t,
172  LayoutC_,
173  arch::OpMultiplyAdd,
174  int8_t> {
175 
177  using Shape = Shape_;
178 
180  using ElementA = int8_t;
181 
184 
186  using ElementB = int8_t;
187 
190 
192  using ElementC = int32_t;
193 
195  using LayoutC = LayoutC_;
196 
198  using Operator = arch::OpMultiplyAdd;
199 
201  using FragmentA = Array<ElementA, Shape::kMK>;
202 
204  using FragmentB = Array<ElementB, Shape::kKN>;
205 
207  using FragmentC = Array<ElementC, Shape::kMN>;
208 
209  //
210  // Methods
211  //
212 
216  FragmentC & D,
217  FragmentA const & A,
218  FragmentB const & B,
219  FragmentC const & C) {
220 
222  reinterpret_cast<ElementC *>(&D), LayoutC::packed({ Shape::kM, Shape::kN }));
223 
224  // Copy accumulators
225  D = C;
226 
228  using Mma = arch::Mma<
230  1,
231  ElementA,
232  LayoutA,
233  ElementB,
234  LayoutB,
235  ElementC,
236  LayoutC,
237  arch::OpMultiplyAdd>;
238 
239  Mma mma;
240  Array<int8_t, 4> const *ptr_A = reinterpret_cast<Array<int8_t, 4> const *>(&A);
241  Array<int8_t, 4> const *ptr_B = reinterpret_cast<Array<int8_t, 4> const *>(&B);
242 
243  // Compute matrix product
245  for (int k = 0; k < Shape::kK / Mma::Shape::kK; ++k) {
246 
248  for (int n = 0; n < Shape::kN; ++n) {
249 
251  for (int m = 0; m < Shape::kM; ++m) {
252  MatrixCoord mn(m, n);
253 
254  Array<int32_t, 1> tmp = reinterpret_cast<Array<int32_t, 1> &>(d.at(mn));
255 
256  mma(
257  tmp,
258  ptr_A[m + k * Shape::kM],
259  ptr_B[n + k * Shape::kN],
260  tmp);
261 
262  d.at(mn) = reinterpret_cast<int32_t &>(tmp);
263  }
264  }
265  }
266  }
267 };
268 
269 } // namespace thread
270 } // namespace gemm
271 } // namespace cutlass
272 
Definition: aligned_buffer.h:35
Defines a structure containing strides, bounds, and a pointer to tensor data.
CUTLASS_HOST_DEVICE void operator()(FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C)
Computes a matrix product D = A * B + C.
Definition: gemm/thread/mma_sm61.h:102
Shape_ Shape
Size of the Gemm problem - concept: gemm::GemmShape<>
Definition: gemm/thread/mma_sm61.h:64
CUTLASS_HOST_DEVICE void operator()(FragmentC &D, FragmentA const &A, FragmentB const &B, FragmentC const &C)
Computes a matrix product D = A * B + C.
Definition: gemm/thread/mma_sm61.h:215
arch::OpMultiplyAdd Operator
Underlying mathematical operator.
Definition: gemm/thread/mma_sm61.h:85
Defines common types used for all GEMM-like operators.
arch::OpMultiplyAdd Operator
Underlying mathematical operator.
Definition: gemm/thread/mma_sm61.h:198
Mapping function for column-major matrices.
Definition: layout/matrix.h:142
#define CUTLASS_PRAGMA_UNROLL
Definition: cutlass.h:110
#define CUTLASS_HOST_DEVICE
Definition: cutlass.h:89
Templates exposing architecture support for warp-level multiply-add operations.
Shape of a matrix multiply-add operation.
Definition: include/cutlass/gemm/gemm.h:57
Mapping function for row-major matrices.
Definition: layout/matrix.h:50
LayoutC_ LayoutC
Layout of C matrix (concept: layout::MapFunc)
Definition: gemm/thread/mma_sm61.h:195
CUTLASS_HOST_DEVICE Reference at(TensorCoord const &coord) const
Returns a reference to the element at a given Coord.
Definition: tensor_ref.h:307
Structure to compute the matrix product.
Definition: gemm/thread/mma.h:66
LayoutC_ LayoutC
Layout of C matrix (concept: layout::MapFunc)
Definition: gemm/thread/mma_sm61.h:82
Defines layout functions used by TensorRef and derived classes.
Matrix multiply-add operation.
Definition: arch/mma.h:92
Basic include for CUTLASS.
Definition: matrix_coord.h:39
Shape_ Shape
Size of the Gemm problem - concept: gemm::GemmShape<>
Definition: gemm/thread/mma_sm61.h:177