CUTLASS
CUDA Templates for Linear Algebra Subroutines and Solvers
fragment_iterator_wmma_tensor_op.h
Go to the documentation of this file.
1 /***************************************************************************************************
2  * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without modification, are permitted
5  * provided that the following conditions are met:
6  * * Redistributions of source code must retain the above copyright notice, this list of
7  * conditions and the following disclaimer.
8  * * Redistributions in binary form must reproduce the above copyright notice, this list of
9  * conditions and the following disclaimer in the documentation and/or other materials
10  * provided with the distribution.
11  * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
12  * to endorse or promote products derived from this software without specific prior written
13  * permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 21  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
22  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  *
24  **************************************************************************************************/
38 #pragma once
39 
40 #if !defined(__clang__)
41 
#include "cutlass/wmma_array.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/epilogue/warp/wmma_tensor_op_policy.h"
44 
46 
48 
49 namespace cutlass {
50 namespace epilogue {
51 namespace warp {
52 
54 
/// Fragment iterator for WMMA TensorOp accumulator tiles.
///
/// Primary template; only partial specializations (per target layout) are
/// defined. Declared here so the row-major specialization below is valid.
template <
  typename WarpShape,          ///< shape of the warp-level GEMM tile (concept: MatrixShape)
  typename OperatorShape,      ///< shape of one WMMA operation (concept: gemm::GemmShape)
  typename OperatorElementC,   ///< accumulator element data type
  typename OperatorFragmentC,  ///< WMMA accumulator fragment type
  typename Layout              ///< target shared memory layout
>
class FragmentIteratorWmmaTensorOp;
66 
68 template <
69  typename WarpShape_,
70  typename OperatorShape_,
71  typename OperatorElementC_,
72  typename OperatorFragmentC_
73 >
74 class FragmentIteratorWmmaTensorOp<WarpShape_, OperatorShape_, OperatorElementC_, OperatorFragmentC_, layout::RowMajor> {
75 public:
76 
77  using WarpShape = WarpShape_;
78  using OperatorShape = OperatorShape_;
79  using OperatorElementC = OperatorElementC_;
80  using OperatorFragmentC = OperatorFragmentC_;
82 
83  using Policy = WmmaTensorOpPolicy<WarpShape, OperatorShape, Layout>;
84 
86  using Fragment = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kColumn>;
87 
89  using AccumulatorTile = WmmaFragmentArray<OperatorFragmentC, Policy::OperatorCount::kCount>;
90 
92 
93 private:
94 
96  using AccessType = WmmaFragmentArray<OperatorFragmentC, Policy::kWmmaFragmentsPerAccess>;
97 
98 private:
99 
100  //
101  // Data members
102  //
103 
105  AccessType const *accumulators_;
106 
108  int index_;
109 
110 public:
111 
115  accumulators_(reinterpret_cast<AccessType const *>(&accum)),
116  index_(0) {
117  }
118 
122  ++index_;
123  return *this;
124  }
125 
129  --index_;
130  return *this;
131  }
132 
135  void load(Fragment &frag, int index_offset = 0) const {
136  AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag);
137 
139  for(int n=0; n < Policy::OperatorCount::kColumn; n++) {
140 
141  int accumulator_access_offset = index_ * Policy::OperatorCount::kColumn + n;
142 
143  frag_ptr[n] = accumulators_[accumulator_access_offset];
144  }
145  }
146 };
147 
148 
149 } // namespace warp
150 } // namespace epilogue
151 } // namespace cutlass
152 
154 
155 #endif // !defined(__clang__)
156 
WmmaFragmentArray< OperatorFragmentC, Policy::OperatorCount::kColumn > Fragment
This is the fragment size produced by one access of the iterator.
Definition: fragment_iterator_wmma_tensor_op.h:86
CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp & operator++()
Increments.
Definition: fragment_iterator_wmma_tensor_op.h:121
Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe ...
Definition: aligned_buffer.h:35
CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp(AccumulatorTile const &accum)
Constructs an iterator.
Definition: fragment_iterator_wmma_tensor_op.h:114
WmmaTensorOpPolicy< WarpShape, OperatorShape, Layout > Policy
Definition: fragment_iterator_wmma_tensor_op.h:83
CUTLASS_HOST_DEVICE void load(Fragment &frag, int index_offset=0) const
Loads a fragment from the referenced part of the accumulator tile.
Definition: fragment_iterator_wmma_tensor_op.h:135
#define CUTLASS_PRAGMA_UNROLL
Definition: cutlass.h:110
#define CUTLASS_HOST_DEVICE
Definition: cutlass.h:89
Definition: fragment_iterator_wmma_tensor_op.h:63
Mapping function for row-major matrices.
Definition: layout/matrix.h:50
Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of TensorOp instructions, of which a row-oriented slice is visible per iteration.
Defines layout functions used by TensorRef and derived classes.
WmmaFragmentArray< OperatorFragmentC, Policy::OperatorCount::kCount > AccumulatorTile
This is the complete warp-level accumulator tile.
Definition: fragment_iterator_wmma_tensor_op.h:89
CUTLASS_HOST_DEVICE FragmentIteratorWmmaTensorOp & operator--()
Decrements.
Definition: fragment_iterator_wmma_tensor_op.h:128