Eigen  5.0.1-dev+60122df6
 
Loading...
Searching...
No Matches
TriangularSolverMatrix.h
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5// Modifications Copyright (C) 2022 Intel Corporation
6//
7// This Source Code Form is subject to the terms of the Mozilla
8// Public License v. 2.0. If a copy of the MPL was not distributed
9// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H
12#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
13
14// IWYU pragma: private
15#include "../InternalHeaderCheck.h"
16
17namespace Eigen {
18
19namespace internal {
20
21template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride,
22 bool Specialized>
23struct trsmKernelL {
24 // Generic Implementation of triangular solve for triangular matrix on left and multiple rhs.
25 // Handles non-packed matrices.
26 static void kernel(Index size, Index otherSize, const Scalar* _tri, Index triStride, Scalar* _other, Index otherIncr,
27 Index otherStride);
28};
29
30template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride,
31 bool Specialized>
32struct trsmKernelR {
33 // Generic Implementation of triangular solve for triangular matrix on right and multiple lhs.
34 // Handles non-packed matrices.
35 static void kernel(Index size, Index otherSize, const Scalar* _tri, Index triStride, Scalar* _other, Index otherIncr,
36 Index otherStride);
37};
38
39template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride,
40 bool Specialized>
41EIGEN_STRONG_INLINE void trsmKernelL<Scalar, Index, Mode, Conjugate, TriStorageOrder, OtherInnerStride,
42 Specialized>::kernel(Index size, Index otherSize, const Scalar* _tri,
43 Index triStride, Scalar* _other, Index otherIncr,
44 Index otherStride) {
45 typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> TriMapper;
46 typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> OtherMapper;
47 TriMapper tri(_tri, triStride);
48 OtherMapper other(_other, otherStride, otherIncr);
49
50 enum { IsLower = (Mode & Lower) == Lower };
51 conj_if<Conjugate> conj;
52
53 // tr solve
54 for (Index k = 0; k < size; ++k) {
55 // TODO write a small kernel handling this (can be shared with trsv)
56 Index i = IsLower ? k : -k - 1;
57 Index rs = size - k - 1; // remaining size
58 Index s = TriStorageOrder == RowMajor ? (IsLower ? 0 : i + 1) : IsLower ? i + 1 : i - rs;
59
60 Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(Scalar(1) / conj(tri(i, i)));
61 for (Index j = 0; j < otherSize; ++j) {
62 if (TriStorageOrder == RowMajor) {
63 Scalar b(0);
64 const Scalar* l = &tri(i, s);
65 typename OtherMapper::LinearMapper r = other.getLinearMapper(s, j);
66 for (Index i3 = 0; i3 < k; ++i3) b += conj(l[i3]) * r(i3);
67
68 other(i, j) = (other(i, j) - b) * a;
69 } else {
70 Scalar& otherij = other(i, j);
71 otherij *= a;
72 Scalar b = otherij;
73 typename OtherMapper::LinearMapper r = other.getLinearMapper(s, j);
74 typename TriMapper::LinearMapper l = tri.getLinearMapper(s, i);
75 for (Index i3 = 0; i3 < rs; ++i3) r(i3) -= b * conj(l(i3));
76 }
77 }
78 }
79}
80
81template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride,
82 bool Specialized>
83EIGEN_STRONG_INLINE void trsmKernelR<Scalar, Index, Mode, Conjugate, TriStorageOrder, OtherInnerStride,
84 Specialized>::kernel(Index size, Index otherSize, const Scalar* _tri,
85 Index triStride, Scalar* _other, Index otherIncr,
86 Index otherStride) {
87 typedef typename NumTraits<Scalar>::Real RealScalar;
88 typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> LhsMapper;
89 typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> RhsMapper;
90 LhsMapper lhs(_other, otherStride, otherIncr);
91 RhsMapper rhs(_tri, triStride);
92
93 enum { RhsStorageOrder = TriStorageOrder, IsLower = (Mode & Lower) == Lower };
94 conj_if<Conjugate> conj;
95
96 for (Index k = 0; k < size; ++k) {
97 Index j = IsLower ? size - k - 1 : k;
98
99 typename LhsMapper::LinearMapper r = lhs.getLinearMapper(0, j);
100 for (Index k3 = 0; k3 < k; ++k3) {
101 Scalar b = conj(rhs(IsLower ? j + 1 + k3 : k3, j));
102 typename LhsMapper::LinearMapper a = lhs.getLinearMapper(0, IsLower ? j + 1 + k3 : k3);
103 for (Index i = 0; i < otherSize; ++i) r(i) -= a(i) * b;
104 }
105 if ((Mode & UnitDiag) == 0) {
106 Scalar inv_rjj = RealScalar(1) / conj(rhs(j, j));
107 for (Index i = 0; i < otherSize; ++i) r(i) *= inv_rjj;
108 }
109 }
110}
111
112// if the rhs is row major, let's transpose the product
113template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder,
114 int OtherInnerStride>
115struct triangular_solve_matrix<Scalar, Index, Side, Mode, Conjugate, TriStorageOrder, RowMajor, OtherInnerStride> {
116 static void run(Index size, Index cols, const Scalar* tri, Index triStride, Scalar* _other, Index otherIncr,
117 Index otherStride, level3_blocking<Scalar, Scalar>& blocking) {
118 triangular_solve_matrix<
119 Scalar, Index, Side == OnTheLeft ? OnTheRight : OnTheLeft, (Mode & UnitDiag) | ((Mode & Upper) ? Lower : Upper),
120 NumTraits<Scalar>::IsComplex && Conjugate, TriStorageOrder == RowMajor ? ColMajor : RowMajor, ColMajor,
121 OtherInnerStride>::run(size, cols, tri, triStride, _other, otherIncr, otherStride, blocking);
122 }
123};
124
125/* Optimized triangular solver with multiple right hand side and the triangular matrix on the left
126 */
127template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
128struct triangular_solve_matrix<Scalar, Index, OnTheLeft, Mode, Conjugate, TriStorageOrder, ColMajor, OtherInnerStride> {
129 static EIGEN_DONT_INLINE void run(Index size, Index otherSize, const Scalar* _tri, Index triStride, Scalar* _other,
130 Index otherIncr, Index otherStride, level3_blocking<Scalar, Scalar>& blocking);
131};
132
133template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
134EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar, Index, OnTheLeft, Mode, Conjugate, TriStorageOrder, ColMajor,
135 OtherInnerStride>::run(Index size, Index otherSize, const Scalar* _tri,
136 Index triStride, Scalar* _other, Index otherIncr,
137 Index otherStride,
138 level3_blocking<Scalar, Scalar>& blocking) {
139 Index cols = otherSize;
140
141 std::ptrdiff_t l1, l2, l3;
142 manage_caching_sizes(GetAction, &l1, &l2, &l3);
143
144#if defined(EIGEN_VECTORIZE_AVX512) && EIGEN_USE_AVX512_TRSM_L_KERNELS && EIGEN_ENABLE_AVX512_NOCOPY_TRSM_L_CUTOFFS
145 EIGEN_IF_CONSTEXPR(
146 (OtherInnerStride == 1 && (std::is_same<Scalar, float>::value || std::is_same<Scalar, double>::value))) {
147 // Very rough cutoffs to determine when to call trsm w/o packing
148 // For small problem sizes trsmKernel compiled with clang is generally faster.
149 // TODO: Investigate better heuristics for cutoffs.
150 double L2Cap = 0.5; // 50% of L2 size
151 if (size < avx512_trsm_cutoff<Scalar>(l2, cols, L2Cap)) {
152 trsmKernelL<Scalar, Index, Mode, Conjugate, TriStorageOrder, 1, /*Specialized=*/true>::kernel(
153 size, cols, _tri, triStride, _other, 1, otherStride);
154 return;
155 }
156 }
157#endif
158
159 typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> TriMapper;
160 typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> OtherMapper;
161 TriMapper tri(_tri, triStride);
162 OtherMapper other(_other, otherStride, otherIncr);
163
164 typedef gebp_traits<Scalar, Scalar> Traits;
165
166 enum { SmallPanelWidth = plain_enum_max(Traits::mr, Traits::nr), IsLower = (Mode & Lower) == Lower };
167
168 Index kc = blocking.kc(); // cache block size along the K direction
169 Index mc = (std::min)(size, blocking.mc()); // cache block size along the M direction
170
171 std::size_t sizeA = kc * mc;
172 std::size_t sizeB = kc * cols;
173
174 ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
175 ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
176
177 gebp_kernel<Scalar, Scalar, Index, OtherMapper, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
178 gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing,
179 TriStorageOrder>
180 pack_lhs;
181 gemm_pack_rhs<Scalar, Index, OtherMapper, Traits::nr, ColMajor, false, true> pack_rhs;
182
183 // the goal here is to subdivise the Rhs panels such that we keep some cache
184 // coherence when accessing the rhs elements
185 Index subcols = cols > 0 ? l2 / (4 * sizeof(Scalar) * std::max<Index>(otherStride, size)) : 0;
186 subcols = std::max<Index>((subcols / Traits::nr) * Traits::nr, Traits::nr);
187
188 for (Index k2 = IsLower ? 0 : size; IsLower ? k2 < size : k2 > 0; IsLower ? k2 += kc : k2 -= kc) {
189 const Index actual_kc = (std::min)(IsLower ? size - k2 : k2, kc);
190
191 // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
192 // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
193 // A11 (the triangular part) and A21 the remaining rectangular part.
194 // Then the high level algorithm is:
195 // - B = R1 => general block copy (done during the next step)
196 // - R1 = A11^-1 B => tricky part
197 // - update B from the new R1 => actually this has to be performed continuously during the above step
198 // - R2 -= A21 * B => GEPP
199
200 // The tricky part: compute R1 = A11^-1 B while updating B from R1
201 // The idea is to split A11 into multiple small vertical panels.
202 // Each panel can be split into a small triangular part T1k which is processed without optimization,
203 // and the remaining small part T2k which is processed using gebp with appropriate block strides
204 for (Index j2 = 0; j2 < cols; j2 += subcols) {
205 Index actual_cols = (std::min)(cols - j2, subcols);
206 // for each small vertical panels [T1k^T, T2k^T]^T of lhs
207 for (Index k1 = 0; k1 < actual_kc; k1 += SmallPanelWidth) {
208 Index actualPanelWidth = std::min<Index>(actual_kc - k1, SmallPanelWidth);
209 // tr solve
210 {
211 Index i = IsLower ? k2 + k1 : k2 - k1;
212#if defined(EIGEN_VECTORIZE_AVX512) && EIGEN_USE_AVX512_TRSM_L_KERNELS
213 EIGEN_IF_CONSTEXPR(
214 (OtherInnerStride == 1 && (std::is_same<Scalar, float>::value || std::is_same<Scalar, double>::value))) {
215 i = IsLower ? k2 + k1 : k2 - k1 - actualPanelWidth;
216 }
217#endif
218 trsmKernelL<Scalar, Index, Mode, Conjugate, TriStorageOrder, OtherInnerStride, /*Specialized=*/true>::kernel(
219 actualPanelWidth, actual_cols, _tri + i + (i)*triStride, triStride,
220 _other + i * OtherInnerStride + j2 * otherStride, otherIncr, otherStride);
221 }
222
223 Index lengthTarget = actual_kc - k1 - actualPanelWidth;
224 Index startBlock = IsLower ? k2 + k1 : k2 - k1 - actualPanelWidth;
225 Index blockBOffset = IsLower ? k1 : lengthTarget;
226
227 // update the respective rows of B from other
228 pack_rhs(blockB + actual_kc * j2, other.getSubMapper(startBlock, j2), actualPanelWidth, actual_cols, actual_kc,
229 blockBOffset);
230
231 // GEBP
232 if (lengthTarget > 0) {
233 Index startTarget = IsLower ? k2 + k1 + actualPanelWidth : k2 - actual_kc;
234
235 pack_lhs(blockA, tri.getSubMapper(startTarget, startBlock), actualPanelWidth, lengthTarget);
236
237 gebp_kernel(other.getSubMapper(startTarget, j2), blockA, blockB + actual_kc * j2, lengthTarget,
238 actualPanelWidth, actual_cols, Scalar(-1), actualPanelWidth, actual_kc, 0, blockBOffset);
239 }
240 }
241 }
242
243 // R2 -= A21 * B => GEPP
244 {
245 Index start = IsLower ? k2 + kc : 0;
246 Index end = IsLower ? size : k2 - kc;
247 for (Index i2 = start; i2 < end; i2 += mc) {
248 const Index actual_mc = (std::min)(mc, end - i2);
249 if (actual_mc > 0) {
250 pack_lhs(blockA, tri.getSubMapper(i2, IsLower ? k2 : k2 - kc), actual_kc, actual_mc);
251
252 gebp_kernel(other.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0);
253 }
254 }
255 }
256 }
257}
258
259/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
260 */
261template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
262struct triangular_solve_matrix<Scalar, Index, OnTheRight, Mode, Conjugate, TriStorageOrder, ColMajor,
263 OtherInnerStride> {
264 static EIGEN_DONT_INLINE void run(Index size, Index otherSize, const Scalar* _tri, Index triStride, Scalar* _other,
265 Index otherIncr, Index otherStride, level3_blocking<Scalar, Scalar>& blocking);
266};
267
268template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder, int OtherInnerStride>
269EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar, Index, OnTheRight, Mode, Conjugate, TriStorageOrder, ColMajor,
270 OtherInnerStride>::run(Index size, Index otherSize, const Scalar* _tri,
271 Index triStride, Scalar* _other, Index otherIncr,
272 Index otherStride,
273 level3_blocking<Scalar, Scalar>& blocking) {
274 Index rows = otherSize;
275
276#if defined(EIGEN_VECTORIZE_AVX512) && EIGEN_USE_AVX512_TRSM_R_KERNELS && EIGEN_ENABLE_AVX512_NOCOPY_TRSM_R_CUTOFFS
277 EIGEN_IF_CONSTEXPR(
278 (OtherInnerStride == 1 && (std::is_same<Scalar, float>::value || std::is_same<Scalar, double>::value))) {
279 // TODO: Investigate better heuristics for cutoffs.
280 std::ptrdiff_t l1, l2, l3;
281 manage_caching_sizes(GetAction, &l1, &l2, &l3);
282 double L2Cap = 0.5; // 50% of L2 size
283 if (size < avx512_trsm_cutoff<Scalar>(l2, rows, L2Cap)) {
284 trsmKernelR<Scalar, Index, Mode, Conjugate, TriStorageOrder, OtherInnerStride, /*Specialized=*/true>::kernel(
285 size, rows, _tri, triStride, _other, 1, otherStride);
286 return;
287 }
288 }
289#endif
290
291 typedef blas_data_mapper<Scalar, Index, ColMajor, Unaligned, OtherInnerStride> LhsMapper;
292 typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> RhsMapper;
293 LhsMapper lhs(_other, otherStride, otherIncr);
294 RhsMapper rhs(_tri, triStride);
295
296 typedef gebp_traits<Scalar, Scalar> Traits;
297 enum {
298 RhsStorageOrder = TriStorageOrder,
299 SmallPanelWidth = plain_enum_max(Traits::mr, Traits::nr),
300 IsLower = (Mode & Lower) == Lower
301 };
302
303 Index kc = blocking.kc(); // cache block size along the K direction
304 Index mc = (std::min)(rows, blocking.mc()); // cache block size along the M direction
305
306 std::size_t sizeA = kc * mc;
307 std::size_t sizeB = kc * size;
308
309 ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
310 ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
311
312 gebp_kernel<Scalar, Scalar, Index, LhsMapper, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
313 gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
314 gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder, false, true> pack_rhs_panel;
315 gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor,
316 false, true>
317 pack_lhs_panel;
318
319 for (Index k2 = IsLower ? size : 0; IsLower ? k2 > 0 : k2 < size; IsLower ? k2 -= kc : k2 += kc) {
320 const Index actual_kc = (std::min)(IsLower ? k2 : size - k2, kc);
321 Index actual_k2 = IsLower ? k2 - actual_kc : k2;
322
323 Index startPanel = IsLower ? 0 : k2 + actual_kc;
324 Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
325 Scalar* geb = blockB + actual_kc * actual_kc;
326
327 if (rs > 0) pack_rhs(geb, rhs.getSubMapper(actual_k2, startPanel), actual_kc, rs);
328
329 // triangular packing (we only pack the panels off the diagonal,
330 // neglecting the blocks overlapping the diagonal
331 {
332 for (Index j2 = 0; j2 < actual_kc; j2 += SmallPanelWidth) {
333 Index actualPanelWidth = std::min<Index>(actual_kc - j2, SmallPanelWidth);
334 Index actual_j2 = actual_k2 + j2;
335 Index panelOffset = IsLower ? j2 + actualPanelWidth : 0;
336 Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
337
338 if (panelLength > 0)
339 pack_rhs_panel(blockB + j2 * actual_kc, rhs.getSubMapper(actual_k2 + panelOffset, actual_j2), panelLength,
340 actualPanelWidth, actual_kc, panelOffset);
341 }
342 }
343
344 for (Index i2 = 0; i2 < rows; i2 += mc) {
345 const Index actual_mc = (std::min)(mc, rows - i2);
346
347 // triangular solver kernel
348 {
349 // for each small block of the diagonal (=> vertical panels of rhs)
350 for (Index j2 = IsLower ? (actual_kc - ((actual_kc % SmallPanelWidth) ? Index(actual_kc % SmallPanelWidth)
351 : Index(SmallPanelWidth)))
352 : 0;
353 IsLower ? j2 >= 0 : j2 < actual_kc; IsLower ? j2 -= SmallPanelWidth : j2 += SmallPanelWidth) {
354 Index actualPanelWidth = std::min<Index>(actual_kc - j2, SmallPanelWidth);
355 Index absolute_j2 = actual_k2 + j2;
356 Index panelOffset = IsLower ? j2 + actualPanelWidth : 0;
357 Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
358
359 // GEBP
360 if (panelLength > 0) {
361 gebp_kernel(lhs.getSubMapper(i2, absolute_j2), blockA, blockB + j2 * actual_kc, actual_mc, panelLength,
362 actualPanelWidth, Scalar(-1), actual_kc, actual_kc, // strides
363 panelOffset, panelOffset); // offsets
364 }
365
366 {
367 // unblocked triangular solve
368 trsmKernelR<Scalar, Index, Mode, Conjugate, TriStorageOrder, OtherInnerStride,
369 /*Specialized=*/true>::kernel(actualPanelWidth, actual_mc,
370 _tri + absolute_j2 + absolute_j2 * triStride, triStride,
371 _other + i2 * OtherInnerStride + absolute_j2 * otherStride,
372 otherIncr, otherStride);
373 }
374 // pack the just computed part of lhs to A
375 pack_lhs_panel(blockA, lhs.getSubMapper(i2, absolute_j2), actualPanelWidth, actual_mc, actual_kc, j2);
376 }
377 }
378
379 if (rs > 0)
380 gebp_kernel(lhs.getSubMapper(i2, startPanel), blockA, geb, actual_mc, actual_kc, rs, Scalar(-1), -1, -1, 0, 0);
381 }
382 }
383}
384} // end namespace internal
385
386} // end namespace Eigen
387
388#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H
@ UnitDiag
Definition Constants.h:215
@ Lower
Definition Constants.h:211
@ Upper
Definition Constants.h:213
@ ColMajor
Definition Constants.h:318
@ RowMajor
Definition Constants.h:320
@ OnTheLeft
Definition Constants.h:331
@ OnTheRight
Definition Constants.h:333
Namespace containing all symbols from the Eigen library.
Definition B01_Experimental.dox:1
const Eigen::CwiseUnaryOp< Eigen::internal::scalar_conjugate_op< typename Derived::Scalar >, const Derived > conj(const Eigen::ArrayBase< Derived > &x)
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:82