Eigen-unsupported  5.0.1-dev+284dcc12
 
BiCGSTABL.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2020 Chris Schoutrop <c.e.m.schoutrop@tue.nl>
// Copyright (C) 2020 Jens Wehner <j.wehner@esciencecenter.nl>
// Copyright (C) 2020 Jan van Dijk <j.v.dijk@tue.nl>
// Copyright (C) 2020 Adithya Vijaykumar
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

/*

  This implementation of BiCGStab(L) is based on the papers:

  General algorithm:
  1. G.L.G. Sleijpen, D.R. Fokkema. (1993). BiCGstab(l) for linear equations
     involving unsymmetric matrices with complex spectrum. Electronic
     Transactions on Numerical Analysis.

  Polynomial step update:
  2. G.L.G. Sleijpen, M.B. van Gijzen. (2010). Exploiting BiCGstab(l)
     strategies to induce dimension reduction. SIAM Journal on Scientific
     Computing.
  3. Fokkema, Diederik R. Enhanced implementation of BiCGstab(l) for
     solving linear systems of equations. Universiteit Utrecht. Mathematisch
     Instituut, 1996.
  4. Sleijpen, G.L., & van der Vorst, H.A. (1996). Reliable updated
     residuals in hybrid Bi-CG methods. Computing, 56(2), 141-163.
*/
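
/*
  Outline, as implemented below: each outer iteration performs up to L BiCG
  steps, followed (unless convergence was already reached) by a degree-L
  minimal-residual (polynomial) step; at strategic intervals a "reliable
  update" of the residual and of the accumulated solution is applied
  (refs. 3 and 4).
*/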

#ifndef EIGEN_BICGSTABL_H
#define EIGEN_BICGSTABL_H

namespace Eigen {

namespace internal {
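
/*
  Low-level BiCGStab(L) routine. Parameter summary (as used below):
    mat       - the matrix A.
    rhs       - the right-hand side vector b.
    x         - on input the initial guess, on output the computed solution.
    precond   - preconditioner, applied through precond.solve().
    iters     - on input the maximum number of iterations, on output the
                number of outer iterations performed.
    tol_error - on input the relative tolerance, on output an estimate of the
                relative error ||b - A*x|| / ||b||.
    L         - number of BiCG steps per outer iteration (clamped to the
                problem size).
  Returns false only if a breakdown occurred and the best available solution
  does not satisfy the tolerance.
*/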
template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool bicgstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters,
               typename Dest::RealScalar &tol_error, Index L) {
  using numext::abs;
  using numext::sqrt;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  const Index N = rhs.size();
  L = L < x.rows() ? L : x.rows();

  Index k = 0;

  const RealScalar tol = tol_error;
  const Index maxIters = iters;

  typedef Matrix<Scalar, Dynamic, 1> VectorType;
  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;

  DenseMatrixType rHat(N, L + 1);
  DenseMatrixType uHat(N, L + 1);

  // We start with an initial guess x_0 and set r_0 to the residual computed
  // from x_0.
  VectorType x0 = x;
  rHat.col(0) = rhs - mat * x0;  // r_0

  x.setZero();  // This will contain the updates to the solution.
  // rShadow is arbitrary, but must never be orthogonal to any residual.
  VectorType rShadow = VectorType::Random(N);

  VectorType x_prime = x;

  // Redundant: x is already set to 0
  // x.setZero();
  VectorType b_prime = rHat.col(0);

  // Initialize the remaining scalars and vectors.
  Scalar rho0 = 1.0;
  Scalar alpha = 0.0;
  Scalar omega = 1.0;

  uHat.col(0).setZero();

  bool bicg_convergence = false;

  const RealScalar normb = rhs.stableNorm();
  if (internal::isApprox(normb, RealScalar(0))) {
    x.setZero();
    iters = 0;
    return true;
  }
  RealScalar normr = rHat.col(0).stableNorm();
  RealScalar Mx = normr;
  RealScalar Mr = normr;

  // Keep track of the solution with the lowest residual.
  RealScalar normr_min = normr;
  VectorType x_min = x_prime + x;

  // Criterion for when to apply the group-wise update, following ref. 3.
  const RealScalar delta = 0.01;

  bool compute_res = false;
  bool update_app = false;

  while (normr > tol * normb && k < maxIters) {
    rho0 *= -omega;

    for (Index j = 0; j < L; ++j) {
      const Scalar rho1 = rShadow.dot(rHat.col(j));

      if (!(numext::isfinite)(rho1) || rho0 == RealScalar(0.0)) {
        // We cannot continue computing, return the best solution found.
        x += x_prime;

        // Check if x is better than the best stored solution thus far.
        normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();

        if (normr > normr_min || !(numext::isfinite)(normr)) {
          // x_min is a better solution than x, return x_min.
          x = x_min;
          normr = normr_min;
        }
        tol_error = normr / normb;
        iters = k;
        // x contains the updates to x0, add those back to obtain the solution.
        x = precond.solve(x);
        x += x0;
        return (normr < tol * normb);
      }

      const Scalar beta = alpha * (rho1 / rho0);
      rho0 = rho1;
      // Update search directions.
      uHat.leftCols(j + 1) = rHat.leftCols(j + 1) - beta * uHat.leftCols(j + 1);
      uHat.col(j + 1) = mat * precond.solve(uHat.col(j));
      const Scalar sigma = rShadow.dot(uHat.col(j + 1));
      alpha = rho1 / sigma;
      // Update residuals.
      rHat.leftCols(j + 1) -= alpha * uHat.middleCols(1, j + 1);
      rHat.col(j + 1) = mat * precond.solve(rHat.col(j));
      // Complete the BiCG iteration by updating x.
      x += alpha * uHat.col(0);
      normr = rHat.col(0).stableNorm();
      // Check for early exit.
      if (normr < tol * normb) {
        /*
          Convergence was achieved during the BiCG step. Without this check,
          BiCGStab(L) fails for trivial matrices, such as when the
          preconditioner is already the exact inverse, or when the input
          matrix is the identity.
        */
        bicg_convergence = true;
        break;
      } else if (normr < normr_min) {
        // We found an x with a lower residual, keep this one.
        x_min = x + x_prime;
        normr_min = normr;
      }
    }
    if (!bicg_convergence) {
      /*
        The polynomial / minimal-residual step.

        The QR Householder method for the argmin is more stable than (modified)
        Gram-Schmidt, in the sense that there is less loss of orthogonality. It
        is also more accurate than solving the normal equations, since the
        normal equations scale with the condition number squared.
      */
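      // gamma solves the least-squares problem
      //   argmin_gamma || rHat.col(0) - rHat.rightCols(L) * gamma ||_2
      // via a Householder QR factorization of rHat.rightCols(L).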
      const VectorType gamma = rHat.rightCols(L).householderQr().solve(rHat.col(0));
      x += rHat.leftCols(L) * gamma;
      rHat.col(0) -= rHat.rightCols(L) * gamma;
      uHat.col(0) -= uHat.rightCols(L) * gamma;
      normr = rHat.col(0).stableNorm();
      omega = gamma(L - 1);
    }
    if (normr < normr_min) {
      // We found an x with a lower residual, keep this one.
      x_min = x + x_prime;
      normr_min = normr;
    }

    k++;

    /*
      Reliable update part.

      The recursively computed residual can deviate from the true residual
      after several iterations. However, computing the residual from its
      definition costs extra matrix-vector products and should not be done at
      every iteration. The reliable update strategy therefore computes the true
      residual r = b - A*x only at strategic intervals. Furthermore, a
      "group-wise update" strategy is used to combine updates, which improves
      accuracy.
    */
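
    // In formulas, as implemented below: the group-wise update is triggered
    // when  ||r|| < delta * ||b||  and  ||b|| <= Mx,  and the true residual
    // r = b - A*x is recomputed when a group-wise update is pending, or when
    // ||r|| < delta * Mr  and  ||b|| <= Mr.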

    // Maximum norm of residuals since the last update of x.
    Mx = numext::maxi(Mx, normr);
    // Maximum norm of residuals since the last computation of the true residual.
    Mr = numext::maxi(Mr, normr);

    if (normr < delta * normb && normb <= Mx) {
      update_app = true;
    }

    if (update_app || (normr < delta * Mr && normb <= Mr)) {
      compute_res = true;
    }

    if (bicg_convergence) {
      update_app = true;
      compute_res = true;
      bicg_convergence = false;
    }

    if (compute_res) {
      // Explicitly compute the residual from its definition.

      // This is equivalent to the shifted version of rhs - mat *
      // (precond.solve(x) + x0).
      rHat.col(0) = b_prime - mat * precond.solve(x);
      normr = rHat.col(0).stableNorm();
      Mr = normr;

      if (update_app) {
        // After the group-wise update, the original problem is translated to a
        // shifted one.
        x_prime += x;
        x.setZero();
        b_prime = rHat.col(0);
        Mx = normr;
      }
    }
    if (normr < normr_min) {
      // We found an x with a lower residual, keep this one.
      x_min = x + x_prime;
      normr_min = normr;
    }

    compute_res = false;
    update_app = false;
  }

  // Convert the internal variable to the true solution vector x.
  x += x_prime;

  normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();
  if (normr > normr_min || !(numext::isfinite)(normr)) {
    // x_min is a better solution than x, return x_min.
    x = x_min;
    normr = normr_min;
  }
  tol_error = normr / normb;
  iters = k;

  // x contains the updates to x0, add those back to obtain the solution.
  x = precond.solve(x);
  x += x0;
  return true;
}

}  // namespace internal

template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar>>
class BiCGSTABL;

namespace internal {

template <typename MatrixType_, typename Preconditioner_>
struct traits<Eigen::BiCGSTABL<MatrixType_, Preconditioner_>> {
  typedef MatrixType_ MatrixType;
  typedef Preconditioner_ Preconditioner;
};

}  // namespace internal

template <typename MatrixType_, typename Preconditioner_>
class BiCGSTABL : public IterativeSolverBase<BiCGSTABL<MatrixType_, Preconditioner_>> {
  typedef IterativeSolverBase<BiCGSTABL> Base;
  using Base::m_error;
  using Base::m_info;
  using Base::m_isInitialized;
  using Base::m_iterations;
  using Base::matrix;
  Index m_L;

 public:
  typedef MatrixType_ MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef Preconditioner_ Preconditioner;

  BiCGSTABL() : m_L(2) {}

  template <typename MatrixDerived>
  explicit BiCGSTABL(const EigenBase<MatrixDerived> &A) : Base(A.derived()), m_L(2) {}

  template <typename Rhs, typename Dest>
  void _solve_vector_with_guess_impl(const Rhs &b, Dest &x) const {
    m_iterations = Base::maxIterations();

    m_error = Base::m_tolerance;

    bool ret = internal::bicgstabl(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_L);
    m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
  }

  void setL(Index L) {
    eigen_assert(L >= 1 && "L needs to be positive");
    m_L = L;
  }
};
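
/*
  A minimal usage sketch (illustrative only; it assumes a sparse system
  A * x = b has already been assembled as Eigen::SparseMatrix<double> A and
  Eigen::VectorXd b, and that this header is pulled in through the unsupported
  IterativeSolvers module):

    #include <unsupported/Eigen/IterativeSolvers>

    Eigen::BiCGSTABL<Eigen::SparseMatrix<double>> solver;
    solver.setL(4);              // BiCGStab(4) instead of the default L = 2.
    solver.setTolerance(1e-10);  // Inherited from IterativeSolverBase.
    solver.compute(A);
    Eigen::VectorXd x = solver.solve(b);
    if (solver.info() != Eigen::Success) {
      // Handle NoConvergence or NumericalIssue.
    }
*/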

}  // namespace Eigen

#endif /* EIGEN_BICGSTABL_H */