Eigen 3.2.10
Inverse_SSE.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
// Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided, that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.

#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H

namespace Eigen {

namespace internal {

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
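    // Editor's note (added): XORing an IEEE-754 float with 0x80000000 flips its
    // sign bit, so this constant realizes a (+, -, -, +) sign pattern; it is
    // applied to the broadcast reciprocal determinant near the end of run().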

    // Load the full matrix into registers
    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each sub-matrix fits in a single
    // register. Hence we get better locality for the calculations.
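    // Editor's note on notation (added): in the comments below, X# denotes the
    // adjugate of the 2x2 block X, i.e. adj([a b; c d]) = [d -b; -c a], and |X|
    // its determinant. Writing the matrix as M = [A B; C D], the code uses the
    // blockwise identity (restated further down in the original comments)
    //   det(M) = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C)),
    // and assembles the inverse from iA = A*|D| - B*(D#*C) and its three
    // analogues; the final shuffles take the adjugate of each 2x2 block.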

    __m128 A, B, C, D; // the four sub-matrices
    if(!StorageOrdersMatch)
    {
      A = _mm_unpacklo_ps(_L1, _L2);
      B = _mm_unpacklo_ps(_L3, _L4);
      C = _mm_unpackhi_ps(_L1, _L2);
      D = _mm_unpackhi_ps(_L3, _L4);
    }
    else
    {
      A = _mm_movelh_ps(_L1, _L2);
      B = _mm_movehl_ps(_L2, _L1);
      C = _mm_movelh_ps(_L3, _L4);
      D = _mm_movehl_ps(_L4, _L3);
    }
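    // Editor's note (added): when the storage orders match, movelh/movehl
    // simply split two packets (rows) into the left and right 2x2 blocks; when
    // they differ, unpacklo/unpackhi interleave the packets, transposing each
    // block on the fly so both paths end up with the same block layout.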

    __m128 iA, iB, iC, iD,      // partial inverse of the sub-matrices
           DC, AB;
    __m128 dA, dB, dC, dD;      // determinant of the sub-matrices
    __m128 det, d, d1, d2;
    __m128 rd;                  // reciprocal of the determinant

    // AB = A# * B
    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
    // DC = D# * C
    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
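    // Editor's note (added): with a block stored as (a0,a1,a2,a3) = [a0 a1; a2 a3],
    // mask 0x0F yields (a3,a3,a0,a0), mask 0xA5 yields (a1,a1,a2,a2), and mask
    // 0x4E swaps the halves of B into (b2,b3,b0,b1); lane by lane this evaluates
    // exactly [a3 -a1; -a2 a0] * B = A# * B.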

    // dA = |A|
    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
    // dB = |B|
    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));

    // dC = |C|
    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
    // dD = |D|
    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
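    // Editor's note (added): mask 0x5F produces (a3,a3,a1,a1), so the multiply
    // gives (a0*a3, a1*a3, a2*a1, a3*a1); subtracting the upper half from the
    // lower leaves the 2x2 determinant a0*a3 - a2*a1 in the bottom lane.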

    // d = trace(AB*DC) = trace(A#*B*D#*C)
    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);

    // iD = C*A#*B
    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
    // iA = B*D#*C
    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));

    // d = trace(AB*DC) = trace(A#*B*D#*C) [continued]
    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));
    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
    d1 = _mm_mul_ss(dA,dD);
    d2 = _mm_mul_ss(dB,dC);

    // iD = D*|A| - C*A#*B
    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);

    // iA = A*|D| - B*D#*C
    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);

//  #ifdef ZERO_SINGULAR
//    rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
//  #endif
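    // Editor's note (added): by default there is no singularity check, so a
    // matrix with det == 0 produces Inf/NaN entries; the commented-out guard
    // above would instead force the reciprocal (and hence the result) to zero.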

    // iB = D * (A#B)# = D*B#*A
    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
    // iC = A * (D#C)# = A*C#*D
    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));

    rd = _mm_shuffle_ps(rd,rd,0);
    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));

    // iB = C*|B| - D*B#*A
    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);

    // iC = B*|C| - A*C#*D
    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);

    // iX = iX / det
    iA = _mm_mul_ps(rd,iA);
    iB = _mm_mul_ps(rd,iB);
    iC = _mm_mul_ps(rd,iC);
    iD = _mm_mul_ps(rd,iD);

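    // Editor's note (added): each output row below interleaves elements of two
    // signed block products; together with the (+,-,-,+) pattern already folded
    // into rd, the shuffles write adj(iX)/det for each 2x2 block, which are
    // precisely the rows of the final inverse.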
    DenseIndex res_stride = result.outerStride();
    float* res = result.data();
    pstoret<float, Packet4f, ResultAlignment>(res+0,            _mm_shuffle_ps(iA,iB,0x77));
    pstoret<float, Packet4f, ResultAlignment>(res+res_stride,   _mm_shuffle_ps(iA,iB,0x22));
    pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));
    pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));
  }
};

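// Editor's usage note (a hedged sketch, not part of the original file): this
// specialization is not called directly; Eigen's inverse() dispatches to it
// for fixed-size 4x4 float matrices when SSE is enabled, e.g.:
//
//   #include <Eigen/Dense>
//   Eigen::Matrix4f m  = Eigen::Matrix4f::Random();
//   Eigen::Matrix4f mi = m.inverse();  // ends up in run() above
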
template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };
  static void run(const MatrixType& matrix, ResultType& result)
  {
    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

    // The inverse is calculated using the "divide and conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds two matrix elements, each sub-matrix occupies two
    // registers. Hence we get better locality for the calculations.
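    // Editor's note (added): the notation (X# for the 2x2 adjugate, |X| for the
    // determinant) and the blockwise formula are the same as in the float
    // specialization above; here each block X is held as two rows X1, X2.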

    // the four sub-matrices
    __m128d A1, A2, B1, B2, C1, C2, D1, D2;

    if(StorageOrdersMatch)
    {
      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
    }
    else
    {
      __m128d tmp;
      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
      tmp = A1;
      A1 = _mm_unpacklo_pd(A1,A2);
      A2 = _mm_unpackhi_pd(tmp,A2);
      tmp = C1;
      C1 = _mm_unpacklo_pd(C1,C2);
      C2 = _mm_unpackhi_pd(tmp,C2);

      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
      tmp = B1;
      B1 = _mm_unpacklo_pd(B1,B2);
      B2 = _mm_unpackhi_pd(tmp,B2);
      tmp = D1;
      D1 = _mm_unpacklo_pd(D1,D2);
      D2 = _mm_unpackhi_pd(tmp,D2);
    }

    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial inverse of the sub-matrices
            DC1, DC2, AB1, AB2;
    __m128d dA, dB, dC, dD;                         // determinant of the sub-matrices
    __m128d det, d1, d2, rd;

    // dA = |A|
    dA = _mm_shuffle_pd(A2, A2, 1);
    dA = _mm_mul_pd(A1, dA);
    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
    // dB = |B|
    dB = _mm_shuffle_pd(B2, B2, 1);
    dB = _mm_mul_pd(B1, dB);
    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));

    // AB = A# * B
    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));

    // dC = |C|
    dC = _mm_shuffle_pd(C2, C2, 1);
    dC = _mm_mul_pd(C1, dC);
    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
    // dD = |D|
    dD = _mm_shuffle_pd(D2, D2, 1);
    dD = _mm_mul_pd(D1, dD);
    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
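    // Editor's note (added): for a block stored as rows X1=(x0,x1), X2=(x2,x3),
    // _mm_shuffle_pd(X2,X2,1) swaps the two lanes, the multiply gives
    // (x0*x3, x1*x2), and _mm_sub_sd leaves |X| = x0*x3 - x1*x2 in the low
    // lane; this is the two-lane analogue of the 0x5F trick used above.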

    // DC = D# * C
    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));

    // rd = trace(AB*DC) = trace(A#*B*D#*C)
    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
    rd = _mm_add_pd(d1, d2);
    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));

    // iD = C*A#*B
    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));

    // iA = B*D#*C
    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));

    // iD = D*|A| - C*A#*B
    dA = _mm_shuffle_pd(dA,dA,0);
    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);

    // iA = A*|D| - B*D#*C
    dD = _mm_shuffle_pd(dD,dD,0);
    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);

    d1 = _mm_mul_sd(dA, dD);
    d2 = _mm_mul_sd(dB, dC);

    // iB = D * (A#B)# = D*B#*A
    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_add_sd(d1, d2);
    det = _mm_sub_sd(det, rd);

    // iC = A * (D#C)# = A*C#*D
    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));

    rd = _mm_div_sd(_mm_set_sd(1.0), det);
//  #ifdef ZERO_SINGULAR
//    rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
//  #endif
    rd = _mm_shuffle_pd(rd,rd,0);

    // iB = C*|B| - D*B#*A
    dB = _mm_shuffle_pd(dB,dB,0);
    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

    d1 = _mm_xor_pd(rd, _Sign_PN);
    d2 = _mm_xor_pd(rd, _Sign_NP);

    // iC = B*|C| - A*C#*D
    dC = _mm_shuffle_pd(dC,dC,0);
    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);

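    // Editor's note (added): in (low, high) lane order, d1 = (+1/det, -1/det)
    // and d2 = (-1/det, +1/det); combined with the shuffles below they store
    // adj(iX)/det for each 2x2 block, i.e. the rows of the final inverse,
    // playing the same role as the PNNP mask in the float path.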
    DenseIndex res_stride = result.outerStride();
    double* res = result.data();
    pstoret<double, Packet2d, ResultAlignment>(res+0,              _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));
    pstoret<double, Packet2d, ResultAlignment>(res+res_stride,     _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
    pstoret<double, Packet2d, ResultAlignment>(res+2,              _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));
    pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2,   _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
    pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride,   _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));
    pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride,   _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
    pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));
    pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_INVERSE_SSE_H