go home Home | Main Page | Modules | Namespace List | Class Hierarchy | Alphabetical List | Data Structures | File List | Namespace Members | Data Fields | Globals | Related Pages
vnl_adjugate_fixed.h
Go to the documentation of this file.
1// This is core/vnl/vnl_adjugate_fixed.h
2#ifndef vnl_adjugate_fixed_h_
3#define vnl_adjugate_fixed_h_
4//:
5// \file
6// \brief Calculates adjugate or cofactor matrix of a small vnl_matrix_fixed.
7// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
8// adjugate == inverse/det
9// cofactor == adjugate^T
10// \author Floris Berendsen
11// \date 18 April 2013
12//
13// \verbatim
14// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
15// \endverbatim
16
#include <cassert>

#include <vnl/vnl_det.h>
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_matrix_fixed.h>
#include <vnl/vnl_vector_fixed.h>
21
22//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
23// This allows you to write e.g.
24//
25// x = vnl_adjugate(A) * b;
26//
27// Note that this function is inlined (except for the call to vnl_det()),
28// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
29// since that one is using svd.
30//
31// \relatesalso vnl_matrix_fixed
32
33template <class T>
34vnl_matrix_fixed<T, 1, 1> vnl_adjugate(vnl_matrix_fixed<T, 1, 1> const & m)
35{
36 return vnl_matrix_fixed<T, 1, 1>(m(0, 0));
37}
38
39//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
40// This allows you to write e.g.
41//
42// x = vnl_adjugate(A) * b;
43//
44// Note that this function is inlined (except for the call to vnl_det()),
45// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
46// since that one is using svd.
47//
48// \relatesalso vnl_matrix_fixed
49
50template <class T>
51vnl_matrix_fixed<T, 2, 2> vnl_adjugate(vnl_matrix_fixed<T, 2, 2> const & m)
52{
53 T d[4];
54 d[0] = m(1, 1);
55 d[1] = -m(0, 1);
56 d[3] = m(0, 0);
57 d[2] = -m(1, 0);
58 return vnl_matrix_fixed<T, 2, 2>(d);
59}
60
61//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
62// This allows you to write e.g.
63//
64// x = vnl_adjugate_fixed(A) * b;
65//
66// Note that this function is inlined (except for the call to vnl_det()),
67// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
68// since that one is using svd.
69//
70// \relatesalso vnl_matrix_fixed
71
72template <class T>
73vnl_matrix_fixed<T, 3, 3> vnl_adjugate(vnl_matrix_fixed<T, 3, 3> const & m)
74{
75 T d[9];
76 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
77 d[1] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
78 d[2] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
79 d[3] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
80 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
81 d[5] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
82 d[6] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
83 d[7] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
84 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
85 return vnl_matrix_fixed<T, 3, 3>(d);
86}
87
//: Calculates the adjugate of a 4x4 vnl_matrix_fixed (not using svd)
// This allows you to write e.g.
//
//  x = vnl_adjugate(A) * b;
//
// The adjugate is the transposed cofactor matrix; it satisfies
// m * adj(m) == det(m) * I.  Each of the 16 entries below is a signed
// 3x3 minor of m, fully written out so the function is completely
// inlined (no decomposition, no loops).
//
// \relatesalso vnl_matrix_fixed

template <class T>
vnl_matrix_fixed<T, 4, 4> vnl_adjugate(vnl_matrix_fixed<T, 4, 4> const & m)
{
  // d is filled in row-major order: d[4*r + c] becomes entry (r,c) of the result.
  T d[16];
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[1] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[2] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[3] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
         m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  d[4] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[6] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[7] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
         m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  d[8] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[9] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  d[12] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
          m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[13] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
          m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
137
138//: Calculates adjugate_fixed of a small vnl_matrix_fixed (not using svd)
139// This allows you to write e.g.
140//
141// x = vnl_adjugate_fixed(A) * b;
142//
143// Note that this function is inlined (except for the call to vnl_det()),
144// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
145// since that one is using svd.
146//
147// \relatesalso vnl_matrix
148
149template <class T>
150vnl_matrix<T>
151vnl_adjugate_asfixed(vnl_matrix<T> const & m)
152{
153 assert(m.rows() == m.columns());
154 assert(m.rows() <= 4);
155 if (m.rows() == 1)
156 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
157 else if (m.rows() == 2)
158 return vnl_adjugate(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
159 else if (m.rows() == 3)
160 return vnl_adjugate(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
161 else
162 return vnl_adjugate(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
163}
164
165//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
166// This allows you to write e.g.
167//
168// x = vnl_cofactor(A) * b;
169//
170// Note that this function is inlined (except for the call to vnl_det()),
171// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
172// since that one is using svd. This is also faster than using
173//
174// x = vnl_adjugate_fixed(A).transpose() * b;
175//
176// \relatesalso vnl_matrix_fixed
177
178template <class T>
179vnl_matrix_fixed<T, 1, 1> vnl_cofactor(vnl_matrix_fixed<T, 1, 1> const & m)
180{
181 return vnl_matrix_fixed<T, 1, 1>(T(1) / m(0, 0));
182}
183
184//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
185// This allows you to write e.g.
186//
187// x = vnl_cofactor(A) * b;
188//
189// Note that this function is inlined (except for the call to vnl_det()),
190// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
191// since that one is using svd. This is also faster than using
192//
193// x = vnl_adjugate_fixed(A).transpose() * b;
194//
195// \relatesalso vnl_matrix_fixed
196
197template <class T>
198vnl_matrix_fixed<T, 2, 2> vnl_cofactor(vnl_matrix_fixed<T, 2, 2> const & m)
199{
200
201 T d[4];
202 d[0] = m(1, 1);
203 d[2] = -m(0, 1);
204 d[3] = m(0, 0);
205 d[1] = -m(1, 0);
206 return vnl_matrix_fixed<T, 2, 2>(d);
207}
208
209//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
210// This allows you to write e.g.
211//
212// x = vnl_cofactor(A) * b;
213//
214// Note that this function is inlined (except for the call to vnl_det()),
215// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
216// since that one is using svd. This is also faster than using
217//
218// x = vnl_adjugate_fixed(A).transpose() * b;
219//
220// \relatesalso vnl_matrix_fixed
221
222template <class T>
223vnl_matrix_fixed<T, 3, 3> vnl_cofactor(vnl_matrix_fixed<T, 3, 3> const & m)
224{
225
226 T d[9];
227 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
228 d[3] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
229 d[6] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
230 d[1] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
231 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
232 d[7] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
233 d[2] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
234 d[5] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
235 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
236 return vnl_matrix_fixed<T, 3, 3>(d);
237}
238
//: Calculates the cofactor matrix (transposed adjugate) of a 4x4 vnl_matrix_fixed
// This allows you to write e.g.
//
//  x = vnl_cofactor(A) * b;
//
// Each of the 16 entries is a signed 3x3 minor of m, fully written out so
// the function is completely inlined.  The expressions are identical to
// those in the 4x4 vnl_adjugate() above; only the destination indices are
// transposed, which is faster than using
//
//  x = vnl_adjugate(A).transpose() * b;
//
// \relatesalso vnl_matrix_fixed

template <class T>
vnl_matrix_fixed<T, 4, 4> vnl_cofactor(vnl_matrix_fixed<T, 4, 4> const & m)
{
  // d is filled in row-major order, d[4*r + c] == entry (r,c); the index
  // pattern (0,4,8,12, 1,5,9,13, ...) is the transpose of the adjugate's.
  T d[16];
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[4] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[8] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[12] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
          m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  d[1] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[9] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[13] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
          m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  d[2] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[6] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  d[3] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
         m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[7] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
         m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
290
291//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
292// This allows you to write e.g.
293//
294// x = vnl_cofactor(A) * b;
295//
296// Note that this function is inlined (except for the call to vnl_det()),
297// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
298// since that one is using svd. This is also faster than using
299//
300// x = vnl_adjugate_fixed(A).transpose() * b;
301//
302// \relatesalso vnl_matrix
303
304template <class T>
305vnl_matrix<T>
306vnl_cofactor(vnl_matrix<T> const & m)
307{
308 assert(m.rows() == m.columns());
309 assert(m.rows() <= 4);
310 if (m.rows() == 1)
311 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
312 else if (m.rows() == 2)
313 return vnl_cofactor(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
314 else if (m.rows() == 3)
315 return vnl_cofactor(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
316 else
317 return vnl_cofactor(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
318}
319
320
321template <class T>
322vnl_vector_fixed<T, 3> vnl_cofactor_row1(vnl_vector_fixed<T, 3> const & row2, vnl_vector_fixed<T, 3> const & row3)
323{
324 T d[3];
325 d[0] = (row2[1] * row3[2] - row2[2] * row3[1]);
326 d[1] = (row2[2] * row3[0] - row2[0] * row3[2]);
327 d[2] = (row2[0] * row3[1] - row2[1] * row3[0]);
328 return vnl_vector_fixed<T, 3>(d);
329}
330
331#endif // vnl_adjugate_fixed_h_
vnl_vector_fixed< T, 3 > vnl_cofactor_row1(vnl_vector_fixed< T, 3 > const &row2, vnl_vector_fixed< T, 3 > const &row3)
vnl_matrix< T > vnl_adjugate_asfixed(vnl_matrix< T > const &m)
vnl_matrix_fixed< T, 1, 1 > vnl_adjugate(vnl_matrix_fixed< T, 1, 1 > const &m)
vnl_matrix_fixed< T, 1, 1 > vnl_cofactor(vnl_matrix_fixed< T, 1, 1 > const &m)


Generated on Wed 12 Apr 2023 for elastix by doxygen 1.9.6 elastix logo