@@ -34,9 +34,6 @@ import { IllegalOperationError, IllegalArgumentError } from '../utils/errors';
 /** A guess of the horizontal field-of-view of a typical camera, in degrees */
 const HFOV_GUESS = 60; // https://developer.apple.com/library/archive/documentation/DeviceInformation/Reference/iOSDeviceCompatibility/Cameras/Cameras.html
 
-/** Number of iterations used to refine the estimated pose */
-const POSE_ITERATIONS = 30;
-
 /** Convert degrees to radians */
 const DEG2RAD = 0.017453292519943295; // pi / 180
 
@@ -47,17 +44,26 @@ const RAD2DEG = 57.29577951308232; // 180 / pi
 const EPSILON = 1e-6;
 
 /** Index of the horizontal focal length in the camera intrinsics matrix (column-major format) */
-export const FX = 0;
+const FX = 0;
 
 /** Index of the vertical focal length in the camera intrinsics matrix */
-export const FY = 4;
+const FY = 4;
 
 /** Index of the horizontal position of the principal point in the camera intrinsics matrix */
-export const U0 = 6;
+const U0 = 6;
 
 /** Index of the vertical position of the principal point in the camera intrinsics matrix */
-export const V0 = 7;
+const V0 = 7;
+
+/** Number of iterations used to refine the estimated pose */
+const POSE_ITERATIONS = 30;
 
+/** Maximum number of iterations used when refining the translation vector */
+const REFINE_TRANSLATION_ITERATIONS = 15;
+
+/** Tolerance used to exit early when refining the translation vector */
+const REFINE_TRANSLATION_TOLERANCE = 1; // in units compatible with the size of the image sensor
+//FIXME make it a percentage?
 
 
 
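These indices address the 3x3 intrinsics matrix stored as a flat column-major array, i.e., entry (row, col) lives at index 3*col + row. A minimal sketch of the layout, with made-up values:

    // column-major layout of K = [ fx 0 u0 ; 0 fy v0 ; 0 0 1 ]
    const K: number[] = [
        800,   0, 0,   // 1st column: fx, 0, 0
          0, 800, 0,   // 2nd column: 0, fy, 0
        320, 240, 1    // 3rd column: u0, v0, 1
    ];
    // K[FX] == 800, K[FY] == 800, K[U0] == 320, K[V0] == 240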
@@ -66,8 +72,8 @@ export const V0 = 7;
  */
 export class CameraModel
 {
-    /** size of the image sensor, in pixels */
-    private _screenSize: SpeedySize;
+    /** size of the image */
+    private _imageSize: SpeedySize;
 
     /** 3x4 camera matrix */
     private _matrix: SpeedyMatrix;
@@ -88,7 +94,7 @@ export class CameraModel
      */
     constructor()
     {
-        this._screenSize = Speedy.Size(0, 0);
+        this._imageSize = Speedy.Size(0, 0);
         this._matrix = Speedy.Matrix.Eye(3, 4);
         this._intrinsics = [1,0,0,0,1,0,0,0,1]; // 3x3 identity matrix
         this._extrinsics = [1,0,0,0,1,0,0,0,1,0,0,0]; // 3x4 matrix [ R | t ] = [ I | 0 ] no rotation & no translation
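The extrinsics array follows the same column-major convention: its twelve entries are the three columns of R followed by the translation t, which is why the identity initialization above is written as [1,0,0, 0,1,0, 0,0,1, 0,0,0]. A tiny sketch, assuming E holds such an array:

    const E = [1,0,0, 0,1,0, 0,0,1, 0,0,0]; // [ R | t ] with R = I and t = 0
    const t = [ E[9], E[10], E[11] ];       // the translation is the last column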
@@ -97,23 +103,19 @@ export class CameraModel
 
     /**
      * Initialize the model
-     * @param screenSize
+     * @param imageSize
      */
-    init(screenSize: SpeedySize): void
+    init(imageSize: SpeedySize): void
     {
-        // validate
-        if(screenSize.area() == 0)
-            throw new IllegalArgumentError(`Can't initialize the camera model with screenSize = ${screenSize.toString()}`);
+        // log
+        Utils.log(`Initializing the camera model...`);
 
-        // set the screen size
-        this._screenSize.width = screenSize.width;
-        this._screenSize.height = screenSize.height;
+        // set the imageSize
+        this._imageSize.width = imageSize.width;
+        this._imageSize.height = imageSize.height;
 
         // reset the model
         this.reset();
-
-        // log
-        Utils.log(`Initializing the camera model...`);
     }
 
     /**
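A minimal usage sketch of the new signature; the instance name and the image size are arbitrary:

    const camera = new CameraModel();
    camera.init(Speedy.Size(1280, 720)); // dimensions of the input image, in pixels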
@@ -128,31 +130,14 @@ export class CameraModel
     /**
      * Update the camera model
      * @param homography 3x3 perspective transform
-     * @param screenSize may change over time (e.g., when going from portrait to landscape or vice-versa)
      * @returns promise that resolves to a camera matrix
      */
-    update(homography: SpeedyMatrix, screenSize: SpeedySize): SpeedyPromise<SpeedyMatrix>
+    update(homography: SpeedyMatrix): SpeedyPromise<SpeedyMatrix>
     {
         // validate the shape of the homography
         if(homography.rows != 3 || homography.columns != 3)
             throw new IllegalArgumentError(`Camera model: provide a homography matrix`);
 
-        // validate screenSize
-        if(screenSize.area() == 0)
-            throw new IllegalArgumentError(`Camera model: invalid screenSize = ${screenSize.toString()}`);
-
-        // changed screen size?
-        if(!this._screenSize.equals(screenSize)) {
-            Utils.log(`Camera model: detected a change in screen size...`);
-
-            // update the screen size
-            this._screenSize.width = screenSize.width;
-            this._screenSize.height = screenSize.height;
-
-            // reset camera
-            this.reset();
-        }
-
         // read the entries of the homography
         const h = homography.read();
         const h11 = h[0], h12 = h[3], h13 = h[6],
@@ -191,9 +176,7 @@ export class CameraModel
     }
 
     /**
-     * The camera matrix that maps the 3D normalized space [-1,1]^3 to the
-     * 2D AR screen space (measured in pixels)
-     * @returns 3x4 camera matrix
+     * The 3x4 camera matrix
      */
     get matrix(): SpeedyMatrix
     {
@@ -201,26 +184,52 @@ export class CameraModel
     }
 
     /**
-     * Camera intrinsics matrix
-     * @returns 3x3 intrinsics matrix in column-major format
+     * The aspect ratio of the image
      */
-    get intrinsics(): number[]
+    get aspectRatio(): number
     {
-        return this._intrinsics;
+        return this._imageSize.width / this._imageSize.height;
     }
 
     /**
-     * Camera extrinsics matrix
-     * @returns 3x4 extrinsics matrix [ R | t ] in column-major format
+     * Focal length in pixels (projection distance in the pinhole camera model)
+     * same as (focal length in mm) * (number of pixels per world unit in pixels/mm)
      */
-    get extrinsics(): number[]
+    get focalLength(): number
     {
-        return this._extrinsics;
+        return this._intrinsics[FX]; // fx == fy
     }
 
     /**
+     * Horizontal field-of-view, given in radians
+     */
+    get fovx(): number
+    {
+        return 2 * Math.atan(this._intrinsics[U0] / this._intrinsics[FX]);
+    }
+
+    /**
+     * Vertical field-of-view, given in radians
+     */
+    get fovy(): number
+    {
+        return 2 * Math.atan(this._intrinsics[V0] / this._intrinsics[FY]);
+    }
+
+    /**
+     * Principal point
+     * @returns principal point
+     */
+    /*
+    principalPoint(): SpeedyPoint2
+    {
+        return Speedy.Point2(this._intrinsics[U0], this._intrinsics[V0]);
+    }
+    */
+
+    /**
      * Convert coordinates from normalized space [-1,1]^3 to a
-     * "3D pixel space" based on the dimensions of the AR screen.
+     * "3D pixel space" based on the dimensions of the image sensor.
      *
      * We perform a 180-degrees rotation around the x-axis so that
      * it looks nicer (the y-axis grows downwards in image space).
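Since the model places the principal point at the center of the image (u0 = width/2, v0 = height/2), these getters return the full field-of-view. A short sketch converting them to degrees with the RAD2DEG constant defined at the top of the file; the camera instance is hypothetical:

    const hfov = camera.fovx * RAD2DEG; // about 60 right after a reset in landscape, by construction of HFOV_GUESS
    const vfov = camera.fovy * RAD2DEG;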
@@ -236,8 +245,8 @@ export class CameraModel
      */
     denormalizer(): SpeedyMatrix
     {
-        const w = this._screenSize.width / 2; // half width, in pixels
-        const h = this._screenSize.height / 2; // half height, in pixels
+        const w = this._imageSize.width / 2; // half width, in pixels
+        const h = this._imageSize.height / 2; // half height, in pixels
         const d = Math.min(w, h); // virtual unit length, in pixels
 
         /*
@@ -258,49 +267,74 @@ export class CameraModel
     }
 
     /**
-     * Size of the AR screen space, in pixels
-     * @returns size in pixels
+     * Compute the view matrix in AR screen space, measured in pixels.
+     * This 4x4 matrix moves 3D points from world space to view space.
+     * We assume that the camera is looking in the direction of the
+     * negative z-axis (WebGL-friendly)
+     * @param camera
+     * @returns a 4x4 matrix describing a rotation and a translation
      */
-    get screenSize(): SpeedySize
+    computeViewMatrix(): SpeedyMatrix
     {
-        return this._screenSize;
-    }
+        const E = this._extrinsics;
 
-    /**
-     * Focal length in pixel units (projection distance in the pinhole camera model)
-     * same as (focal length in mm) * (number of pixels per world unit in pixels/mm)
-     * @returns focal length
-     */
-    get focalLength(): number
-    {
-        return this._intrinsics[FY]; // fx == fy
-    }
+        /*
 
-    /**
-     * Horizontal field-of-view, given in radians
-     * @returns vertical field-of-view
-     */
-    get fovx(): number
-    {
-        return 2 * Math.atan(this._intrinsics[U0] / this._intrinsics[FX]);
-    }
+        // this is the view matrix in AR screen space, measured in pixels
+        // we augment the extrinsics matrix, making it 4x4 by adding a
+        // [ 0 0 0 1 ] row. Below, E is a 3x4 extrinsics matrix
+        const V = Speedy.Matrix(4, 4, [
+            E[0], E[1], E[2], 0,
+            E[3], E[4], E[5], 0,
+            E[6], E[7], E[8], 0,
+            E[9], E[10], E[11], 1
+        ]);
 
-    /**
-     * Vertical field-of-view, given in radians
-     * @returns vertical field-of-view
-     */
-    get fovy(): number
-    {
-        return 2 * Math.atan(this._intrinsics[V0] / this._intrinsics[FY]);
+        // we premultiply V by F, which performs a rotation around the
+        // x-axis by 180 degrees, so that we get the 3D objects in front
+        // of the camera pointing in the direction of the negative z-axis
+        const F = Speedy.Matrix(4, 4, [
+            1, 0, 0, 0,
+            0,-1, 0, 0,
+            0, 0,-1, 0,
+            0, 0, 0, 1
+        ]);
+
+        Matrix F * V is matrix V with the second and third rows negated
+
+        */
+
+        return Speedy.Matrix(4, 4, [
+            E[0],-E[1],-E[2], 0,
+            E[3],-E[4],-E[5], 0,
+            E[6],-E[7],-E[8], 0,
+            E[9],-E[10],-E[11], 1
+        ]);
     }
 
     /**
-     * Principal point
-     * @returns principal point, in pixel coordinates
+     * Compute a perspective projection matrix for WebGL
+     * @param near distance of the near plane
+     * @param far distance of the far plane
      */
-    principalPoint(): SpeedyPoint2
+    computeProjectionMatrix(near: number, far: number): SpeedyMatrix
     {
-        return Speedy.Point2(this._intrinsics[U0], this._intrinsics[V0]);
+        const K = this._intrinsics;
+
+        // we assume that the principal point is at the center of the image
+        const top = near * (K[V0] / K[FY]);
+        const right = near * (K[U0] / K[FX]);
+        const bottom = -top, left = -right; // symmetric frustum
+
+        // a derivation of this projection matrix can be found at
+        // https://www.songho.ca/opengl/gl_projectionmatrix.html
+        // http://learnwebgl.brown37.net/08_projections/projections_perspective.html
+        return Speedy.Matrix(4, 4, [
+            2 * near / (right - left), 0, 0, 0,
+            0, 2 * near / (top - bottom), 0, 0,
+            (right + left) / (right - left), (top + bottom) / (top - bottom), -(far + near) / (far - near), -1,
+            0, 0, -2 * far * near / (far - near), 0
+        ]);
     }
 
     /**
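A minimal sketch of how these two matrices can be combined for rendering; the camera instance and the near/far distances are arbitrary:

    const view = camera.computeViewMatrix();                       // world space -> view space
    const projection = camera.computeProjectionMatrix(0.1, 10000); // near/far picked arbitrarily
    const viewProjection = Speedy.Matrix(projection.times(view));
    const data = viewProjection.read(); // 16 numbers in column-major order, suitable for a mat4 uniform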
@@ -321,10 +355,10 @@ export class CameraModel
      */
     private _resetIntrinsics(): void
     {
-        const cameraWidth = Math.max(this._screenSize.width, this._screenSize.height); // portrait or landscape?
+        const cameraWidth = Math.max(this._imageSize.width, this._imageSize.height); // portrait or landscape?
 
-        const u0 = this._screenSize.width / 2;
-        const v0 = this._screenSize.height / 2;
+        const u0 = this._imageSize.width / 2;
+        const v0 = this._imageSize.height / 2;
         const fx = (cameraWidth / 2) / Math.tan(DEG2RAD * HFOV_GUESS / 2);
         const fy = fx;
 
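As a quick sanity check of the guessed intrinsics: for a 1280x720 landscape image, cameraWidth = 1280, so with HFOV_GUESS = 60 we get fx = fy = 640 / tan(30 degrees), roughly 1108.5 pixels, with u0 = 640 and v0 = 360. These numbers only illustrate the formulas above:

    const fx = (1280 / 2) / Math.tan(DEG2RAD * 60 / 2); // ~1108.5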
@@ -401,10 +435,10 @@ export class CameraModel
 
         // sanity check
         if(Number.isNaN(scale))
-            return Speedy.Matrix(3, 3, (new Array(9)).fill(Number.NaN));
+            return Speedy.Matrix(3, 3, (new Array<number>(9)).fill(Number.NaN));
 
         // recover the rotation
-        let r = new Array(6) as number[];
+        let r = new Array<number>(6);
         r[0] = scale * h11;
         r[1] = scale * h21;
         r[2] = scale * h31;
@@ -412,8 +446,8 @@ export class CameraModel
         r[4] = scale * h22;
         r[5] = scale * h32;
 
-        // refine the rotation
-        r = this._refineRotation(r); // r is initially noisy
+        // refine the rotation (r is initially noisy)
+        r = this._refineRotation(r);
 
         /*
 
@@ -438,7 +472,7 @@ export class CameraModel
         scale /= h1norm2 + h2norm2;
 
         // recover the translation
-        let t = new Array(3) as number[];
+        let t = new Array<number>(3);
         t[0] = scale * h13;
         t[1] = scale * h23;
         t[2] = scale * h33;
@@ -539,6 +573,8 @@ export class CameraModel
         // compute the Cholesky decomposition LL' of the diagonal matrix D
         // whose entries are the two eigenvalues of R'R and then invert L
         const s1 = Math.sqrt(eigval1), s2 = Math.sqrt(eigval2); // singular values of R (pick s1 >= s2)
+
+        /*
         const Linv = Speedy.Matrix(2, 2, [1/s1, 0, 0, 1/s2]); // L inverse
 
         // compute the correction matrix C = Q * Linv * Q', where Q = [q1|q2]
@@ -550,6 +586,25 @@ export class CameraModel
         // correct the rotation vectors r1 and r2 using C
         const R = Speedy.Matrix(3, 2, [r11, r21, r31, r12, r22, r32]);
         return Speedy.Matrix(R.times(C)).read();
+        */
+
+        // find C = Q * Linv * Q' manually
+        // [ a  b ]  is symmetric
+        // [ b  c ]
+        const a = x1*x1/s1 + x2*x2/s2;
+        const b = x1*y1/s1 + x2*y2/s2;
+        const c = y1*y1/s1 + y2*y2/s2;
+
+        // find RC manually
+        return [
+            a*r11 + b*r12,
+            a*r21 + b*r22,
+            a*r31 + b*r32,
+
+            b*r11 + c*r12,
+            b*r21 + c*r22,
+            b*r31 + c*r32
+        ];
     }
 
     /**
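The coefficients a, b, c come from expanding the correction matrix as a sum of outer products: with Q = [q1|q2], q1 = (x1, y1), q2 = (x2, y2) and Linv = diag(1/s1, 1/s2), we get C = Q * Linv * Q' = (q1 q1')/s1 + (q2 q2')/s2, whose entries are exactly a = x1*x1/s1 + x2*x2/s2, b = x1*y1/s1 + x2*y2/s2 and c = y1*y1/s1 + y2*y2/s2. The returned array is then the 3x2 product [r1|r2] * C written out column by column, which is presumably why the matrix-based version above was commented out.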
@@ -587,7 +642,7 @@ export class CameraModel
         const r21 = rot[1], r22 = rot[4];
         const r31 = rot[2], r32 = rot[5];
 
-        // sample points [ xi yi ]' in AR screen space
+        // sample points [ xi yi ]' in screen space
         //const x = [ 0.5, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0, 0.5, 0.0 ];
         //const y = [ 0.5, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 1.0, 0.5 ];
         const x = [ 0.5, 0.0, 1.0, 1.0, 0.0 ];
@@ -595,17 +650,17 @@ export class CameraModel
         const n = x.length;
         const n3 = 3*n;
 
-        const width = this._screenSize.width;
-        const height = this._screenSize.height;
+        const width = this._imageSize.width;
+        const height = this._imageSize.height;
         for(let i = 0; i < n; i++) {
             x[i] *= width;
             y[i] *= height;
         }
 
         // set auxiliary values: ai = H [ xi yi 1 ]'
-        const a1 = new Array(n) as number[];
-        const a2 = new Array(n) as number[];
-        const a3 = new Array(n) as number[];
+        const a1 = new Array<number>(n);
+        const a2 = new Array<number>(n);
+        const a3 = new Array<number>(n);
         for(let i = 0; i < n; i++) {
             a1[i] = x[i] * h11 + y[i] * h12 + h13;
             a2[i] = x[i] * h21 + y[i] * h22 + h23;
@@ -614,8 +669,8 @@ export class CameraModel
 
         // we'll solve M t = v for t with linear least squares
         // M: 3n x 3, v: 3n x 1, t: 3 x 1
-        const m = new Array(3*n * 3) as number[];
-        const v = new Array(3*n) as number[];
+        const m = new Array<number>(3*n * 3);
+        const v = new Array<number>(3*n);
         for(let i = 0, k = 0; k < n; i += 3, k++) {
             m[i] = m[i+n3+1] = m[i+n3+n3+2] = 0;
             m[i+n3] = -(m[i+1] = a3[k]);
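In closed form, the least-squares solution would be t = (M'M)^-1 M'v. Rather than assembling and inverting the 3x3 normal equations, the code refines the initial estimate t0 iteratively with the lightweight gradient-descent loop in the next hunk.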
@@ -676,20 +731,18 @@ export class CameraModel
         */
 
         // gradient descent: super lightweight implementation
-        const r = new Array(3*n) as number[];
-        const c = new Array(3) as number[];
-        const Mc = new Array(3*n) as number[];
+        const r = new Array<number>(3*n);
+        const c = new Array<number>(3);
+        const Mc = new Array<number>(3*n);
 
         // initial guess
-        const t = new Array(3) as number[];
+        const t = new Array<number>(3);
         t[0] = t0[0];
         t[1] = t0[1];
         t[2] = t0[2];
 
         // iterate
-        const MAX_ITERATIONS = 15;
-        const TOLERANCE = 1;
-        for(let it = 0; it < MAX_ITERATIONS; it++) {
+        for(let it = 0; it < REFINE_TRANSLATION_ITERATIONS; it++) {
             //console.log("it",it+1);
 
             // compute residual r = Mt - v
@@ -719,7 +772,7 @@ export class CameraModel
             for(let i = 0; i < 3; i++)
                 num += c[i] * c[i];
             //console.log("c'c=",num);
-            if(num < TOLERANCE)
+            if(num < REFINE_TRANSLATION_TOLERANCE)
                 break;
 
             // compute (Mc)'(Mc)
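For reference, the surrounding loop appears to implement steepest descent with an exact line search on E(t) = |Mt - v|^2 / 2: the residual is r = Mt - v, the descent direction is c = M'r, and the optimal step along -c is alpha = (c'c) / ((Mc)'(Mc)), so each iteration updates t := t - alpha * c and exits early once c'c drops below REFINE_TRANSLATION_TOLERANCE.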
@@ -778,7 +831,7 @@ export class CameraModel
     }
 
     /**
-     * Estimate the pose [ R | t ] given a homography in AR screen space
+     * Estimate the pose [ R | t ] given a homography in sensor space
      * @param homography must be valid
      * @returns 3x4 matrix
      */