pyvale 2025.7.1__cp311-cp311-musllinux_1_2_aarch64.whl → 2025.7.2__cp311-cp311-musllinux_1_2_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -30,7 +30,7 @@ namespace util {
     std::vector<double> ftol_arr;
     std::vector<double> xtol_arr;
     std::vector<double> cost_arr;
-    std::vector<bool> conv_arr;
+    std::vector<uint8_t> conv_arr;
     bool at_end;


@@ -189,12 +189,16 @@ namespace util {
                 }
             }

-            // When partial count num of px in roi
+            // When partial count num of px in roi. if its outside
+            // the image its still not valid
             else {
-                if (is_valid_in_dims(px_x, px_y, px_hori, px_vert) &&
-                    is_valid_in_roi(px_x, px_y, px_hori, px_vert, img_roi)) {
+                if (is_valid_in_roi(px_x, px_y, px_hori, px_vert, img_roi)) {
                     valid_count++;
                 }
+                if (!is_valid_in_dims(px_x, px_y, px_hori, px_vert)) {
+                    valid = false;
+                    break;
+                }
             }
         }

@@ -202,7 +206,7 @@ namespace util {
         }

         // TODO: this is hardcoded so that atleast 70% of pixels in subset must be in ROI
-        if (partial) {
+        if (partial && valid) {
             if (valid_count >= (ss_size*ss_size) * (0.70)) {
                 valid = true;
             } else {
@@ -394,23 +398,44 @@ namespace util {
             int idx = img * ssdata.num + i;
             //int idx_p = num_params*idx;

+            // if the subset has not converged, set values to nan
+            if (!saveconf.output_unconverged && !conv_arr[idx]) {
+                u_arr[idx] = NAN;
+                v_arr[idx] = NAN;
+                for (int p = 0; p < num_params; p++){
+                    p_arr[num_params*idx+p] = NAN;
+                }
+                cost_arr[idx] = NAN;
+                ftol_arr[idx] = NAN;
+                xtol_arr[idx] = NAN;
+            }
+
+
             double mag = std::sqrt(u_arr[idx]*u_arr[idx]+
                                    v_arr[idx]*v_arr[idx]);

             // convert from corner to centre subset coords
             double ss_x = ssdata.coords[2*i ] + static_cast<double>(ssdata.size)/2.0 - 0.5;
             double ss_y = ssdata.coords[2*i+1] + static_cast<double>(ssdata.size)/2.0 - 0.5;
+

             write_int(outfile, ss_x);
             write_int(outfile, ss_y);
             write_dbl(outfile, u_arr[idx]);
             write_dbl(outfile, v_arr[idx]);
             write_dbl(outfile, mag);
-            write_bool(outfile, conv_arr[idx]);
+            write_uint8t(outfile, conv_arr[idx]);
             write_dbl(outfile, cost_arr[idx]);
             write_dbl(outfile, ftol_arr[idx]);
             write_dbl(outfile, xtol_arr[idx]);
             write_int(outfile, niter_arr[idx]);
+
+            if (saveconf.shape_params) {
+                for (int p = 0; p < num_params; p++){
+                    write_dbl(outfile, p_arr[num_params*idx+p]);
+                }
+            }
+
         }

         outfile.close();
@@ -429,7 +454,18 @@ namespace util {
         outfile << "cost" << delimiter;
         outfile << "ftol" << delimiter;
         outfile << "xtol" << delimiter;
-        outfile << "num_iterations\n";
+        outfile << "num_iterations";
+
+        // column headers for shape parameters
+        if (saveconf.shape_params) {
+            for (int p = 0; p < num_params; p++){
+                outfile << delimiter;
+                outfile << "shape_p" << p;
+            }
+        }
+
+        // newline after headers
+        outfile << "\n";

         for (int i = 0; i < ssdata.num; i++) {

@@ -440,6 +476,18 @@ namespace util {
             double ss_x = ssdata.coords[2*i ] + static_cast<double>(ssdata.size)/2.0 - 0.5;
             double ss_y = ssdata.coords[2*i+1] + static_cast<double>(ssdata.size)/2.0 - 0.5;

+            // if the subset has not converged, set values to nan
+            if (!saveconf.output_unconverged && !conv_arr[idx]) {
+                u_arr[idx] = NAN;
+                v_arr[idx] = NAN;
+                for (int p = 0; p < num_params; p++){
+                    p_arr[num_params*idx+p] = NAN;
+                }
+                cost_arr[idx] = NAN;
+                ftol_arr[idx] = NAN;
+                xtol_arr[idx] = NAN;
+            }
+

             outfile << ss_x << delimiter;
             outfile << ss_y << delimiter;
@@ -447,14 +495,24 @@ namespace util {
             outfile << v_arr[idx] << delimiter;
             outfile << sqrt(u_arr[idx]*u_arr[idx]+
                             v_arr[idx]*v_arr[idx]) << delimiter;
-            //for (int p = 0; p < num_params; p++){
-            //    outfile << p_arr[idx_p+p] << delimiter;
-            //}
-            outfile << conv_arr[idx] << delimiter;
+            outfile << static_cast<int>(conv_arr[idx]) << delimiter;
             outfile << cost_arr[idx] << delimiter;
             outfile << ftol_arr[idx] << delimiter;
             outfile << xtol_arr[idx] << delimiter;
-            outfile << niter_arr[idx] << "\n";
+            outfile << niter_arr[idx];
+
+            // write shape parameters if requested
+            if (saveconf.shape_params) {
+                for (int p = 0; p < num_params; p++){
+                    outfile << delimiter;
+                    outfile << p_arr[num_params*idx+p];
+                }
+            }
+
+            // newline after each subset
+            outfile << "\n";
+
+
         }
         outfile.close();
     }
@@ -532,8 +590,8 @@ namespace util {
     }

     void create_progress_bar(indicators::ProgressBar &bar,
-                             const std::vector<std::string> &filenames,
-                             const int img_num, const int num_ss){
+                             const std::string &bar_title,
+                             const int num_ss){
         //Hide cursor
         indicators::show_console_cursor(false);
         bar.set_option(indicators::option::BarWidth{50});
@@ -542,22 +600,20 @@ namespace util {
         bar.set_option(indicators::option::Lead{"#"});
         bar.set_option(indicators::option::Remainder{"-"});
         bar.set_option(indicators::option::End{"]"});
-        bar.set_option(indicators::option::PrefixText{filenames[img_num]});
+        bar.set_option(indicators::option::PrefixText{bar_title});
         bar.set_option(indicators::option::ShowPercentage{true});
         bar.set_option(indicators::option::ShowElapsedTime{true});
     }

-    void update_progress_bar(indicators::ProgressBar &bar, int i, int num_ss, std::atomic<int> &prev_pct) {
-        int curr_pct = static_cast<float>(i) / static_cast<float>(num_ss) * 100;
-        int expected = prev_pct.load();
+    void update_progress_bar(indicators::ProgressBar &bar, int i, int num_ss, int &prev_pct) {
+        int curr_pct = static_cast<int>((static_cast<float>(i) / num_ss) * 100.0f);

-        // Only update bar if we've passed the previous percentage
-        if (curr_pct > expected && prev_pct.compare_exchange_strong(expected, curr_pct)) {
-            #pragma omp critical
-            bar.set_progress(curr_pct);
+        // Only update if we've passed a new percentage
+        if (curr_pct > prev_pct) {
+            prev_pct = curr_pct;
+            bar.set_progress(curr_pct);
         }
     }


-
 }
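
Note for downstream processing: with output_unconverged left at its default of False, the engine now writes NaN into the result arrays for subsets that did not converge, so they can be masked after loading. A minimal NumPy sketch of that filtering; the u and v arrays below are hypothetical placeholders, not pyvale API:

    import numpy as np

    # hypothetical u/v displacement columns loaded from a results file
    u = np.array([0.12, np.nan, -0.05])
    v = np.array([0.30, np.nan, 0.02])

    converged = ~np.isnan(u)           # NaN rows mark unconverged subsets
    print(u[converged], v[converged])  # keep converged subsets only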
pyvale/dic2d.py CHANGED
@@ -6,9 +6,14 @@



+import os
+import io
+import sys
 import numpy as np
 from pathlib import Path

+import pybind11
+
 # import cython module
 import pyvale.dic2dcpp as dic2dcpp
 import pyvale.dicchecks as dicchecks
@@ -17,7 +22,7 @@ import pyvale.dicchecks as dicchecks
 def dic_2d(reference: np.ndarray | str | Path,
            deformed: np.ndarray | str | Path,
            roi_mask: np.ndarray,
-           seed: list[int],
+           seed: list[int] | list[np.int32] | np.ndarray,
            subset_size: int = 21,
            subset_step: int = 10,
            correlation_criteria: str="ZNSSD",
@@ -27,6 +32,7 @@ def dic_2d(reference: np.ndarray | str | Path,
            opt_precision: float=0.001,
            opt_threshold: float=0.9,
            bf_threshold: float=0.6,
+           num_threads: int | None = None,
            max_displacement: int=128,
            scanning_method: str="RG",
            fft_mad: bool=False,
@@ -35,7 +41,11 @@ def dic_2d(reference: np.ndarray | str | Path,
            output_basepath: Path | str = "./",
            output_binary: bool=False,
            output_prefix: str="dic_results_",
-           output_delimiter: str=",") -> None:
+           output_delimiter: str=",",
+           output_unconverged: bool=False,
+           output_shape_params: bool=False,
+           debug_level: int=0) -> None:
+
     """
     Perform 2D Digital Image Correlation (DIC) between a reference image and one or more deformed images.

@@ -51,7 +61,7 @@ def dic_2d(reference: np.ndarray | str | Path,
         The deformed image(s) (3D array for multiple images) or path/pattern to image files.
     roi_mask : np.ndarray
         A binary mask indicating the Region of Interest (ROI) for analysis (same size as image).
-    seed : list of int, optional
+    seed : list[int], list[np.int32] or np.ndarray
         Coordinates `[x, y]` of the seed point for Reliability-Guided (RG) scanning, default is empty.
     subset_size : int, optional
         Size of the square subset window in pixels (default: 21).
@@ -70,6 +80,8 @@ def dic_2d(reference: np.ndarray | str | Path,
         Precision threshold for iterative optimization convergence (default: 0.001).
     opt_threshold : float, optional
         Minimum correlation improvement threshold to continue iterations (default: 0.9).
+    num_threads : int, optional
+        Number of threads to use for parallel computation (default: None, uses all available).
     bf_threshold : float, optional
         Correlation threshold used in rigid bruteforce check for a subset to be considered a
         good match(default: 0.6).
@@ -103,6 +115,12 @@ def dic_2d(reference: np.ndarray | str | Path,
         changed to ".csv" or ".dic2d" depending on whether outputting as a binary.
     output_delimiter : str, optional
         Delimiter used in text output files (default: ",").
+    output_unconverged : bool, optional
+        If True, subset results as they were for the final iteration of the optimization
+        that did not converge will be saved (default: False).
+    output_shape_params : bool, optional
+        If True, all shape parameters will be saved in the output files (default: False).
+    debug_level:

     Returns
     -------
@@ -151,6 +169,7 @@ def dic_2d(reference: np.ndarray | str | Path,
     config.filenames = filenames
     config.fft_mad = fft_mad
     config.fft_mad_scale = fft_mad_scale
+    config.debug_level = debug_level

     # assigning c++ struct vals for save config
     saveconf = dic2dcpp.SaveConfig()
@@ -159,6 +178,14 @@ def dic_2d(reference: np.ndarray | str | Path,
     saveconf.prefix = output_prefix
     saveconf.delimiter = output_delimiter
     saveconf.at_end = output_at_end
+    saveconf.output_unconverged = output_unconverged
+    saveconf.shape_params = output_shape_params
+
+
+    #set the number of OMP threads
+    if num_threads is not None:
+        dic2dcpp.set_num_threads(num_threads)

     # calling the c++ dic engine
-    dic2dcpp.dic_engine(ref_arr, def_arr, roi_c, config, saveconf)
+    with dic2dcpp.ostream_redirect(stdout=True, stderr=True):
+        dic2dcpp.dic_engine(ref_arr, def_arr, roi_c, config, saveconf)
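
A hypothetical call sketch against the dic_2d signature above; the image files, array contents, and ROI dtype are placeholders, only the keyword names come from the signature:

    import numpy as np
    from pyvale.dic2d import dic_2d

    ref = np.load("reference.npy")           # hypothetical reference image
    deformed = np.load("deformed.npy")       # hypothetical deformed image or stack
    roi = np.ones_like(ref, dtype=np.uint8)  # assumed binary ROI covering the image

    dic_2d(reference=ref,
           deformed=deformed,
           roi_mask=roi,
           seed=np.array([120, 80], dtype=np.int32),  # ndarray seeds now accepted
           num_threads=4,                             # cap the OpenMP thread count
           output_unconverged=False,                  # unconverged subsets saved as NaN
           output_shape_params=True,                  # append shape parameters to outputs
           debug_level=0)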
pyvale/dicchecks.py CHANGED
@@ -55,16 +55,18 @@ def check_output_directory(output_basepath: str,

     if conflicting_files:
         conflicting_files.sort()
-        print("The following output files already exist and may be overwritten:")
+        print("WARNING: The following output files already exist and may be overwritten:")
         for f in conflicting_files:
             print(f" - {os.path.join(output_basepath, f)}")
         print("")

-        user_input = input("Do you want to continue? (y/n): ").strip().lower()

-        if user_input not in ("y", "yes", "Y", "YES"):
-            print("Aborting to avoid overwriting data in output directory.")
-            exit(0)
+        ###### TURNING USER INPUT OFF FOR NOW ######
+        # user_input = input("Do you want to continue? (y/n): ").strip().lower()
+
+        # if user_input not in ("y", "yes", "Y", "YES"):
+        #     print("Aborting to avoid overwriting data in output directory.")
+        #     exit(0)


 def check_correlation_criteria(correlation_criteria: str) -> None:
@@ -247,7 +249,7 @@ def check_subsets(subset_size: int, subset_step: int) -> None:



-def check_and_update_rg_seed(seed: list[int], roi_mask: np.ndarray, scanning_method: str, px_hori: int, px_vert: int, subset_size: int, subset_step: int) -> list[int]:
+def check_and_update_rg_seed(seed: list[int] | list[np.int32] | np.ndarray, roi_mask: np.ndarray, scanning_method: str, px_hori: int, px_vert: int, subset_size: int, subset_step: int) -> list[int]:
     """
     Validate and update the region-growing seed location to align with image bounds and subset spacing.

@@ -260,7 +262,7 @@

     Parameters
     ----------
-    seed : list of int
+    seed : list[int], list[np.int32] or np.ndarray
         The initial seed coordinates as a list of two integers: [x, y].
     roi_mask : np.ndarray
         A 2D binary mask (same size as the image) indicating the region of interest.
@@ -287,11 +289,19 @@
     if scanning_method != "RG":
         return [0,0]

-    if not (isinstance(seed, list) and len(seed) == 2 and all(isinstance(coord, int) for coord in seed)):
-        raise ValueError("Reliability Guided seed is either missing or has been defined incorrectly. must be a list of two integers: seed=[x, y]")
+    if (len(seed) != 2):
+        raise ValueError(f"Reliability Guided seed does not have two elements: " \
+                         f"seed={seed}. Seed " \
+                         f" must be a list of two integers: seed=[x, y]")
+
+    if not isinstance(seed, (list, np.ndarray)) or not all(isinstance(coord, (int, np.int32)) for coord in seed):
+        raise ValueError("Reliability Guided seed must be a list of two integers: seed=[x, y]")

     x, y = seed

+    if x < 0 or x >= px_hori or y < 0 or y >= px_vert:
+        raise ValueError(f"Seed ({x}, {y}) goes outside the image bounds: ({px_hori}, {px_vert})")
+
     corner_x = x - subset_size//2
     corner_y = y - subset_size//2

@@ -302,17 +312,11 @@
     new_x = round_to_step(corner_x, subset_step)
     new_y = round_to_step(corner_y, subset_step)

-
-    # Clamp to image bounds
-    new_x = min(max(new_x, 0), px_hori - 1)
-    new_y = min(max(new_y, 0), px_vert - 1)
-
     # check if all pixel values within the seed location are within the ROI
     # seed coordinates are the central pixel to the subset
     max_x = new_x + subset_size//2+1
     max_y = new_y + subset_size//2+1

-
     # Check if all pixel values in the ROI are valid
     for i in range(corner_x, max_x):
         for j in range(corner_y, max_y):
@@ -435,9 +439,17 @@ def check_and_get_images(reference: np.ndarray | str | Path,
         ref_arr = reference
         def_arr = deformed

-        if (reference.shape != deformed[0].shape or reference.shape != roi.shape):
+        # user might only pass a single deformed image. need to convert to 'stack'
+        if (reference.shape == deformed.shape):
+            def_arr = def_arr.reshape((1,def_arr.shape[0],def_arr.shape[1]))
+
+        elif (reference.shape != deformed[0].shape or reference.shape != roi.shape):
             raise ValueError(f"Shape mismatch: reference {reference.shape}, "
                              f"deformed[0] {deformed[0].shape}, roi {roi.shape}")
+
+
+        # need to set some dummy filenames in the case that the user passes numpy arrays
+        filenames = [f"deformed image {i}" for i in range(def_arr.shape[0])]

         # it might be the case that the roi has been manipulated prior to DIC run
         # and therefore we need to to prevent the roi mask from being a 'view'
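
A small sketch of the seed validation in dicchecks.check_and_update_rg_seed as defined above, with a hypothetical ROI mask and image size: a NumPy int32 seed passes the type check, while an out-of-bounds seed raises a ValueError before any ROI lookup:

    import numpy as np
    import pyvale.dicchecks as dicchecks

    roi = np.ones((512, 512), dtype=np.uint8)   # hypothetical full-image ROI

    try:
        dicchecks.check_and_update_rg_seed(seed=np.array([600, 600], dtype=np.int32),
                                           roi_mask=roi,
                                           scanning_method="RG",
                                           px_hori=512, px_vert=512,
                                           subset_size=21, subset_step=10)
    except ValueError as err:
        print(err)   # seed (600, 600) lies outside the 512 x 512 image bounds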
pyvale/dicdataimport.py CHANGED
@@ -23,7 +23,7 @@ calculations.
 def dic_data_import(data: str | Path,
                     binary: bool = False,
                     layout: str = "matrix",
-                    delimiter: str = " ") -> DICResults:
+                    delimiter: str = ",") -> DICResults:
     """
     Import DIC result data from human readable text or binary files.

@@ -93,18 +93,67 @@ def dic_data_import(data: str | Path,
             raise ValueError("Mismatch in coordinates across frames.")
         frames.append(f)

-    # Stack fields into arrays
-    arrays = [np.stack([frame[i] for frame in frames]) for i in range(8)]
+    # Stack results (except ss_x and ss_y) into arrays
+    arrays = [np.stack([frame[i] for frame in frames]) for i in range(len(fields))]

     if layout == "matrix":
+
+        # convert x and y data to meshgrid
         x_unique = np.unique(ss_x_ref)
         y_unique = np.unique(ss_y_ref)
         X, Y = np.meshgrid(x_unique, y_unique)
         shape = (len(files), len(y_unique), len(x_unique))
+
+
         arrays = [to_grid(a,shape,ss_x_ref, ss_y_ref, x_unique,y_unique) for a in arrays]
-        return DICResults(X, Y, *arrays, filenames)
+
+
+        # sorting out shape function parameters if they are present in the files
+        current_shape = arrays[0].shape # (file,x,y)
+        shape_params = np.zeros(())
+
+        # rigid
+        if len(fields) == 10:
+            shape_params = np.zeros(current_shape+(2,))
+            shape_params[:,:,:,0] = arrays[8]
+            shape_params[:,:,:,1] = arrays[9]
+        if len(fields) == 14:
+            shape_params = np.zeros(current_shape+(6,))
+            shape_params[:,:,:,0] = arrays[8]
+            shape_params[:,:,:,1] = arrays[9]
+            shape_params[:,:,:,2] = arrays[10]
+            shape_params[:,:,:,3] = arrays[11]
+            shape_params[:,:,:,4] = arrays[12]
+            shape_params[:,:,:,5] = arrays[13]
+
+
+
+
+        return DICResults(X, Y, arrays[0], arrays[1], arrays[2], arrays[3],
+                          arrays[4], arrays[5], arrays[6], arrays[7],
+                          shape_params, filenames)
+    # column layout
     else:
-        return DICResults(ss_x_ref, ss_y_ref, *arrays, filenames)
+
+        shape_params = np.zeros(())
+        current_shape = arrays[0].shape # (file,(x,y))
+        # rigid
+        if len(fields) == 10:
+            shape_params = np.zeros(current_shape+(2,))
+            shape_params[:,:,0] = arrays[8]
+            shape_params[:,:,1] = arrays[9]
+        if len(fields) == 14:
+            shape_params = np.zeros(current_shape+(6,))
+            shape_params[:,:,0] = arrays[8]
+            shape_params[:,:,1] = arrays[9]
+            shape_params[:,:,2] = arrays[10]
+            shape_params[:,:,3] = arrays[11]
+            shape_params[:,:,4] = arrays[12]
+            shape_params[:,:,5] = arrays[13]
+
+        return DICResults(ss_x_ref, ss_y_ref, arrays[0], arrays[1], arrays[2], arrays[3],
+                          arrays[4], arrays[5], arrays[6], arrays[7],
+                          shape_params, filenames)



@@ -118,6 +167,8 @@ def read_binary(file: str, delimiter: str):
     - 2 × int32 (subset coordinates)
     - 6 × float64 (u, v, match quality, cost, ftol, xtol)
     - 1 × int32 (number of iterations)
+    - 1 × uint8 (convergence flag)
+    - 2 or 6 × float64 (shape parameters)

     Parameters
     ----------
@@ -138,26 +189,74 @@ def read_binary(file: str, delimiter: str):
     ValueError
         If the binary file size does not align with expected row size.
     """
-
-    row_size = (3 * 4 + 6 * 8)
+
+    # row size can either be 3×4 + 6×8 + 1 = 61 bytes (without shape params)
+    # or 3×4 + 6×8 + 1 + 6×8 = 109 bytes (with shape params)
     with open(file, "rb") as f:
         raw = f.read()
-    if len(raw) % row_size != 0:
-        raise ValueError("Binary file has incomplete rows.")
+
+    has_shape_params = False
+    has_rigid_params = False
+    has_affine_params = False
+
+    row_size_basic = 3 * 4 + 6 * 8 + 1             # 61 bytes
+    row_size_with_rigid = row_size_basic + 2 * 8   # 77 bytes
+    row_size_with_affine = row_size_basic + 6 * 8  # 109 bytes
+
+    if len(raw) % row_size_basic == 0:
+        row_size = row_size_basic
+        has_shape_params = False
+    elif len(raw) % row_size_with_rigid == 0:
+        has_shape_params = True
+        row_size = row_size_with_rigid
+        has_rigid_params = True
+        has_affine_params = False
+    elif len(raw) % row_size_with_affine == 0:
+        has_shape_params = True
+        row_size = row_size_with_affine
+        has_affine_params = True
+        has_rigid_params = False
+    else:
+        raise ValueError(
+            f"Binary file has incomplete rows: {file}. "
+            f"Expected row size: 65 ((without shape params), "
+            f"81 (with rigid shape params) bytes, "
+            f"109 (with affine shape params). "
+            f"Actual size: {len(raw)} bytes."
+        )
+
     rows = len(raw) // row_size
     arr = np.frombuffer(raw, dtype=np.uint8).reshape(rows, row_size)
-    def extract(col, dtype, start): return np.frombuffer(arr[:, start:start+col], dtype=dtype)
-    ss_x = extract(4, np.int32, 0)
-    ss_y = extract(4, np.int32, 4)
-    u = extract(8, np.float64, 8)
-    v = extract(8, np.float64, 16)
-    m = extract(8, np.float64, 24)
-    conv = extract(1, np.bool_, 25)
-    cost = extract(8, np.float64, 33)
-    ftol = extract(8, np.float64, 41)
-    xtol = extract(8, np.float64, 49)
+
+    def extract(col, dtype, start):
+        return np.frombuffer(arr[:, start:start+col].copy(), dtype=dtype)
+
+    ss_x = extract(4, np.int32, 0)
+    ss_y = extract(4, np.int32, 4)
+    u = extract(8, np.float64, 8)
+    v = extract(8, np.float64, 16)
+    m = extract(8, np.float64, 24)
+    conv = extract(1, np.uint8, 32).astype(bool)
+    cost = extract(8, np.float64, 33)
+    ftol = extract(8, np.float64, 41)
+    xtol = extract(8, np.float64, 49)
     niter = extract(4, np.int32, 57)
-    return ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter
+
+    if has_shape_params:
+        if has_rigid_params:
+            p0 = extract(8, np.float64, 61)
+            p1 = extract(8, np.float64, 69)
+            return ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter, p0,p1
+        if has_affine_params:
+            p0 = extract(8, np.float64, 61)
+            p1 = extract(8, np.float64, 69)
+            p2 = extract(8, np.float64, 77)
+            p3 = extract(8, np.float64, 85)
+            p4 = extract(8, np.float64, 93)
+            p5 = extract(8, np.float64, 101)
+            return ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter, p0,p1,p2,p3,p4,p5
+    else:
+        return ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter



@@ -168,6 +267,7 @@ def read_text(file: str, delimiter: str):

     Expects at least 9 columns:
     [ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter]
+    Could also include shape parameters if present.

     Parameters
     ----------
@@ -181,7 +281,7 @@
     -------
     tuple of np.ndarray
         Arrays corresponding to:
-        (ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter)
+        (ss_x, ss_y, u, v, m, conv, cost, ftol, xtol, niter, shape_params)

     Raises
     ------
@@ -190,18 +290,41 @@
     """

     data = np.loadtxt(file, delimiter=delimiter, skiprows=1)
+
     if data.shape[1] < 9:
         raise ValueError("Text data must have at least 9 columns.")
-    return (
-        data[:, 0].astype(np.int32), # ss_x
-        data[:, 1].astype(np.int32), # ss_y
-        data[:, 2], data[:, 3], data[:, 4], # u, v, mag
-        data[:, 5].astype(np.bool_), # convergence
-        data[:, 6], data[:, 7], data[:,8], # cost, ftol, xtol
-        data[:, 9].astype(np.int32) #niter
-    )
-
-
+
+    if data.shape[1] == 10:
+        return (
+            data[:, 0].astype(np.int32), # ss_x
+            data[:, 1].astype(np.int32), # ss_y
+            data[:, 2], data[:, 3], data[:, 4], # u, v, mag
+            data[:, 5].astype(np.bool_), # convergence
+            data[:, 6], data[:, 7], data[:,8], # cost, ftol, xtol
+            data[:, 9].astype(np.int32) #niter
+        )
+    #rigid
+    elif data.shape[1]==12:
+        return (
+            data[:, 0].astype(np.int32), # ss_x
+            data[:, 1].astype(np.int32), # ss_y
+            data[:, 2], data[:, 3], data[:, 4], # u, v, mag
+            data[:, 5].astype(np.bool_), # convergence
+            data[:, 6], data[:, 7], data[:,8], # cost, ftol, xtol
+            data[:, 9].astype(np.int32), #niter
+            data[:,10], data[:,11] # shape params (rigid)
+        )
+    #affine
+    elif data.shape[1]==16:
+        return (
+            data[:, 0].astype(np.int32), # ss_x
+            data[:, 1].astype(np.int32), # ss_y
+            data[:, 2], data[:, 3], data[:, 4], # u, v, mag
+            data[:, 5].astype(np.bool_), # convergence
+            data[:, 6], data[:, 7], data[:,8], # cost, ftol, xtol
+            data[:, 9].astype(np.int32), #niter
+            data[:,10], data[:,11], data[:,12], data[:,13], data[:,14], data[:,15] # shape params (affine)
+        )


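
For cross-checking the binary layout documented in read_binary, a minimal struct-based sketch of one basic 61-byte row (little-endian, no padding assumed; the file name is a hypothetical example of a ".dic2d" output):

    import struct

    # 2 x int32, then u/v/mag as float64, a uint8 convergence flag,
    # cost/ftol/xtol as float64 and an int32 iteration count = 61 bytes
    ROW = "<2i3dB3di"
    assert struct.calcsize(ROW) == 61

    with open("dic_results_0.dic2d", "rb") as f:   # hypothetical binary output file
        ss_x, ss_y, u, v, mag, conv, cost, ftol, xtol, niter = struct.unpack(ROW, f.read(61))

    print(ss_x, ss_y, u, v, bool(conv), niter)     # rigid/affine rows append 2 or 6 float64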