halib 0.1.91__py3-none-any.whl → 0.1.93__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- halib/research/plot.py +684 -201
- halib/utils/video.py +6 -0
- {halib-0.1.91.dist-info → halib-0.1.93.dist-info}/METADATA +6 -2
- {halib-0.1.91.dist-info → halib-0.1.93.dist-info}/RECORD +7 -7
- {halib-0.1.91.dist-info → halib-0.1.93.dist-info}/WHEEL +0 -0
- {halib-0.1.91.dist-info → halib-0.1.93.dist-info}/licenses/LICENSE.txt +0 -0
- {halib-0.1.91.dist-info → halib-0.1.93.dist-info}/top_level.txt +0 -0
halib/research/plot.py
CHANGED
@@ -1,24 +1,28 @@
+import ast
 import os
+import json
+import time
+import click
+import base64
 import pandas as pd
+
+from PIL import Image
+from io import BytesIO
+
 import plotly.express as px
-from
-from ..common import now_str, norm_str, ConsoleLog
+from ..common import now_str
 from ..filetype import csvfile
+import plotly.graph_objects as go
 from ..system import filesys as fs
-import click
-import time

-
-import plotly.graph_objects as go
-from PIL import Image
-import base64
-from io import BytesIO
+from rich.console import Console
 from typing import Callable, Optional, Tuple, List, Union


 console = Console()
 desktop_path = os.path.expanduser("~/Desktop")

+
 class PlotHelper:
     def _verify_csv(self, csv_file):
         """Read a CSV and normalize column names (lowercase)."""
@@ -179,273 +183,752 @@ class PlotHelper:
                 console.log("Stopped live updates.")
         else:
             run_once()
+
     @staticmethod
-    def
+    def get_img_grid_df(input_dir, log=False):
+        """
+        Use images in input_dir to create a dataframe for plot_image_grid.
+
+        Directory structures supported:
+
+        A. Row/Col structure:
+            input_dir/
+            ├── row0/
+            │   ├── col0/
+            │   │   ├── 0.png
+            │   │   ├── 1.png
+            │   └── col1/
+            │       ├── 0.png
+            │       ├── 1.png
+            ├── row1/
+            │   ├── col0/
+            │   │   ├── 0.png
+            │   │   ├── 1.png
+            │   └── col1/
+            │       ├── 0.png
+            │       ├── 1.png
+
+        B. Row-only structure (no cols):
+            input_dir/
+            ├── row0/
+            │   ├── 0.png
+            │   ├── 1.png
+            ├── row1/
+            │   ├── 0.png
+            │   ├── 1.png
+
+        Returns:
+            pd.DataFrame: DataFrame suitable for plot_image_grid.
+            Each cell contains a list of image paths.
         """
-
+        # --- Collect row dirs ---
+        rows = sorted([r for r in fs.list_dirs(input_dir) if r.startswith("row")])
+        if not rows:
+            raise ValueError(f"No 'row*' directories found in {input_dir}")
+
+        first_row_path = os.path.join(input_dir, rows[0])
+        subdirs = fs.list_dirs(first_row_path)
+
+        if subdirs:  # --- Case A: row/col structure ---
+            cols_ref = sorted(subdirs)
+
+            # Ensure column consistency
+            meta_dict = {row: sorted(fs.list_dirs(os.path.join(input_dir, row))) for row in rows}
+            for row, cols in meta_dict.items():
+                if cols != cols_ref:
+                    raise ValueError(f"Row {row} has mismatched columns: {cols} vs {cols_ref}")
+
+            # Collect image paths
+            meta_with_paths = {
+                row: {
+                    col: fs.filter_files_by_extension(os.path.join(input_dir, row, col), ["png", "jpg", "jpeg"])
+                    for col in cols_ref
+                }
+                for row in rows
+            }
+
+            # Validate equal number of images per (row, col)
+            n_imgs = len(meta_with_paths[rows[0]][cols_ref[0]])
+            for row, cols in meta_with_paths.items():
+                for col, paths in cols.items():
+                    if len(paths) != n_imgs:
+                        raise ValueError(
+                            f"Inconsistent file counts in {row}/{col}: {len(paths)} vs expected {n_imgs}"
+                        )
+
+            # Flatten long format
+            data = {"row": [row for row in rows for _ in range(n_imgs)]}
+            for col in cols_ref:
+                data[col] = [meta_with_paths[row][col][i] for row in rows for i in range(n_imgs)]
+
+        else:  # --- Case B: row-only structure ---
+            meta_with_paths = {
+                row: fs.filter_files_by_extension(os.path.join(input_dir, row), ["png", "jpg", "jpeg"])
+                for row in rows
+            }
+
+            # Validate equal number of images per row
+            n_imgs = len(next(iter(meta_with_paths.values())))
+            for row, paths in meta_with_paths.items():
+                if len(paths) != n_imgs:
+                    raise ValueError(f"Inconsistent file counts in {row}: {len(paths)} vs expected {n_imgs}")
+
+            # Flatten long format (images indexed as img0,img1,...)
+            data = {"row": rows}
+            for i in range(n_imgs):
+                data[f"img{i}"] = [meta_with_paths[row][i] for row in rows]
+
+        # --- Convert to wide "multi-list" format ---
+        df = pd.DataFrame(data)
+        row_col = df.columns[0]  # first col = row labels
+        col_cols = df.columns[1:]  # the rest = groupable cols
+
+        df = (
+            df.melt(id_vars=[row_col], var_name="col", value_name="path")
+            .groupby([row_col, "col"])["path"]
+            .apply(list)
+            .unstack("col")
+            .reset_index()
+        )
+
+        if log:
+            csvfile.fn_display_df(df)

-
-
-
-
+        return df
+
+    @staticmethod
+    def _parse_cell_to_list(cell) -> List[str]:
+        """Parse a DataFrame cell that may already be a list, a Python-list string, JSON list string,
+        or a single path. Returns list[str]."""
+        if cell is None:
+            return []
+        # pandas NA
+        try:
+            if pd.isna(cell):
+                return []
+        except Exception:
+            pass
+
+        if isinstance(cell, list):
+            return [str(x) for x in cell]
+
+        if isinstance(cell, (tuple, set)):
+            return [str(x) for x in cell]
+
+        if isinstance(cell, str):
+            s = cell.strip()
+            if not s:
+                return []
+
+            # Try Python literal (e.g. "['a','b']")
+            try:
+                val = ast.literal_eval(s)
+                if isinstance(val, (list, tuple)):
+                    return [str(x) for x in val]
+                if isinstance(val, str):
+                    return [val]
+            except Exception:
+                pass
+
+            # Try JSON
+            try:
+                val = json.loads(s)
+                if isinstance(val, list):
+                    return [str(x) for x in val]
+                if isinstance(val, str):
+                    return [val]
+            except Exception:
+                pass
+
+            # Fallback: split on common separators
+            for sep in [";;", ";", "|", ", "]:
+                if sep in s:
+                    parts = [p.strip() for p in s.split(sep) if p.strip()]
+                    if parts:
+                        return parts
+
+            # Single path string
+            return [s]
+
+        # anything else -> coerce to string
+        return [str(cell)]
+
+    @staticmethod
+    def plot_image_grid(
+        indir_or_csvf_or_df: Union[str, pd.DataFrame],
+        img_width: int = 300,
+        img_height: int = 300,
+        img_stack_direction: str = "horizontal",  # "horizontal" or "vertical"
+        img_stack_padding_px: int = 5,
+        img_scale_mode: str = "fit",  # "fit" or "fill"
+        format_row_label_func: Optional[Callable[[str], str]] = None,
+        format_col_label_func: Optional[Callable[[str], str]] = None,
+        title: str = "",
+        tickfont=dict(size=16, family="Arial", color="black"),  # <-- bigger labels
+        fig_margin: dict = dict(l=50, r=50, t=50, b=50),
+        outline_color: str = "",
+        outline_size: int = 1,
+        cell_margin_px: int = 10,  # spacing between cells
+        row_line_size: int = 0,  # if >0, draw horizontal dotted lines
+        col_line_size: int = 0  # if >0, draw vertical dotted lines
+    ):
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        Plot a grid of images using Plotly.
+
+        - Accepts DataFrame where each cell is either:
+            * a Python list object,
+            * a string representation of a Python list (e.g. "['a','b']"),
+            * a JSON list string, or
+            * a single path string.
+        - For each cell, stack the images into a single composite that exactly fits
+          (img_width, img_height) is the target size for each individual image in the stack.
+          The final cell size will depend on the number of images and stacking direction.
+        """
+
+        def process_image_for_slot(path: str, target_size: Tuple[int, int], scale_mode: str, outline: str, outline_size: int) -> Image.Image:
+            try:
+                img = Image.open(path).convert("RGB")
+            except Exception:
+                return Image.new("RGB", target_size, (255, 255, 255))
+
+            if scale_mode == "fit":
+                img_ratio = img.width / img.height
+                target_ratio = target_size[0] / target_size[1]
+
+                if img_ratio > target_ratio:
+                    new_height = target_size[1]
+                    new_width = max(1, int(new_height * img_ratio))
+                else:
+                    new_width = target_size[0]
+                    new_height = max(1, int(new_width / img_ratio))
+
+                img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+                left = (new_width - target_size[0]) // 2
+                top = (new_height - target_size[1]) // 2
+                right = left + target_size[0]
+                bottom = top + target_size[1]
+
+                if len(outline) == 7 and outline.startswith("#"):
+                    border_px = outline_size
+                    bordered = Image.new("RGB", (target_size[0] + 2*border_px, target_size[1] + 2*border_px), outline)
+                    bordered.paste(img.crop((left, top, right, bottom)), (border_px, border_px))
+                    return bordered
+                return img.crop((left, top, right, bottom))
+
+            elif scale_mode == "fill":
+                if len(outline) == 7 and outline.startswith("#"):
+                    border_px = outline_size
+                    bordered = Image.new("RGB", (target_size[0] + 2*border_px, target_size[1] + 2*border_px), outline)
+                    img = img.resize(target_size, Image.Resampling.LANCZOS)
+                    bordered.paste(img, (border_px, border_px))
+                    return bordered
+                return img.resize(target_size, Image.Resampling.LANCZOS)
+            else:
+                raise ValueError("img_scale_mode must be 'fit' or 'fill'.")
+
+        def stack_images_base64(image_paths: List[str], direction: str, single_img_size: Tuple[int,int], outline: str, outline_size: int, padding: int) -> Tuple[str, Tuple[int,int]]:
+            image_paths = [p for p in image_paths if p is not None and str(p).strip() != ""]
+            n = len(image_paths)
+            if n == 0:
+                blank = Image.new("RGB", single_img_size, (255,255,255))
+                buf = BytesIO()
+                blank.save(buf, format="PNG")
+                return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode(), single_img_size
+
+            processed = [process_image_for_slot(p, single_img_size, img_scale_mode, outline, outline_size) for p in image_paths]
+            pad_total = padding * (n-1)
+
+            if direction == "horizontal":
+                total_w = sum(im.width for im in processed) + pad_total
+                total_h = max(im.height for im in processed)
+                stacked = Image.new("RGB", (total_w, total_h), (255,255,255))
+                x = 0
+                for im in processed:
+                    stacked.paste(im, (x,0))
+                    x += im.width + padding
+            elif direction == "vertical":
+                total_w = max(im.width for im in processed)
+                total_h = sum(im.height for im in processed) + pad_total
+                stacked = Image.new("RGB", (total_w, total_h), (255,255,255))
+                y = 0
+                for im in processed:
+                    stacked.paste(im, (0,y))
+                    y += im.height + padding
+            else:
+                raise ValueError("img_stack_direction must be 'horizontal' or 'vertical'.")
+
+            buf = BytesIO()
+            stacked.save(buf, format="PNG")
+            encoded = base64.b64encode(buf.getvalue()).decode()
+            return f"data:image/png;base64,{encoded}", (total_w, total_h)
+
+        # --- Load DataFrame ---
+        if isinstance(indir_or_csvf_or_df, str):
+            fname, ext = os.path.splitext(indir_or_csvf_or_df)
+            if ext.lower() == ".csv":
+                df = pd.read_csv(indir_or_csvf_or_df)
+            elif os.path.isdir(indir_or_csvf_or_df):
+                df = PlotHelper.img_grid_indir_1(indir_or_csvf_or_df, log=False)
+            else:
+                raise ValueError("Input string must be a valid CSV file or directory path")
+        elif isinstance(indir_or_csvf_or_df, pd.DataFrame):
+            df = indir_or_csvf_or_df.copy()
+        else:
+            raise ValueError("Input must be CSV file path, DataFrame, or directory path")
+
+        rows = df.iloc[:,0].astype(str).tolist()
+        columns = list(df.columns[1:])
+        n_rows, n_cols = len(rows), len(columns)
+
         fig = go.Figure()
+        col_widths = [0]*n_cols
+        row_heights = [0]*n_rows

-
-
+        cell_imgs = [[None]*n_cols for _ in range(n_rows)]
+        for i in range(n_rows):
+            for j, col_label in enumerate(columns):
+                raw_cell = df.iloc[i, j+1]
+                image_paths = PlotHelper._parse_cell_to_list(raw_cell)
+                image_paths = [str(p).strip() for p in image_paths if str(p).strip() != ""]
+
+                img_src, (cell_w_actual, cell_h_actual) = stack_images_base64(
+                    image_paths, img_stack_direction, (img_width, img_height),
+                    outline=outline_color, outline_size=outline_size,
+                    padding=img_stack_padding_px
+                )

-
-
-
-
-
+                col_widths[j] = max(col_widths[j], cell_w_actual)
+                row_heights[i] = max(row_heights[i], cell_h_actual)
+                cell_imgs[i][j] = img_src
+
+        # Compute x/y positions including cell_margin
+        x_positions = []
+        cum_w = 0
+        for w in col_widths:
+            x_positions.append(cum_w)
+            cum_w += w + cell_margin_px
+
+        y_positions = []
+        cum_h = 0
+        for h in row_heights:
+            y_positions.append(-cum_h)
+            cum_h += h + cell_margin_px
+
+        # Add images to figure
+        for i in range(n_rows):
+            for j in range(n_cols):
                 fig.add_layout_image(
                     dict(
-                    source=
-                    x=j,
-                    y
+                        source=cell_imgs[i][j],
+                        x=x_positions[j],
+                        y=y_positions[i],
                         xref="x",
                         yref="y",
-                        sizex=
-                        sizey=
+                        sizex=col_widths[j],
+                        sizey=row_heights[i],
                         xanchor="left",
                         yanchor="top",
-                        layer="above"
+                        layer="above",
                     )
                 )
+        # ! Optional grid lines
+        # Add horizontal grid lines if row_line_size > 0
+        if row_line_size > 0:
+            for i in range(1, n_rows):
+                # Place line in the middle of the gap between rows
+                y = (
+                    y_positions[i - 1] - row_heights[i - 1] - y_positions[i]
+                ) / 2 + y_positions[i]
+                fig.add_shape(
+                    type="line",
+                    x0=-cell_margin_px,
+                    x1=cum_w - cell_margin_px,
+                    y0=y,
+                    y1=y,
+                    line=dict(width=row_line_size, color="black", dash="dot"),
+                )
+
+        # Add vertical grid lines if col_line_size > 0
+        if col_line_size > 0:
+            for j in range(1, n_cols):
+                x = x_positions[j] - cell_margin_px / 2
+                fig.add_shape(
+                    type="line",
+                    x0=x,
+                    x1=x,
+                    y0=cell_margin_px,
+                    y1=-cum_h + cell_margin_px,
+                    line=dict(width=col_line_size, color="black", dash="dot"),
+                )
+        # Axis labels
+        col_labels = [format_col_label_func(c) if format_col_label_func else c for c in columns]
+        row_labels = [format_row_label_func(r) if format_row_label_func else r for r in rows]

-        # Set axes for grid layout
         fig.update_xaxes(
-            tickvals=
-            ticktext=
-            range=[-
+            tickvals=[x_positions[j] + col_widths[j]/2 for j in range(n_cols)],
+            ticktext=col_labels,
+            range=[-cell_margin_px, cum_w - cell_margin_px],
             showgrid=False,
-            zeroline=False
+            zeroline=False,
+            tickfont=tickfont  # <-- apply bigger font here
         )
         fig.update_yaxes(
-            tickvals=[-i for i in range(n_rows)],
-            ticktext=
-            range=[-
+            tickvals=[y_positions[i] - row_heights[i]/2 for i in range(n_rows)],
+            ticktext=row_labels,
+            range=[-cum_h + cell_margin_px, cell_margin_px],
             showgrid=False,
-            zeroline=False
+            zeroline=False,
+            tickfont=tickfont  # <-- apply bigger font here
         )

         fig.update_layout(
-            width=
-            height=
-
+            width=cum_w + 100,
+            height=cum_h + 100,
+            title=title,
+            title_x=0.5,
+            margin=fig_margin,
         )

         fig.show()

     @staticmethod
-
-
-
-
-
-
-
-            row_path = os.path.join(input_dir, row)
-            cols = sorted(fs.list_dirs(row_path))
-            if cols_of_row is None:
-                cols_of_row = cols
-            else:
-                if cols_of_row != cols:
-                    raise ValueError(
-                        f"Row {row} has different columns than previous rows: {cols_of_row} vs {cols}"
-                    )
-            meta_dict[row] = cols
-
-        meta_dict_with_paths = {}
-        for row, cols in meta_dict.items():
-            meta_dict_with_paths[row] = {
-                col: fs.filter_files_by_extension(
-                    os.path.join(input_dir, row, col), ["png", "jpg", "jpeg"]
-                )
-                for col in cols
-            }
-        first_row = list(meta_dict_with_paths.keys())[0]
-        first_col = list(meta_dict_with_paths[first_row].keys())[0]
-        len_first_col = len(meta_dict_with_paths[first_row][first_col])
-        for row, cols in meta_dict_with_paths.items():
-            for col, paths in cols.items():
-                if len(paths) != len_first_col:
-                    raise ValueError(
-                        f"Row {row}, Column {col} has different number of files: {len(paths)} vs {len_first_col}"
-                    )
-        cols = sorted(meta_dict_with_paths[first_row].keys())
-        rows_set = sorted(meta_dict_with_paths.keys())
-        row_per_col = len(meta_dict_with_paths[first_row][first_col])
-        rows = [item for item in rows_set for _ in range(row_per_col)]
-        data_dict = {}
-        data_dict["row"] = rows
-        col_data = {col: [] for col in cols}
-        for row_base in rows_set:
-            for col in cols:
-                for i in range(row_per_col):
-                    col_data[col].append(meta_dict_with_paths[row_base][col][i])
-        data_dict.update(col_data)
-        df = pd.DataFrame(data_dict)
-        if log:
-            csvfile.fn_display_df(df)
-        return df
-
-    @staticmethod
-    def plot_image_grid(
-        csv_file_or_df: Union[str, pd.DataFrame],
-        max_width: int = 300,
-        max_height: int = 300,
-        img_stack_direction: str = "horizontal",
-        img_stack_padding_px: int = 10,
+    def plot_image_grid1(
+        indir_or_csvf_or_df: Union[str, pd.DataFrame],
+        img_width: int = 300,
+        img_height: int = 300,
+        img_stack_direction: str = "horizontal",  # "horizontal" or "vertical"
+        img_stack_padding_px: int = 5,
+        img_scale_mode: str = "fit",  # "fit" or "fill"
         format_row_label_func: Optional[Callable[[str], str]] = None,
-        format_col_label_func: Optional[Callable[[str
+        format_col_label_func: Optional[Callable[[str], str]] = None,
         title: str = "",
+        tickfont=dict(size=16, family="Arial", color="black"),  # <-- bigger labels
+        fig_margin: dict = dict(l=50, r=50, t=50, b=50),
+        outline_color: str = "",
+        outline_size: int = 1,
+        cell_margin_px: int = 10,  # padding (top, left, right, bottom) inside each cell
+        row_line_size: int = 0,  # if >0, draw horizontal dotted lines
+        col_line_size: int = 0,  # if >0, draw vertical dotted lines
     ):
         """
-        Plot a grid of images using Plotly
-
-
-
-
-
-
-
-
-
-            title (str): Figure title.
+        Plot a grid of images using Plotly.
+
+        - Accepts DataFrame where each cell is either:
+            * a Python list object,
+            * a string representation of a Python list (e.g. "['a','b']"),
+            * a JSON list string, or
+            * a single path string.
+        - For each cell, stack the images into a single composite that exactly fits
+          (img_width, img_height) is the target size for each individual image in the stack.
+          The final cell size will depend on the number of images and stacking direction.
         """

+        def process_image_for_slot(
+            path: str,
+            target_size: Tuple[int, int],
+            scale_mode: str,
+            outline: str,
+            outline_size: int,
+        ) -> Image.Image:
+            try:
+                img = Image.open(path).convert("RGB")
+            except Exception:
+                return Image.new("RGB", target_size, (255, 255, 255))
+
+            if scale_mode == "fit":
+                img_ratio = img.width / img.height
+                target_ratio = target_size[0] / target_size[1]
+
+                if img_ratio > target_ratio:
+                    new_height = target_size[1]
+                    new_width = max(1, int(new_height * img_ratio))
+                else:
+                    new_width = target_size[0]
+                    new_height = max(1, int(new_width / img_ratio))
+
+                img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+                left = (new_width - target_size[0]) // 2
+                top = (new_height - target_size[1]) // 2
+                right = left + target_size[0]
+                bottom = top + target_size[1]
+
+                if len(outline) == 7 and outline.startswith("#"):
+                    border_px = outline_size
+                    bordered = Image.new(
+                        "RGB",
+                        (target_size[0] + 2 * border_px, target_size[1] + 2 * border_px),
+                        outline,
+                    )
+                    bordered.paste(
+                        img.crop((left, top, right, bottom)), (border_px, border_px)
+                    )
+                    return bordered
+                return img.crop((left, top, right, bottom))
+
+            elif scale_mode == "fill":
+                if len(outline) == 7 and outline.startswith("#"):
+                    border_px = outline_size
+                    bordered = Image.new(
+                        "RGB",
+                        (target_size[0] + 2 * border_px, target_size[1] + 2 * border_px),
+                        outline,
+                    )
+                    img = img.resize(target_size, Image.Resampling.LANCZOS)
+                    bordered.paste(img, (border_px, border_px))
+                    return bordered
+                return img.resize(target_size, Image.Resampling.LANCZOS)
+            else:
+                raise ValueError("img_scale_mode must be 'fit' or 'fill'.")
+
         def stack_images_base64(
-            image_paths: List[str],
-
-
-
-
-
-
-            for
-
-
-
-
-
-
-
-
-            # Stack
-            widths, heights = zip(*(img.size for img in processed_images))
-            if direction == "horizontal":
-                total_width = sum(widths) + img_stack_padding_px * (
-                    len(processed_images) - 1
-                )
-                total_height = max(heights)
-                stacked = Image.new("RGB", (total_width, total_height), (255, 255, 255))
-                x_offset = 0
-                for im in processed_images:
-                    stacked.paste(im, (x_offset, 0))
-                    x_offset += im.width + img_stack_padding_px
-            elif direction == "vertical":
-                total_width = max(widths)
-                total_height = sum(heights) + img_stack_padding_px * (
-                    len(processed_images) - 1
+            image_paths: List[str],
+            direction: str,
+            single_img_size: Tuple[int, int],
+            outline: str,
+            outline_size: int,
+            padding: int,
+        ) -> Tuple[str, Tuple[int, int]]:
+            image_paths = [p for p in image_paths if p is not None and str(p).strip() != ""]
+            n = len(image_paths)
+            if n == 0:
+                blank = Image.new("RGB", single_img_size, (255, 255, 255))
+                buf = BytesIO()
+                blank.save(buf, format="PNG")
+                return (
+                    "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode(),
+                    single_img_size,
                 )
-                stacked = Image.new("RGB", (total_width, total_height), (255, 255, 255))
-                y_offset = 0
-                for im in processed_images:
-                    stacked.paste(im, (0, y_offset))
-                    y_offset += im.height + img_stack_padding_px
-            else:
-                raise ValueError("img_stack_direction must be 'horizontal' or 'vertical'")

-
-
-
-
-
+            processed = [
+                process_image_for_slot(
+                    p, single_img_size, img_scale_mode, outline, outline_size
+                )
+                for p in image_paths
+            ]
+            pad_total = padding * (n - 1)

-
-
-
+            if direction == "horizontal":
+                total_w = sum(im.width for im in processed) + pad_total
+                total_h = max(im.height for im in processed)
+                stacked = Image.new("RGB", (total_w, total_h), (255, 255, 255))
+                x = 0
+                for im in processed:
+                    stacked.paste(im, (x, 0))
+                    x += im.width + padding
+            elif direction == "vertical":
+                total_w = max(im.width for im in processed)
+                total_h = sum(im.height for im in processed) + pad_total
+                stacked = Image.new("RGB", (total_w, total_h), (255, 255, 255))
+                y = 0
+                for im in processed:
+                    stacked.paste(im, (0, y))
+                    y += im.height + padding
+            else:
+                raise ValueError("img_stack_direction must be 'horizontal' or 'vertical'.")
+
+            buf = BytesIO()
+            stacked.save(buf, format="PNG")
+            encoded = base64.b64encode(buf.getvalue()).decode()
+            return f"data:image/png;base64,{encoded}", (total_w, total_h)
+
+        def compute_stacked_size(
+            image_paths: List[str],
+            direction: str,
+            single_w: int,
+            single_h: int,
+            padding: int,
+            outline: str,
+            outline_size: int,
+        ) -> Tuple[int, int]:
+            image_paths = [p for p in image_paths if p is not None and str(p).strip() != ""]
+            n = len(image_paths)
+            if n == 0:
+                return single_w, single_h
+            has_outline = len(outline) == 7 and outline.startswith("#")
+            border = 2 * outline_size if has_outline else 0
+            unit_w = single_w + border
+            unit_h = single_h + border
+            if direction == "horizontal":
+                total_w = n * unit_w + (n - 1) * padding
+                total_h = unit_h
+            elif direction == "vertical":
+                total_w = unit_w
+                total_h = n * unit_h + (n - 1) * padding
+            else:
+                raise ValueError("img_stack_direction must be 'horizontal' or 'vertical'.")
+            return total_w, total_h
+
+        # --- Load DataFrame ---
+        if isinstance(indir_or_csvf_or_df, str):
+            fname, ext = os.path.splitext(indir_or_csvf_or_df)
+            if ext.lower() == ".csv":
+                df = pd.read_csv(indir_or_csvf_or_df)
+            elif os.path.isdir(indir_or_csvf_or_df):
+                df = PlotHelper.img_grid_indir_1(indir_or_csvf_or_df, log=False)
+            else:
+                raise ValueError("Input string must be a valid CSV file or directory path")
+        elif isinstance(indir_or_csvf_or_df, pd.DataFrame):
+            df = indir_or_csvf_or_df.copy()
         else:
-
-        assert isinstance(df, pd.DataFrame), "Input must be a DataFrame or valid CSV file path"
+            raise ValueError("Input must be CSV file path, DataFrame, or directory path")

-        rows = df
-        columns = df.columns[1:]
+        rows = df.iloc[:, 0].astype(str).tolist()
+        columns = list(df.columns[1:])
         n_rows, n_cols = len(rows), len(columns)

         fig = go.Figure()

-
-
-
-
-
-
-
+        # First pass: compute content sizes
+        content_col_max = [0] * n_cols
+        content_row_max = [0] * n_rows
+        cell_paths = [[None] * n_cols for _ in range(n_rows)]
+        for i in range(n_rows):
+            for j in range(n_cols):
+                raw_cell = df.iloc[i, j + 1]
+                paths = PlotHelper._parse_cell_to_list(raw_cell)
+                image_paths = [str(p).strip() for p in paths if str(p).strip() != ""]
+                cell_paths[i][j] = image_paths
+                cw, ch = compute_stacked_size(
+                    image_paths,
+                    img_stack_direction,
+                    img_width,
+                    img_height,
+                    img_stack_padding_px,
+                    outline_color,
+                    outline_size,
                 )
-
+                content_col_max[j] = max(content_col_max[j], cw)
+                content_row_max[i] = max(content_row_max[i], ch)
+
+        # Compute display sizes (content max + padding)
+        display_col_w = [content_col_max[j] + 2 * cell_margin_px for j in range(n_cols)]
+        display_row_h = [content_row_max[i] + 2 * cell_margin_px for i in range(n_rows)]
+
+        # Compute positions (cells adjacent)
+        x_positions = []
+        cum_w = 0
+        for dw in display_col_w:
+            x_positions.append(cum_w)
+            cum_w += dw
+
+        y_positions = []
+        cum_h = 0
+        for dh in display_row_h:
+            y_positions.append(-cum_h)
+            cum_h += dh
+
+        # Second pass: create padded images (centered content)
+        cell_imgs = [[None] * n_cols for _ in range(n_rows)]
+        p = cell_margin_px
+        for i in range(n_rows):
+            for j in range(n_cols):
+                image_paths = cell_paths[i][j]
+                content_src, (cw, ch) = stack_images_base64(
+                    image_paths,
+                    img_stack_direction,
+                    (img_width, img_height),
+                    outline_color,
+                    outline_size,
+                    img_stack_padding_px,
+                )
+                if cw == 0 or ch == 0:
+                    # Skip empty, but create white padded
+                    pad_w = display_col_w[j]
+                    pad_h = display_row_h[i]
+                    padded = Image.new("RGB", (pad_w, pad_h), (255, 255, 255))
+                else:
+                    content_img = Image.open(
+                        BytesIO(base64.b64decode(content_src.split(",")[1]))
+                    )
+                    ca_w = content_col_max[j]
+                    ca_h = content_row_max[i]
+                    left_offset = (ca_w - cw) // 2
+                    top_offset = (ca_h - ch) // 2
+                    pad_w = display_col_w[j]
+                    pad_h = display_row_h[i]
+                    padded = Image.new("RGB", (pad_w, pad_h), (255, 255, 255))
+                    paste_x = p + left_offset
+                    paste_y = p + top_offset
+                    padded.paste(content_img, (paste_x, paste_y))
+                buf = BytesIO()
+                padded.save(buf, format="PNG")
+                encoded = base64.b64encode(buf.getvalue()).decode()
+                cell_imgs[i][j] = f"data:image/png;base64,{encoded}"
+
+        # Add images to figure
+        for i in range(n_rows):
+            for j in range(n_cols):
                 fig.add_layout_image(
                     dict(
-                    source=
-                    x=j,
-                    y
+                        source=cell_imgs[i][j],
+                        x=x_positions[j],
+                        y=y_positions[i],
                         xref="x",
                         yref="y",
-                        sizex=
-                        sizey=
+                        sizex=display_col_w[j],
+                        sizey=display_row_h[i],
                         xanchor="left",
                         yanchor="top",
                         layer="above",
                     )
                 )

-        #
+        # Optional grid lines (at cell boundaries, adjusted for inter-content spaces)
+        if row_line_size > 0:
+            for i in range(1, n_rows):
+                y = (y_positions[i - 1] - display_row_h[i - 1] + y_positions[i]) / 2
+                fig.add_shape(
+                    type="line",
+                    x0=-p,
+                    x1=cum_w,
+                    y0=y,
+                    y1=y,
+                    line=dict(width=row_line_size, color="black", dash="dot"),
+                )
+
+        if col_line_size > 0:
+            for j in range(1, n_cols):
+                x = x_positions[j]
+                fig.add_shape(
+                    type="line",
+                    x0=x,
+                    x1=x,
+                    y0=p,
+                    y1=-cum_h,
+                    line=dict(width=col_line_size, color="black", dash="dot"),
+                )
+
+        # Axis labels
         col_labels = [
-            format_col_label_func(c
-            for c in columns
+            format_col_label_func(c) if format_col_label_func else c for c in columns
         ]
         row_labels = [
             format_row_label_func(r) if format_row_label_func else r for r in rows
         ]

         fig.update_xaxes(
-            tickvals=
+            tickvals=[x_positions[j] + display_col_w[j] / 2 for j in range(n_cols)],
             ticktext=col_labels,
-            range=[-
+            range=[-p, cum_w],
             showgrid=False,
             zeroline=False,
+            tickfont=tickfont,  # <-- apply bigger font here
         )
         fig.update_yaxes(
-            tickvals=[-i for i in range(n_rows)],
+            tickvals=[y_positions[i] - display_row_h[i] / 2 for i in range(n_rows)],
             ticktext=row_labels,
-            range=[-
+            range=[-cum_h, p],
             showgrid=False,
             zeroline=False,
+            tickfont=tickfont,  # <-- apply bigger font here
         )

         fig.update_layout(
-            width=
-            height=
+            width=cum_w + 100,
+            height=cum_h + 100,
             title=title,
-
+            title_x=0.5,
+            margin=fig_margin,
         )

         fig.show()
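
For orientation, here is a minimal usage sketch of the PlotHelper API added above. It is not taken from the package docs: the "samples" directory and all paths are illustrative, and only the DataFrame input path of plot_image_grid is exercised (names and signatures follow the diff above).

    from halib.research.plot import PlotHelper

    # Build a row/col DataFrame from a directory laid out as in the
    # get_img_grid_df docstring (samples/row0/col0/0.png, ...).
    # "samples" is an illustrative path, not shipped with the package.
    df = PlotHelper.get_img_grid_df("samples", log=True)

    # Each cell holds a list of image paths; _parse_cell_to_list also accepts
    # Python-list strings, JSON lists, separator-joined paths, or a single path.
    PlotHelper.plot_image_grid(
        df,
        img_width=300,
        img_height=300,
        img_stack_direction="horizontal",
        img_scale_mode="fit",
        title="Sample grid",
        row_line_size=1,  # dotted separators between rows
    )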
halib/utils/video.py
CHANGED
@@ -23,11 +23,17 @@ class VideoUtils:
         # Get the FPS
         fps = cap.get(cv2.CAP_PROP_FPS)

+        # get frame size
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
         # Release the video capture object
         cap.release()

         meta_dict = {
             "video_path": video_path,
+            "width": width,
+            "height": height,
             "frame_count": frame_count,
             "fps": fps
         }
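
The added width/height fields use the standard OpenCV capture properties. A standalone sketch of the same readout follows; the enclosing VideoUtils method name is not shown in this hunk, and "clip.mp4" is an illustrative path.

    import cv2

    cap = cv2.VideoCapture("clip.mp4")
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # added in 0.1.93
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # added in 0.1.93
    cap.release()

    meta_dict = {
        "video_path": "clip.mp4",
        "width": width,
        "height": height,
        "frame_count": frame_count,
        "fps": fps,
    }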
{halib-0.1.91.dist-info → halib-0.1.93.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: halib
-Version: 0.1.
+Version: 0.1.93
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
@@ -50,7 +50,11 @@ Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary

-Helper package for coding and automation
+# Helper package for coding and automation
+
+**Version 0.1.93**
++ `research/plot': add `PlotHelper` class to plot train history + plot grid of images (e.g., image samples from dataset or model outputs)
+

 **Version 0.1.91**
 + `research/param_gen`: add `ParamGen` class to generate parameter list from yaml file for hyperparameter search (grid search, random search, etc.)
{halib-0.1.91.dist-info → halib-0.1.93.dist-info}/RECORD
CHANGED
@@ -37,7 +37,7 @@ halib/research/mics.py,sha256=uX17AGrBGER-OFMqUULE_A9YPPbn1RpQ4o5-omrmqZ8,377
 halib/research/params_gen.py,sha256=GcTMlniL0iE3HalJY-gVRiYa8Qy8u6nX4LkKZeMkct8,4262
 halib/research/perfcalc.py,sha256=qDa0sqfpWrwGZVJtjuUVFK7JX6j8xyXP9OnnfYmdamg,15898
 halib/research/perftb.py,sha256=FWg0b8wSgy4UwuvHSXwEqvTq1Rhi-z-HtAKuQg1lWc4,30989
-halib/research/plot.py,sha256=
+halib/research/plot.py,sha256=4xMGJuP1lGN1wF27XFM5eMFb73Gu9qB582VZhTdcCSA,38418
 halib/research/profiler.py,sha256=GRAewTo0jGkOputjmRwtYVfJYBze_ivsOnrW9exWkPQ,11772
 halib/research/torchloader.py,sha256=yqUjcSiME6H5W210363HyRUrOi3ISpUFAFkTr1w4DCw,6503
 halib/research/wandb_op.py,sha256=YzLEqME5kIRxi3VvjFkW83wnFrsn92oYeqYuNwtYRkY,4188
@@ -53,9 +53,9 @@ halib/utils/dict_op.py,sha256=wYE6Iw-_CnCWdMg9tpJ2Y2-e2ESkW9FxmdBkZkbUh80,299
 halib/utils/gpu_mon.py,sha256=vD41_ZnmPLKguuq9X44SB_vwd9JrblO4BDzHLXZhhFY,2233
 halib/utils/listop.py,sha256=Vpa8_2fI0wySpB2-8sfTBkyi_A4FhoFVVvFiuvW8N64,339
 halib/utils/tele_noti.py,sha256=-4WXZelCA4W9BroapkRyIdUu9cUVrcJJhegnMs_WpGU,5928
-halib/utils/video.py,sha256=
-halib-0.1.
-halib-0.1.
-halib-0.1.
-halib-0.1.
-halib-0.1.
+halib/utils/video.py,sha256=zLoj5EHk4SmP9OnoHjO8mLbzPdtq6gQPzTQisOEDdO8,3261
+halib-0.1.93.dist-info/licenses/LICENSE.txt,sha256=qZssdna4aETiR8znYsShUjidu-U4jUT9Q-EWNlZ9yBQ,1100
+halib-0.1.93.dist-info/METADATA,sha256=F3yohPt7k7wer8ur5j0yIi51-oZd81PKdg0ixUmNLFo,6200
+halib-0.1.93.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+halib-0.1.93.dist-info/top_level.txt,sha256=7AD6PLaQTreE0Fn44mdZsoHBe_Zdd7GUmjsWPyQ7I-k,6
+halib-0.1.93.dist-info/RECORD,,
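
The sha256 values in RECORD follow the wheel convention (PEP 376/427): urlsafe base64 of the raw SHA-256 digest with the padding stripped, followed by the file size in bytes. A small sketch of how such an entry is computed for a local file (the path is illustrative):

    import base64
    import hashlib

    def record_entry(path: str) -> str:
        data = open(path, "rb").read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"

    # e.g. record_entry("halib/utils/video.py") should reproduce the RECORD line
    # above when run against the exact file shipped in the 0.1.93 wheel.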
{halib-0.1.91.dist-info → halib-0.1.93.dist-info}/WHEEL
File without changes

{halib-0.1.91.dist-info → halib-0.1.93.dist-info}/licenses/LICENSE.txt
File without changes

{halib-0.1.91.dist-info → halib-0.1.93.dist-info}/top_level.txt
File without changes