scitex 2.16.1__py3-none-any.whl → 2.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scitex/_mcp_resources/_cheatsheet.py +1 -1
- scitex/_mcp_resources/_modules.py +1 -1
- scitex/_mcp_tools/__init__.py +2 -0
- scitex/_mcp_tools/verify.py +256 -0
- scitex/cli/main.py +2 -0
- scitex/cli/verify.py +476 -0
- scitex/dev/plt/__init__.py +1 -1
- scitex/dev/plt/mpl/get_dir_ax.py +1 -1
- scitex/dev/plt/mpl/get_signatures.py +1 -1
- scitex/dev/plt/mpl/get_signatures_details.py +1 -1
- scitex/io/_load.py +8 -1
- scitex/io/_save.py +12 -0
- scitex/session/README.md +2 -2
- scitex/session/__init__.py +1 -0
- scitex/session/_decorator.py +57 -33
- scitex/session/_lifecycle/__init__.py +23 -0
- scitex/session/_lifecycle/_close.py +225 -0
- scitex/session/_lifecycle/_config.py +112 -0
- scitex/session/_lifecycle/_matplotlib.py +83 -0
- scitex/session/_lifecycle/_start.py +246 -0
- scitex/session/_lifecycle/_utils.py +186 -0
- scitex/session/_manager.py +40 -3
- scitex/session/template.py +1 -1
- scitex/template/_templates/plt.py +1 -1
- scitex/template/_templates/session.py +1 -1
- scitex/verify/README.md +312 -0
- scitex/verify/__init__.py +212 -0
- scitex/verify/_chain.py +369 -0
- scitex/verify/_db.py +600 -0
- scitex/verify/_hash.py +187 -0
- scitex/verify/_integration.py +127 -0
- scitex/verify/_rerun.py +253 -0
- scitex/verify/_tracker.py +330 -0
- scitex/verify/_visualize.py +48 -0
- scitex/verify/_viz/__init__.py +56 -0
- scitex/verify/_viz/_colors.py +84 -0
- scitex/verify/_viz/_format.py +302 -0
- scitex/verify/_viz/_json.py +192 -0
- scitex/verify/_viz/_mermaid.py +440 -0
- scitex/verify/_viz/_plotly.py +193 -0
- scitex/verify/_viz/_templates.py +246 -0
- scitex/verify/_viz/_utils.py +56 -0
- {scitex-2.16.1.dist-info → scitex-2.17.0.dist-info}/METADATA +1 -1
- {scitex-2.16.1.dist-info → scitex-2.17.0.dist-info}/RECORD +47 -23
- scitex/session/_lifecycle.py +0 -827
- {scitex-2.16.1.dist-info → scitex-2.17.0.dist-info}/WHEEL +0 -0
- {scitex-2.16.1.dist-info → scitex-2.17.0.dist-info}/entry_points.txt +0 -0
- {scitex-2.16.1.dist-info → scitex-2.17.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,440 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Timestamp: "2026-02-01 (ywatanabe)"
|
|
3
|
+
# File: /home/ywatanabe/proj/scitex-python/src/scitex/verify/_viz/_mermaid.py
|
|
4
|
+
"""Mermaid diagram generation for verification DAG."""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Literal, Optional, Union
|
|
11
|
+
|
|
12
|
+
from .._chain import VerificationLevel, verify_chain, verify_run
|
|
13
|
+
from .._db import get_db
|
|
14
|
+
from ._json import file_to_node_id, format_path, generate_dag_json, verify_file_hash
|
|
15
|
+
from ._templates import get_html_template
|
|
16
|
+
|
|
17
|
+
PathMode = Literal["name", "relative", "absolute"]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def generate_mermaid_dag(
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    max_depth: int = 10,
    show_files: bool = True,
    show_hashes: bool = False,
    path_mode: PathMode = "name",
) -> str:
    """
    Generate Mermaid diagram for verification DAG.

    Parameters
    ----------
    session_id : str, optional
        Start from this session
    target_file : str, optional
        Start from session that produced this file
    max_depth : int, optional
        Maximum chain depth; only the newest ``max_depth`` runs are drawn
    show_files : bool, optional
        Whether to show input/output files as nodes (default: True)
    show_hashes : bool, optional
        Whether to show truncated file hashes (default: False)
    path_mode : str, optional
        How to display file paths: "name", "relative", or "absolute"

    Returns
    -------
    str
        Mermaid diagram code
    """
    db = get_db()
    lines = ["graph TD"]

    # Resolve the chain of session ids to draw (newest first).
    if target_file:
        chain = verify_chain(target_file)
        chain_ids = [run.session_id for run in chain.runs]
    elif session_id:
        chain_ids = db.get_chain(session_id)
    else:
        chain_ids = []

    # Honor the documented depth limit (the parameter was previously
    # accepted but silently ignored). Chain ids are ordered newest first,
    # so this keeps the most recent runs.
    chain_ids = chain_ids[:max_depth]

    if not chain_ids:
        lines.append(' empty["No runs found"]')
        return "\n".join(lines)

    runs_data = _collect_runs_data(chain_ids, db)

    if show_files:
        _generate_detailed_dag(lines, runs_data, show_hashes, path_mode)
    else:
        _generate_simple_dag(lines, runs_data, chain_ids, path_mode)

    _append_class_definitions(lines)
    return "\n".join(lines)
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _collect_runs_data(chain_ids: list, db) -> list:
    """Gather run record, verification result, and file hashes per session."""
    collected = []
    for session_id in chain_ids:
        verification = verify_run(session_id)

        # A stored, successful "rerun" verification means the run was
        # reproduced from scratch; upgrade the live verification level.
        stored = db.get_latest_verification(session_id)
        if (
            stored
            and stored.get("level") == "rerun"
            and stored.get("status") == "verified"
        ):
            verification.level = VerificationLevel.RERUN

        collected.append(
            {
                "session_id": session_id,
                "run": db.get_run(session_id),
                "verification": verification,
                "inputs": db.get_file_hashes(session_id, role="input"),
                "outputs": db.get_file_hashes(session_id, role="output"),
            }
        )
    return collected
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _append_class_definitions(lines: list) -> None:
|
|
109
|
+
"""Append Mermaid class definitions for styling."""
|
|
110
|
+
lines.append("")
|
|
111
|
+
lines.append(" classDef script fill:#87CEEB,stroke:#4169E1,stroke-width:2px")
|
|
112
|
+
lines.append(" classDef verified fill:#90EE90,stroke:#228B22")
|
|
113
|
+
lines.append(
|
|
114
|
+
" classDef verified_scratch fill:#90EE90,stroke:#228B22,stroke-width:4px"
|
|
115
|
+
)
|
|
116
|
+
lines.append(" classDef failed fill:#FFB6C1,stroke:#DC143C")
|
|
117
|
+
lines.append(" classDef file fill:#FFF8DC,stroke:#DAA520")
|
|
118
|
+
lines.append(" classDef file_ok fill:#90EE90,stroke:#228B22")
|
|
119
|
+
lines.append(" classDef file_rerun fill:#90EE90,stroke:#228B22,stroke-width:4px")
|
|
120
|
+
lines.append(" classDef file_bad fill:#FFB6C1,stroke:#DC143C")
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def _generate_simple_dag(
    lines: list, runs_data: list, chain_ids: list, path_mode: PathMode = "name"
) -> None:
    """Emit one node per script plus parent-to-child edges (no file nodes)."""

    def _node_id(sid: str) -> str:
        # Mermaid identifiers cannot contain '-' or '.'
        return sid.replace("-", "_").replace(".", "_")

    for entry in runs_data:
        run = entry["run"]
        raw_path = run.get("script_path", "unknown") if run else "unknown"
        label = format_path(raw_path, path_mode)
        css = "verified" if entry["verification"].is_verified else "failed"
        lines.append(f' {_node_id(entry["session_id"])}["{label}"]:::{css}')

    # chain_ids[i + 1] is the parent of chain_ids[i], so draw parent --> child.
    for child, parent in zip(chain_ids, chain_ids[1:]):
        lines.append(f" {_node_id(parent)} --> {_node_id(child)}")
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _generate_detailed_dag(
    lines: list,
    runs_data: list,
    show_hashes: bool = False,
    path_mode: PathMode = "name",
) -> None:
    """Emit script nodes plus their input/output file nodes with status styling."""
    # Draw oldest run first so the diagram flows top-down through the chain.
    ordered = list(reversed(runs_data))

    # Pass 1: collect files whose on-disk hash no longer matches the stored one.
    failed_files = set()
    for entry in ordered:
        for fpath, stored_hash in {**entry["inputs"], **entry["outputs"]}.items():
            if not verify_file_hash(fpath, stored_hash):
                failed_files.add(fpath)

    # Pass 2: a run consuming any failed file taints every file it produced.
    for entry in ordered:
        if any(fp in failed_files for fp in entry["inputs"]):
            failed_files.update(entry["outputs"])

    # Emit script nodes, file nodes (deduplicated), and their edges.
    file_nodes = {}
    for idx, entry in enumerate(ordered):
        tainted = any(fp in failed_files for fp in entry["inputs"])

        _add_script_node(
            lines,
            idx,
            entry["session_id"],
            entry["run"],
            entry["verification"],
            path_mode,
            show_hashes,
            tainted,
        )

        script_id = f"script_{idx}"
        _add_file_nodes(
            lines,
            script_id,
            entry["inputs"],
            file_nodes,
            show_hashes,
            path_mode,
            "input",
            False,
            failed_files,
        )
        _add_file_nodes(
            lines,
            script_id,
            entry["outputs"],
            file_nodes,
            show_hashes,
            path_mode,
            "output",
            entry["verification"].is_verified_from_scratch,
            failed_files,
        )
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def _get_file_icon(filename: str) -> str:
|
|
212
|
+
"""Get icon emoji for file type."""
|
|
213
|
+
ext = Path(filename).suffix.lower()
|
|
214
|
+
icons = {
|
|
215
|
+
".py": "🐍",
|
|
216
|
+
".csv": "📊",
|
|
217
|
+
".json": "📋",
|
|
218
|
+
".yaml": "⚙️",
|
|
219
|
+
".yml": "⚙️",
|
|
220
|
+
".png": "🖼️",
|
|
221
|
+
".jpg": "🖼️",
|
|
222
|
+
".jpeg": "🖼️",
|
|
223
|
+
".svg": "🖼️",
|
|
224
|
+
".pdf": "📄",
|
|
225
|
+
".html": "🌐",
|
|
226
|
+
".txt": "📝",
|
|
227
|
+
".md": "📝",
|
|
228
|
+
".npy": "🔢",
|
|
229
|
+
".npz": "🔢",
|
|
230
|
+
".pkl": "📦",
|
|
231
|
+
".pickle": "📦",
|
|
232
|
+
".h5": "💾",
|
|
233
|
+
".hdf5": "💾",
|
|
234
|
+
".mat": "🔬",
|
|
235
|
+
".sh": "🖥️",
|
|
236
|
+
}
|
|
237
|
+
return icons.get(ext, "📄")
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def _add_script_node(
    lines: list,
    idx: int,
    sid: str,
    run: dict,
    verification,
    path_mode: PathMode,
    show_hashes: bool = False,
    has_failed_input: bool = False,
) -> None:
    """Append one Mermaid node for a script run, styled by verification status."""
    # A run cannot count as verified if any of its inputs failed upstream.
    verified = verification.is_verified and not has_failed_input
    rerun_verified = verification.is_verified_from_scratch and not has_failed_input

    if rerun_verified:
        status_class, badge = "verified_scratch", "✓✓"
    elif verified:
        status_class, badge = "verified", "✓"
    else:
        status_class, badge = "failed", "✗"

    script_path = run.get("script_path", "unknown") if run else "unknown"
    label = format_path(script_path, path_mode)
    icon = _get_file_icon(script_path)
    # Compact session identifier for the node label.
    short_id = sid.split("_")[-1][:4] if "_" in sid else sid[:8]

    script_hash = run.get("script_hash", "") if run else ""
    hash_part = (
        f"<br/>{script_hash[:8]}..." if show_hashes and script_hash else ""
    )
    lines.append(
        f' script_{idx}["{badge} {icon} {label}<br/>({short_id}){hash_part}"]:::{status_class}'
    )
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def _add_file_nodes(
    lines: list,
    script_id: str,
    files: dict,
    file_nodes: dict,
    show_hashes: bool,
    path_mode: PathMode,
    role: str,
    is_script_rerun_verified: bool = False,
    failed_files: set = None,
) -> None:
    """Append file nodes (deduplicated via *file_nodes*) and wire them to *script_id*."""
    if not failed_files:
        failed_files = set()

    for fpath, stored_hash in files.items():
        node_id = file_to_node_id(Path(fpath).name)

        # Each file is declared once; later appearances only add edges.
        if node_id not in file_nodes:
            hash_matches = verify_file_hash(fpath, stored_hash)
            if fpath in failed_files or not hash_matches:
                file_class, badge = "file_bad", "✗"
            elif role == "output" and is_script_rerun_verified:
                # Outputs of a from-scratch-verified run get the double badge.
                file_class, badge = "file_rerun", "✓✓"
            else:
                file_class, badge = "file_ok", "✓"

            label = format_path(fpath, path_mode)
            icon = _get_file_icon(fpath)
            hash_part = f"<br/>{stored_hash[:8]}..." if show_hashes else ""
            lines.append(
                f' {node_id}[("{badge} {icon} {label}{hash_part}")]:::{file_class}'
            )
            file_nodes[node_id] = (fpath, stored_hash)

        if role == "input":
            lines.append(f" {node_id} --> {script_id}")
        else:
            lines.append(f" {script_id} --> {node_id}")
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def generate_html_dag(
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    title: str = "Verification DAG",
    show_hashes: bool = False,
    path_mode: PathMode = "name",
) -> str:
    """Build the Mermaid DAG and embed it in the standalone HTML template."""
    diagram = generate_mermaid_dag(
        session_id=session_id,
        target_file=target_file,
        show_hashes=show_hashes,
        path_mode=path_mode,
    )
    return get_html_template(title, diagram)
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
def render_dag(
    output_path: Union[str, Path],
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    title: str = "Verification DAG",
    show_hashes: bool = False,
    path_mode: PathMode = "name",
) -> Path:
    """
    Render verification DAG to file (HTML, PNG, SVG, JSON, or MMD).

    Parameters
    ----------
    output_path : str or Path
        Output file path. Extension determines format.
    session_id : str, optional
        Start from this session
    target_file : str, optional
        Start from session that produced this file
    title : str, optional
        Title for the visualization
    show_hashes : bool, optional
        Whether to show file hashes
    path_mode : str, optional
        Path display mode

    Returns
    -------
    Path
        Path to the generated file. For PNG/SVG this may be a ``.mmd``
        fallback when the ``mmdc`` CLI is unavailable.

    Raises
    ------
    ValueError
        If the output extension is not one of the supported formats.
    """
    output_path = Path(output_path)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    ext = output_path.suffix.lower()

    if ext == ".html":
        output_path.write_text(
            generate_html_dag(
                session_id=session_id,
                target_file=target_file,
                title=title,
                show_hashes=show_hashes,
                path_mode=path_mode,
            )
        )

    elif ext == ".mmd":
        output_path.write_text(
            generate_mermaid_dag(
                session_id=session_id,
                target_file=target_file,
                show_hashes=show_hashes,
                path_mode=path_mode,
            )
        )

    elif ext == ".json":
        graph_json = generate_dag_json(
            session_id=session_id,
            target_file=target_file,
            path_mode=path_mode,
        )
        output_path.write_text(json.dumps(graph_json, indent=2))

    elif ext in (".png", ".svg"):
        mermaid = generate_mermaid_dag(
            session_id=session_id,
            target_file=target_file,
            show_hashes=show_hashes,
            path_mode=path_mode,
        )
        return _compile_mermaid_image(mermaid, output_path)

    else:
        raise ValueError(
            f"Unsupported format: {ext}. Use .html, .png, .svg, .json, or .mmd"
        )

    return output_path


def _compile_mermaid_image(mermaid: str, output_path: Path) -> Path:
    """Compile Mermaid source to PNG/SVG via the ``mmdc`` CLI.

    Writes the diagram to a temp file, invokes ``mmdc``, and always cleans
    the temp file up. If the CLI is missing or fails, falls back to writing
    a ``.mmd`` file next to *output_path* and returns that path instead.
    """
    import subprocess
    import tempfile

    with tempfile.NamedTemporaryFile(mode="w", suffix=".mmd", delete=False) as f:
        f.write(mermaid)
        mmd_path = f.name

    try:
        subprocess.run(
            ["mmdc", "-i", mmd_path, "-o", str(output_path)],
            check=True,
            capture_output=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Fallback: keep the raw Mermaid source so the user still gets output.
        fallback_path = output_path.with_suffix(".mmd")
        fallback_path.write_text(mermaid)
        return fallback_path
    finally:
        Path(mmd_path).unlink(missing_ok=True)

    return output_path
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
# EOF
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Timestamp: "2026-02-01 (ywatanabe)"
|
|
3
|
+
# File: /home/ywatanabe/proj/scitex-python/src/scitex/verify/_viz/_plotly.py
|
|
4
|
+
"""Plotly-based interactive visualization for verification DAG."""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional, Union
|
|
10
|
+
|
|
11
|
+
from .._chain import verify_chain, verify_run
|
|
12
|
+
from .._db import get_db
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def generate_plotly_dag(
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    title: str = "Verification DAG",
) -> "go.Figure":
    """
    Generate interactive Plotly figure for verification DAG.

    Parameters
    ----------
    session_id : str, optional
        Start from this session
    target_file : str, optional
        Start from session that produced this file
    title : str, optional
        Title for the figure

    Returns
    -------
    plotly.graph_objects.Figure
        Interactive Plotly figure

    Raises
    ------
    ImportError
        If plotly is not installed.
    """
    # Lazy import keeps plotly an optional dependency; the return annotation
    # is quoted so it never requires the module at import time.
    try:
        import plotly.graph_objects as go
    except ImportError:
        raise ImportError("plotly required: pip install plotly")

    db = get_db()
    nodes = []
    edges = []
    node_colors = []
    node_texts = []

    if target_file:
        chain = verify_chain(target_file)
        for run in chain.runs:  # index was unused; iterate runs directly
            script_name = Path(run.script_path).name if run.script_path else "unknown"
            nodes.append(run.session_id)
            node_texts.append(f"{script_name}<br>{run.session_id[:20]}...")
            node_colors.append("#90EE90" if run.is_verified else "#FFB6C1")

        for i in range(len(chain.runs) - 1):
            edges.append((i + 1, i))  # parent -> child

    elif session_id:
        chain_ids = db.get_chain(session_id)
        for sid in chain_ids:
            run = db.get_run(sid)
            verification = verify_run(sid)
            script_name = (
                Path(run["script_path"]).name
                if run and run.get("script_path")
                else "unknown"
            )
            nodes.append(sid)
            node_texts.append(f"{script_name}<br>{sid[:20]}...")
            node_colors.append("#90EE90" if verification.is_verified else "#FFB6C1")

        for i in range(len(chain_ids) - 1):
            edges.append((i + 1, i))

    if not nodes:
        # Placeholder node so the figure never renders empty.
        nodes = ["No data"]
        node_texts = ["No runs found"]
        node_colors = ["#CCCCCC"]

    # Create layout positions (vertical flow, centered horizontally).
    n = len(nodes)
    x_pos = [0.5] * n
    y_pos = [1 - i / max(n - 1, 1) for i in range(n)]

    # Create edge traces; a trailing None breaks the polyline between edges.
    edge_x = []
    edge_y = []
    for src, dst in edges:
        edge_x.extend([x_pos[src], x_pos[dst], None])
        edge_y.extend([y_pos[src], y_pos[dst], None])

    edge_trace = go.Scatter(
        x=edge_x,
        y=edge_y,
        line=dict(width=2, color="#888"),
        hoverinfo="none",
        mode="lines",
    )

    # Create node trace
    node_trace = go.Scatter(
        x=x_pos,
        y=y_pos,
        mode="markers+text",
        hoverinfo="text",
        text=node_texts,
        textposition="middle right",
        marker=dict(
            size=30,
            color=node_colors,
            line=dict(width=2, color="#333"),
        ),
    )

    # Assemble figure with axes hidden (positions are purely cosmetic).
    fig = go.Figure(
        data=[edge_trace, node_trace],
        layout=go.Layout(
            title=dict(text=title, font=dict(size=16)),
            showlegend=False,
            hovermode="closest",
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            margin=dict(l=40, r=40, t=60, b=40),
            plot_bgcolor="white",
            annotations=[
                dict(
                    text="🟢 Verified | 🔴 Failed",
                    showarrow=False,
                    xref="paper",
                    yref="paper",
                    x=0,
                    y=-0.1,
                    font=dict(size=12),
                )
            ],
        ),
    )

    return fig
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def render_plotly_dag(
    output_path: Union[str, Path],
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    title: str = "Verification DAG",
) -> Path:
    """
    Render verification DAG using Plotly.

    Parameters
    ----------
    output_path : str or Path
        Output file path (.html or .png)
    session_id : str, optional
        Start from this session
    target_file : str, optional
        Start from session that produced this file
    title : str, optional
        Title for the visualization

    Returns
    -------
    Path
        Path to the generated file
    """
    destination = Path(output_path)
    destination.parent.mkdir(parents=True, exist_ok=True)

    figure = generate_plotly_dag(
        session_id=session_id,
        target_file=target_file,
        title=title,
    )

    suffix = destination.suffix.lower()
    if suffix in (".png", ".svg"):
        figure.write_image(str(destination))
    elif suffix == ".html":
        figure.write_html(str(destination))
    else:
        # Unknown extension: fall back to an HTML file next to the request.
        destination = destination.with_suffix(".html")
        figure.write_html(str(destination))

    return destination
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# EOF
|