scitex 2.16.2__py3-none-any.whl → 2.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scitex/_mcp_resources/_cheatsheet.py +1 -1
- scitex/_mcp_resources/_modules.py +1 -1
- scitex/_mcp_tools/__init__.py +2 -0
- scitex/_mcp_tools/verify.py +256 -0
- scitex/cli/main.py +2 -0
- scitex/cli/verify.py +476 -0
- scitex/dev/plt/__init__.py +1 -1
- scitex/dev/plt/data/mpl/PLOTTING_FUNCTIONS.yaml +90 -0
- scitex/dev/plt/data/mpl/PLOTTING_SIGNATURES.yaml +1571 -0
- scitex/dev/plt/data/mpl/PLOTTING_SIGNATURES_DETAILED.yaml +6262 -0
- scitex/dev/plt/data/mpl/SIGNATURES_FLATTENED.yaml +1274 -0
- scitex/dev/plt/data/mpl/dir_ax.txt +459 -0
- scitex/dev/plt/mpl/get_dir_ax.py +1 -1
- scitex/dev/plt/mpl/get_signatures.py +1 -1
- scitex/dev/plt/mpl/get_signatures_details.py +1 -1
- scitex/io/_load.py +8 -1
- scitex/io/_save.py +12 -0
- scitex/scholar/data/.gitkeep +0 -0
- scitex/scholar/data/README.md +44 -0
- scitex/scholar/data/bib_files/bibliography.bib +1952 -0
- scitex/scholar/data/bib_files/neurovista.bib +277 -0
- scitex/scholar/data/bib_files/neurovista_enriched.bib +441 -0
- scitex/scholar/data/bib_files/neurovista_enriched_enriched.bib +441 -0
- scitex/scholar/data/bib_files/neurovista_processed.bib +338 -0
- scitex/scholar/data/bib_files/openaccess.bib +89 -0
- scitex/scholar/data/bib_files/pac-seizure_prediction_enriched.bib +2178 -0
- scitex/scholar/data/bib_files/pac.bib +698 -0
- scitex/scholar/data/bib_files/pac_enriched.bib +1061 -0
- scitex/scholar/data/bib_files/pac_processed.bib +0 -0
- scitex/scholar/data/bib_files/pac_titles.txt +75 -0
- scitex/scholar/data/bib_files/paywalled.bib +98 -0
- scitex/scholar/data/bib_files/related-papers-by-coauthors.bib +58 -0
- scitex/scholar/data/bib_files/related-papers-by-coauthors_enriched.bib +87 -0
- scitex/scholar/data/bib_files/seizure_prediction.bib +694 -0
- scitex/scholar/data/bib_files/seizure_prediction_processed.bib +0 -0
- scitex/scholar/data/bib_files/test_complete_enriched.bib +437 -0
- scitex/scholar/data/bib_files/test_final_enriched.bib +437 -0
- scitex/scholar/data/bib_files/test_seizure.bib +46 -0
- scitex/scholar/data/impact_factor/JCR_IF_2022.xlsx +0 -0
- scitex/scholar/data/impact_factor/JCR_IF_2024.db +0 -0
- scitex/scholar/data/impact_factor/JCR_IF_2024.xlsx +0 -0
- scitex/scholar/data/impact_factor/JCR_IF_2024_v01.db +0 -0
- scitex/scholar/data/impact_factor.db +0 -0
- scitex/session/README.md +2 -2
- scitex/session/__init__.py +1 -0
- scitex/session/_decorator.py +57 -33
- scitex/session/_lifecycle/__init__.py +23 -0
- scitex/session/_lifecycle/_close.py +225 -0
- scitex/session/_lifecycle/_config.py +112 -0
- scitex/session/_lifecycle/_matplotlib.py +83 -0
- scitex/session/_lifecycle/_start.py +246 -0
- scitex/session/_lifecycle/_utils.py +186 -0
- scitex/session/_manager.py +40 -3
- scitex/session/template.py +1 -1
- scitex/template/_templates/plt.py +1 -1
- scitex/template/_templates/session.py +1 -1
- scitex/verify/README.md +312 -0
- scitex/verify/__init__.py +212 -0
- scitex/verify/_chain.py +369 -0
- scitex/verify/_db.py +600 -0
- scitex/verify/_hash.py +187 -0
- scitex/verify/_integration.py +127 -0
- scitex/verify/_rerun.py +253 -0
- scitex/verify/_tracker.py +330 -0
- scitex/verify/_visualize.py +48 -0
- scitex/verify/_viz/__init__.py +56 -0
- scitex/verify/_viz/_colors.py +84 -0
- scitex/verify/_viz/_format.py +302 -0
- scitex/verify/_viz/_json.py +192 -0
- scitex/verify/_viz/_mermaid.py +440 -0
- scitex/verify/_viz/_plotly.py +193 -0
- scitex/verify/_viz/_templates.py +246 -0
- scitex/verify/_viz/_utils.py +56 -0
- {scitex-2.16.2.dist-info → scitex-2.17.0.dist-info}/METADATA +1 -1
- {scitex-2.16.2.dist-info → scitex-2.17.0.dist-info}/RECORD +78 -29
- scitex/scholar/url_finder/.tmp/open_url/KNOWN_RESOLVERS.py +0 -462
- scitex/scholar/url_finder/.tmp/open_url/README.md +0 -223
- scitex/scholar/url_finder/.tmp/open_url/_DOIToURLResolver.py +0 -694
- scitex/scholar/url_finder/.tmp/open_url/_OpenURLResolver.py +0 -1160
- scitex/scholar/url_finder/.tmp/open_url/_ResolverLinkFinder.py +0 -344
- scitex/scholar/url_finder/.tmp/open_url/__init__.py +0 -24
- scitex/session/_lifecycle.py +0 -827
- {scitex-2.16.2.dist-info → scitex-2.17.0.dist-info}/WHEEL +0 -0
- {scitex-2.16.2.dist-info → scitex-2.17.0.dist-info}/entry_points.txt +0 -0
- {scitex-2.16.2.dist-info → scitex-2.17.0.dist-info}/licenses/LICENSE +0 -0
scitex/verify/_viz/_format.py (new file)
@@ -0,0 +1,302 @@
#!/usr/bin/env python3
# Timestamp: "2026-02-01 (ywatanabe)"
# File: /home/ywatanabe/proj/scitex-python/src/scitex/verify/_viz/_format.py
"""Formatting functions for verification output."""

from __future__ import annotations

from pathlib import Path
from typing import Any, Dict, List

from .._chain import (
    ChainVerification,
    RunVerification,
    VerificationStatus,
    verify_run,
)
from ._colors import Colors, status_icon, status_text


def format_run_verification(
    verification: RunVerification,
    verbose: bool = False,
) -> str:
    """
    Format run verification result as a string.

    Parameters
    ----------
    verification : RunVerification
        Verification result
    verbose : bool, optional
        Show detailed file information

    Returns
    -------
    str
        Formatted string
    """
    lines = []
    icon = status_icon(verification.status)
    stat_text = status_text(verification.status)

    lines.append(f"{icon} {verification.session_id} [{stat_text}]")

    if verification.script_path:
        lines.append(f"  Script: {verification.script_path}")

    if verbose or not verification.is_verified:
        if verification.inputs:
            lines.append("  Inputs:")
            for f in verification.inputs:
                f_icon = status_icon(f.status)
                lines.append(f"    {f_icon} {f.path}")

        if verification.outputs:
            lines.append("  Outputs:")
            for f in verification.outputs:
                f_icon = status_icon(f.status)
                lines.append(f"    {f_icon} {f.path}")

    if verification.mismatched_files:
        lines.append(f"  {Colors.RED}Mismatched:{Colors.RESET}")
        for f in verification.mismatched_files:
            lines.append(f"    - {f.path}")
            lines.append(f"      Expected: {f.expected_hash[:16]}...")
            if f.current_hash:
                lines.append(f"      Got: {f.current_hash[:16]}...")

    if verification.missing_files:
        lines.append(f"  {Colors.YELLOW}Missing:{Colors.RESET}")
        for f in verification.missing_files:
            lines.append(f"    - {f.path}")

    return "\n".join(lines)


def format_run_detailed(verification: RunVerification) -> str:
    """
    Format run verification with detailed breakdown.

    Shows inputs/scripts/outputs with individual status icons.

    Parameters
    ----------
    verification : RunVerification
        Verification result

    Returns
    -------
    str
        Formatted string with tree structure
    """
    lines = []
    icon = status_icon(verification.status)

    lines.append(f"{icon} {verification.session_id}")

    if verification.script_path:
        script_name = Path(verification.script_path).name
        lines.append(f"  Script: {script_name}")

    failed_inputs = [
        f for f in verification.inputs if f.status != VerificationStatus.VERIFIED
    ]
    failed_outputs = [
        f for f in verification.outputs if f.status != VerificationStatus.VERIFIED
    ]

    if verification.inputs:
        input_icons = "".join([status_icon(f.status) for f in verification.inputs])
        lines.append(f"  ├── inputs: {input_icons}")
        for f in failed_inputs:
            lines.append(f"  │   └── {Colors.RED}{Path(f.path).name}{Colors.RESET}")

    if verification.script_path:
        script_status = VerificationStatus.VERIFIED
        lines.append(f"  ├── script: {status_icon(script_status)}")

    if verification.outputs:
        output_icons = "".join([status_icon(f.status) for f in verification.outputs])
        lines.append(f"  └── outputs: {output_icons}")
        for f in failed_outputs:
            lines.append(f"      └── {Colors.RED}{Path(f.path).name}{Colors.RESET}")

    return "\n".join(lines)


def format_chain_verification(
    chain: ChainVerification,
    verbose: bool = False,
) -> str:
    """
    Format chain verification result as a tree.

    Parameters
    ----------
    chain : ChainVerification
        Chain verification result
    verbose : bool, optional
        Show detailed information

    Returns
    -------
    str
        Formatted tree string
    """
    lines = []
    icon = status_icon(chain.status)

    lines.append(
        f"{Colors.BOLD}Chain verification for:{Colors.RESET} {chain.target_file}"
    )
    lines.append(f"Status: {icon} {status_text(chain.status)}")
    lines.append("")

    if not chain.runs:
        lines.append("  (no runs found)")
        return "\n".join(lines)

    for i, run in enumerate(chain.runs):
        is_last = i == len(chain.runs) - 1
        prefix = "└── " if is_last else "├── "
        continuation = "    " if is_last else "│   "

        run_icon = status_icon(run.status)
        lines.append(f"{prefix}{run_icon} {run.session_id}")

        if run.script_path:
            script_name = Path(run.script_path).name
            lines.append(f"{continuation}Script: {script_name}")

        if not run.is_verified:
            if run.mismatched_files:
                lines.append(
                    f"{continuation}{Colors.RED}Mismatched files:{Colors.RESET}"
                )
                for f in run.mismatched_files:
                    lines.append(f"{continuation}  - {Path(f.path).name}")

            if run.missing_files:
                lines.append(
                    f"{continuation}{Colors.YELLOW}Missing files:{Colors.RESET}"
                )
                for f in run.missing_files:
                    lines.append(f"{continuation}  - {Path(f.path).name}")

    return "\n".join(lines)


def format_status(status: Dict[str, Any]) -> str:
    """
    Format verification status summary (like git status).

    Parameters
    ----------
    status : dict
        Status dictionary from get_status()

    Returns
    -------
    str
        Formatted status string
    """
    lines = []

    lines.append(f"{Colors.BOLD}Verification Status{Colors.RESET}")
    lines.append("=" * 40)
    lines.append("")

    total = (
        status["verified_count"] + status["mismatch_count"] + status["missing_count"]
    )
    lines.append(f"Total runs tracked: {total}")
    lines.append(
        f"  {Colors.GREEN}●{Colors.RESET} Verified: {status['verified_count']}"
    )
    lines.append(f"  {Colors.RED}●{Colors.RESET} Mismatch: {status['mismatch_count']}")
    lines.append(
        f"  {Colors.YELLOW}○{Colors.RESET} Missing: {status['missing_count']}"
    )
    lines.append("")

    if status["mismatched"]:
        lines.append(f"{Colors.RED}Modified (hash mismatch):{Colors.RESET}")
        for item in status["mismatched"][:10]:
            lines.append(f"  {item['session_id']}")
            for f in item["files"][:3]:
                lines.append(f"    └── {Path(f).name}")
            if len(item["files"]) > 3:
                lines.append(f"    └── ... and {len(item['files']) - 3} more")
        if len(status["mismatched"]) > 10:
            lines.append(f"  ... and {len(status['mismatched']) - 10} more runs")
        lines.append("")

    if status["missing"]:
        lines.append(f"{Colors.YELLOW}Missing files:{Colors.RESET}")
        for item in status["missing"][:10]:
            lines.append(f"  {item['session_id']}")
            for f in item["files"][:3]:
                lines.append(f"    └── {Path(f).name}")
            if len(item["files"]) > 3:
                lines.append(f"    └── ... and {len(item['files']) - 3} more")
        if len(status["missing"]) > 10:
            lines.append(f"  ... and {len(status['missing']) - 10} more runs")

    if not status["mismatched"] and not status["missing"]:
        lines.append(f"{Colors.GREEN}All tracked files verified!{Colors.RESET}")

    return "\n".join(lines)


def format_list(
    runs: List[Dict[str, Any]],
    verify: bool = True,
) -> str:
    """
    Format list of runs with verification status.

    Parameters
    ----------
    runs : list of dict
        List of run records from database
    verify : bool, optional
        Whether to verify each run (default: True)

    Returns
    -------
    str
        Formatted list string
    """
    lines = []

    header = f"{'SESSION':<45} {'STATUS':<15} {'SCRIPT':<30}"
    lines.append(f"{Colors.BOLD}{header}{Colors.RESET}")
    lines.append("-" * 90)

    for run in runs:
        session_id = run["session_id"]

        if verify:
            verification = verify_run(session_id)
            icon = status_icon(verification.status)
            stat_text = verification.status.value
        else:
            icon = " "
            stat_text = run.get("status", "unknown")

        script = (
            Path(run.get("script_path", "")).name if run.get("script_path") else "-"
        )

        session_display = session_id[:43] + ".." if len(session_id) > 45 else session_id
        script_display = script[:28] + ".." if len(script) > 30 else script

        lines.append(
            f"{icon} {session_display:<43} {stat_text:<15} {script_display:<30}"
        )

    return "\n".join(lines)


# EOF
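A minimal usage sketch for the formatting helpers added above (not part of the diff): the session id is hypothetical, and the import paths simply follow the private-module layout shown in this release.

# Usage sketch; assumes the import paths match the layout in this diff
# and uses a made-up session id.
from scitex.verify._chain import verify_run
from scitex.verify._viz._format import (
    format_run_detailed,
    format_run_verification,
)

verification = verify_run("20260201-120000_example-session")  # hypothetical id
print(format_run_verification(verification, verbose=True))  # flat summary
print(format_run_detailed(verification))                    # tree breakdown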
scitex/verify/_viz/_json.py (new file)
@@ -0,0 +1,192 @@
#!/usr/bin/env python3
# Timestamp: "2026-02-01 (ywatanabe)"
# File: /home/ywatanabe/proj/scitex-python/src/scitex/verify/_viz/_json.py
"""JSON graph export for verification DAG."""

from __future__ import annotations

from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Literal, Optional

PathMode = Literal["name", "relative", "absolute"]


def format_path(path: str, mode: PathMode) -> str:
    """Format path according to display mode."""
    if not path or path == "unknown":
        return "unknown"
    p = Path(path)
    if mode == "name":
        return p.name
    elif mode == "relative":
        try:
            return str(p.relative_to(Path.cwd()))
        except ValueError:
            return str(p)
    else:  # absolute
        return str(p.resolve())


def verify_file_hash(path: str, stored_hash: str) -> bool:
    """Check if file's current hash matches stored hash."""
    from .._hash import hash_file

    try:
        current_hash = hash_file(path)
        return current_hash == stored_hash
    except (FileNotFoundError, OSError):
        return False


def file_to_node_id(filename: str) -> str:
    """Convert filename to valid node ID."""
    return "file_" + filename.replace(".", "_").replace("-", "_").replace(" ", "_")


def generate_dag_json(
    session_id: Optional[str] = None,
    target_file: Optional[str] = None,
    path_mode: PathMode = "name",
) -> Dict[str, Any]:
    """
    Generate JSON representation of verification DAG.

    Uses node-link format compatible with D3.js and other visualization libraries.

    Parameters
    ----------
    session_id : str, optional
        Start from this session
    target_file : str, optional
        Start from session that produced this file
    path_mode : str, optional
        Path display mode: "name", "relative", or "absolute"

    Returns
    -------
    dict
        Graph in node-link format with keys:
        - nodes: list of {id, type, name, status, hash, ...}
        - links: list of {source, target, type}
        - metadata: {generated_at, target_file, ...}
    """
    from .._chain import verify_chain, verify_run
    from .._db import get_db

    db = get_db()
    nodes = []
    links = []
    node_ids = set()

    if target_file:
        chain = verify_chain(target_file)
        chain_ids = [run.session_id for run in chain.runs]
    elif session_id:
        chain_ids = db.get_chain(session_id)
    else:
        chain_ids = []

    if not chain_ids:
        return {
            "nodes": [],
            "links": [],
            "metadata": {"generated_at": datetime.now().isoformat(), "empty": True},
        }

    # Collect runs data
    runs_data = []
    for sid in chain_ids:
        run = db.get_run(sid)
        verification = verify_run(sid)
        inputs = db.get_file_hashes(sid, role="input")
        outputs = db.get_file_hashes(sid, role="output")
        runs_data.append(
            {
                "session_id": sid,
                "run": run,
                "verification": verification,
                "inputs": inputs,
                "outputs": outputs,
            }
        )

    # Process in reverse order (oldest first)
    runs_data = list(reversed(runs_data))

    for i, data in enumerate(runs_data):
        sid = data["session_id"]
        run = data["run"]
        verification = data["verification"]
        inputs = data["inputs"]
        outputs = data["outputs"]

        # Script node
        script_id = f"script_{i}"
        script_path = run.get("script_path", "unknown") if run else "unknown"
        nodes.append(
            {
                "id": script_id,
                "type": "script",
                "name": format_path(script_path, path_mode),
                "path": script_path,
                "session_id": sid,
                "status": "verified" if verification.is_verified else "failed",
                "verified_from_scratch": verification.is_verified_from_scratch,
                "hash": run.get("script_hash") if run else None,
            }
        )
        node_ids.add(script_id)

        # Input file nodes
        for fpath, stored_hash in inputs.items():
            file_id = file_to_node_id(Path(fpath).name)
            if file_id not in node_ids:
                file_ok = verify_file_hash(fpath, stored_hash)
                nodes.append(
                    {
                        "id": file_id,
                        "type": "file",
                        "role": "input",
                        "name": format_path(fpath, path_mode),
                        "path": fpath,
                        "status": "verified" if file_ok else "failed",
                        "hash": stored_hash,
                    }
                )
                node_ids.add(file_id)
            links.append({"source": file_id, "target": script_id, "type": "input"})

        # Output file nodes
        for fpath, stored_hash in outputs.items():
            file_id = file_to_node_id(Path(fpath).name)
            if file_id not in node_ids:
                file_ok = verify_file_hash(fpath, stored_hash)
                nodes.append(
                    {
                        "id": file_id,
                        "type": "file",
                        "role": "output",
                        "name": format_path(fpath, path_mode),
                        "path": fpath,
                        "status": "verified" if file_ok else "failed",
                        "hash": stored_hash,
                    }
                )
                node_ids.add(file_id)
            links.append({"source": script_id, "target": file_id, "type": "output"})

    return {
        "nodes": nodes,
        "links": links,
        "metadata": {
            "generated_at": datetime.now().isoformat(),
            "target_file": target_file,
            "session_id": session_id,
            "num_runs": len(runs_data),
            "num_files": len([n for n in nodes if n["type"] == "file"]),
        },
    }


# EOF
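Likewise, a hedged sketch (not part of the diff) of exporting the verification DAG to a JSON file; the target file and output path are hypothetical examples, and the import path follows the module layout shown above.

# Usage sketch; target file and output path are made up for illustration.
import json

from scitex.verify._viz._json import generate_dag_json

graph = generate_dag_json(target_file="results/figure1.png", path_mode="relative")
with open("verification_dag.json", "w") as fh:
    json.dump(graph, fh, indent=2)

meta = graph["metadata"]
print(f"{meta.get('num_runs', 0)} runs, {meta.get('num_files', 0)} files in the DAG")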