fraclab-sdk 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +50 -0
- README.md +73 -7
- fraclab_sdk/__init__.py +3 -0
- fraclab_sdk/devkit/__init__.py +8 -0
- fraclab_sdk/devkit/validate.py +836 -75
- fraclab_sdk/specs/__init__.py +22 -0
- fraclab_sdk/specs/output.py +33 -0
- fraclab_sdk/version.py +5 -0
- fraclab_sdk/workbench/Home.py +162 -0
- fraclab_sdk/workbench/__init__.py +4 -0
- fraclab_sdk/workbench/__main__.py +48 -0
- fraclab_sdk/workbench/pages/1_Snapshots.py +577 -0
- fraclab_sdk/workbench/pages/2_Browse.py +513 -0
- fraclab_sdk/workbench/pages/3_Selection.py +464 -0
- fraclab_sdk/workbench/pages/4_Run.py +331 -0
- fraclab_sdk/workbench/pages/5_Results.py +298 -0
- fraclab_sdk/workbench/pages/6_Algorithm_Edit.py +116 -0
- fraclab_sdk/workbench/pages/7_Schema_Edit.py +160 -0
- fraclab_sdk/workbench/pages/8_Output_Edit.py +155 -0
- fraclab_sdk/workbench/pages/9_Export_Algorithm.py +386 -0
- fraclab_sdk/workbench/pages/__init__.py +1 -0
- fraclab_sdk/workbench/ui_styles.py +103 -0
- fraclab_sdk/workbench/utils.py +43 -0
- {fraclab_sdk-0.1.0.dist-info → fraclab_sdk-0.1.2.dist-info}/METADATA +77 -8
- {fraclab_sdk-0.1.0.dist-info → fraclab_sdk-0.1.2.dist-info}/RECORD +27 -8
- {fraclab_sdk-0.1.0.dist-info → fraclab_sdk-0.1.2.dist-info}/entry_points.txt +1 -0
- {fraclab_sdk-0.1.0.dist-info → fraclab_sdk-0.1.2.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,386 @@
|
|
|
1
|
+
"""Algorithm export page."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import io
|
|
6
|
+
import json
|
|
7
|
+
import shutil
|
|
8
|
+
import tempfile
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
import streamlit as st
|
|
12
|
+
|
|
13
|
+
from fraclab_sdk.algorithm import AlgorithmLibrary
|
|
14
|
+
from fraclab_sdk.config import SDKConfig
|
|
15
|
+
from fraclab_sdk.devkit import (
|
|
16
|
+
validate_algorithm_signature,
|
|
17
|
+
validate_inputspec,
|
|
18
|
+
validate_output_contract,
|
|
19
|
+
)
|
|
20
|
+
from fraclab_sdk.snapshot import SnapshotLibrary
|
|
21
|
+
from fraclab_sdk.workbench import ui_styles
|
|
22
|
+
|
|
23
|
+
st.set_page_config(page_title="Export Algorithm", page_icon="📦", layout="wide", initial_sidebar_state="expanded")
|
|
24
|
+
st.title("Export Algorithm")
|
|
25
|
+
|
|
26
|
+
ui_styles.apply_global_styles()
|
|
27
|
+
|
|
28
|
+
# --- Page-Specific CSS ---
|
|
29
|
+
st.markdown("""
|
|
30
|
+
<style>
|
|
31
|
+
/* Status badge styling */
|
|
32
|
+
.status-badge {
|
|
33
|
+
padding: 4px 8px;
|
|
34
|
+
border-radius: 4px;
|
|
35
|
+
font-weight: 600;
|
|
36
|
+
font-size: 0.85rem;
|
|
37
|
+
}
|
|
38
|
+
.status-ok { background-color: #d1fae5; color: #065f46; }
|
|
39
|
+
.status-missing { background-color: #fee2e2; color: #991b1b; }
|
|
40
|
+
.status-warning { background-color: #fef3c7; color: #92400e; }
|
|
41
|
+
</style>
|
|
42
|
+
""", unsafe_allow_html=True)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
config = SDKConfig()
|
|
46
|
+
algo_lib = AlgorithmLibrary(config)
|
|
47
|
+
snap_lib = SnapshotLibrary(config)
|
|
48
|
+
|
|
49
|
+
algos = algo_lib.list_algorithms()
|
|
50
|
+
if not algos:
|
|
51
|
+
st.info("No algorithms imported. Use Snapshots page to import or create one.")
|
|
52
|
+
st.stop()
|
|
53
|
+
|
|
54
|
+
# ==========================================
|
|
55
|
+
# 1. Source Selection
|
|
56
|
+
# ==========================================
|
|
57
|
+
st.subheader("1. Select Algorithm Source")
|
|
58
|
+
|
|
59
|
+
with st.container(border=True):
|
|
60
|
+
c1, c2 = st.columns([3, 1])
|
|
61
|
+
with c1:
|
|
62
|
+
algo_options = {f"{a.algorithm_id}:{a.version}": a for a in algos}
|
|
63
|
+
selected_key = st.selectbox(
|
|
64
|
+
"Target Algorithm",
|
|
65
|
+
options=list(algo_options.keys()),
|
|
66
|
+
format_func=lambda k: f"{algo_options[k].algorithm_id} (v{algo_options[k].version})",
|
|
67
|
+
label_visibility="collapsed"
|
|
68
|
+
)
|
|
69
|
+
with c2:
|
|
70
|
+
if selected_key:
|
|
71
|
+
selected_algo = algo_options[selected_key]
|
|
72
|
+
st.caption(f"ID: `{selected_algo.algorithm_id}`")
|
|
73
|
+
|
|
74
|
+
if not selected_key:
|
|
75
|
+
st.stop()
|
|
76
|
+
|
|
77
|
+
selected_algo = algo_options[selected_key]
|
|
78
|
+
handle = algo_lib.get_algorithm(selected_algo.algorithm_id, selected_algo.version)
|
|
79
|
+
algo_dir = handle.directory
|
|
80
|
+
|
|
81
|
+
# File paths
|
|
82
|
+
manifest_path = algo_dir / "manifest.json"
|
|
83
|
+
params_schema_path = algo_dir / "dist" / "params.schema.json"
|
|
84
|
+
output_contract_path = algo_dir / "dist" / "output_contract.json"
|
|
85
|
+
drs_path = algo_dir / "dist" / "drs.json"
|
|
86
|
+
|
|
87
|
+
# ==========================================
|
|
88
|
+
# 2. Validation Status
|
|
89
|
+
# ==========================================
|
|
90
|
+
st.subheader("2. Validation Status")
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _get_algo_mtime(algo_dir: Path) -> float:
|
|
94
|
+
"""Get max mtime of source files for cache invalidation."""
|
|
95
|
+
files = [
|
|
96
|
+
algo_dir / "main.py",
|
|
97
|
+
algo_dir / "manifest.json",
|
|
98
|
+
]
|
|
99
|
+
# schema/*.py files
|
|
100
|
+
schema_dir = algo_dir / "schema"
|
|
101
|
+
if schema_dir.exists():
|
|
102
|
+
files.extend(schema_dir.glob("*.py"))
|
|
103
|
+
|
|
104
|
+
# dist/*.json (if exists)
|
|
105
|
+
dist_dir = algo_dir / "dist"
|
|
106
|
+
if dist_dir.exists():
|
|
107
|
+
files.extend(dist_dir.glob("*.json"))
|
|
108
|
+
|
|
109
|
+
mtimes = [f.stat().st_mtime for f in files if f.exists()]
|
|
110
|
+
return max(mtimes) if mtimes else 0.0
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@st.cache_data(ttl=60)
def _run_all_validations(algo_dir_str: str, _mtime: float) -> dict[str, dict]:
    """Run all devkit validations for the workspace. Cached by (path, mtime).

    The original implementation repeated an identical try/except block three
    times (once per validator); this version drives the three validators from
    a single table so the serialization and error handling cannot drift apart.

    Args:
        algo_dir_str: Algorithm directory as a string (str keeps the cache
            key hashable and stable).
        _mtime: Max source mtime; participates in the cache key only, so a
            source change busts the cache.

    Returns:
        Mapping of validator key -> {"valid", "errors", "warnings", "issues"},
        where "issues" is a list of plain dicts (cache-friendly, no custom
        objects).
    """
    workspace = Path(algo_dir_str)

    def _serialize_issues(result) -> list[dict]:
        # Flatten ValidationIssue objects into plain dicts so the result
        # can be cached/pickled by st.cache_data.
        return [
            {
                "severity": issue.severity.value,
                "code": issue.code,
                "message": issue.message,
                "path": issue.path,
                "details": issue.details,
            }
            for issue in result.issues
        ]

    validators = {
        "inputspec": validate_inputspec,
        "output_contract": validate_output_contract,
        "algorithm": validate_algorithm_signature,
    }

    results: dict[str, dict] = {}
    for key, validator in validators.items():
        try:
            result = validator(workspace)
            results[key] = {
                "valid": result.valid,
                "errors": len(result.errors),
                "warnings": len(result.warnings),
                "issues": _serialize_issues(result),
            }
        except Exception as e:
            # Surface a validator crash as a single synthetic error issue
            # instead of breaking the page.
            results[key] = {
                "valid": False,
                "errors": 1,
                "warnings": 0,
                "issues": [{"severity": "error", "code": "VALIDATION_FAILED", "message": str(e), "path": None, "details": {}}],
            }

    return results
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
# Run validations with caching
|
|
201
|
+
mtime = _get_algo_mtime(algo_dir)
|
|
202
|
+
validation_results = _run_all_validations(str(algo_dir), mtime)
|
|
203
|
+
|
|
204
|
+
# Display validation status badges with revalidate button
|
|
205
|
+
with st.container(border=True):
|
|
206
|
+
cols = st.columns([1, 1, 1, 0.5])
|
|
207
|
+
names = ["InputSpec", "OutputContract", "Algorithm"]
|
|
208
|
+
keys = ["inputspec", "output_contract", "algorithm"]
|
|
209
|
+
|
|
210
|
+
for i, (key, name) in enumerate(zip(keys, names)):
|
|
211
|
+
result = validation_results.get(key, {})
|
|
212
|
+
error_count = result.get("errors", 0)
|
|
213
|
+
warning_count = result.get("warnings", 0)
|
|
214
|
+
|
|
215
|
+
with cols[i]:
|
|
216
|
+
if error_count > 0:
|
|
217
|
+
st.markdown(
|
|
218
|
+
f'<span class="status-badge status-missing">❌ {name}: {error_count} error{"s" if error_count > 1 else ""}</span>',
|
|
219
|
+
unsafe_allow_html=True,
|
|
220
|
+
)
|
|
221
|
+
elif warning_count > 0:
|
|
222
|
+
st.markdown(
|
|
223
|
+
f'<span class="status-badge status-warning">⚠️ {name}: {warning_count} warning{"s" if warning_count > 1 else ""}</span>',
|
|
224
|
+
unsafe_allow_html=True,
|
|
225
|
+
)
|
|
226
|
+
else:
|
|
227
|
+
st.markdown(
|
|
228
|
+
f'<span class="status-badge status-ok">✅ {name}</span>',
|
|
229
|
+
unsafe_allow_html=True,
|
|
230
|
+
)
|
|
231
|
+
|
|
232
|
+
with cols[3]:
|
|
233
|
+
if st.button("🔄 Revalidate", key="rerun_validation"):
|
|
234
|
+
_run_all_validations.clear()
|
|
235
|
+
st.rerun()
|
|
236
|
+
|
|
237
|
+
# Collect all issues
|
|
238
|
+
all_issues = []
|
|
239
|
+
for key in keys:
|
|
240
|
+
result = validation_results.get(key, {})
|
|
241
|
+
for issue in result.get("issues", []):
|
|
242
|
+
all_issues.append((key, issue))
|
|
243
|
+
|
|
244
|
+
# Show validation details if there are issues
|
|
245
|
+
if all_issues:
|
|
246
|
+
with st.expander(f"📋 Validation Details ({len(all_issues)} issue{'s' if len(all_issues) > 1 else ''})", expanded=False):
|
|
247
|
+
for source, issue in all_issues:
|
|
248
|
+
icon = {"error": "🔴", "warning": "🟡", "info": "🔵"}.get(issue["severity"], "⚪")
|
|
249
|
+
path_str = f" at `{issue['path']}`" if issue.get("path") else ""
|
|
250
|
+
details_str = ""
|
|
251
|
+
if issue.get("details"):
|
|
252
|
+
# Show suggested fix for snake_case issues
|
|
253
|
+
if "suggested" in issue["details"]:
|
|
254
|
+
details_str = f" → Suggested: `{issue['details']['suggested']}`"
|
|
255
|
+
elif "missing" in issue["details"]:
|
|
256
|
+
details_str = f" (missing: {issue['details']['missing']})"
|
|
257
|
+
elif "extra" in issue["details"]:
|
|
258
|
+
details_str = f" (extra: {issue['details']['extra']})"
|
|
259
|
+
st.markdown(f"{icon} **[{source}]** `{issue['code']}`{path_str}: {issue['message']}{details_str}")
|
|
260
|
+
|
|
261
|
+
# File Inspector
|
|
262
|
+
with st.expander("📂 File Inspector", expanded=True):
|
|
263
|
+
tab_man, tab_in, tab_out = st.tabs(["Manifest", "Input Spec", "Output Spec"])
|
|
264
|
+
|
|
265
|
+
def _show_json_preview(path: Path):
|
|
266
|
+
if path.exists():
|
|
267
|
+
try:
|
|
268
|
+
data = json.loads(path.read_text())
|
|
269
|
+
st.code(json.dumps(data, indent=2, ensure_ascii=False), language="json", line_numbers=True)
|
|
270
|
+
except Exception:
|
|
271
|
+
st.error("Failed to parse JSON")
|
|
272
|
+
else:
|
|
273
|
+
st.info("File not generated yet.")
|
|
274
|
+
|
|
275
|
+
with tab_man: _show_json_preview(manifest_path)
|
|
276
|
+
with tab_in: _show_json_preview(params_schema_path)
|
|
277
|
+
with tab_out: _show_json_preview(output_contract_path)
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
# ==========================================
|
|
281
|
+
# 3. DRS Source Selection
|
|
282
|
+
# ==========================================
|
|
283
|
+
st.subheader("3. Select DRS Source")
|
|
284
|
+
|
|
285
|
+
snapshots = snap_lib.list_snapshots()
|
|
286
|
+
snapshot_map = {s.snapshot_id: s for s in snapshots}
|
|
287
|
+
|
|
288
|
+
if not snapshots:
|
|
289
|
+
st.warning("No snapshots available. Import a snapshot first to provide DRS for export.")
|
|
290
|
+
st.stop()
|
|
291
|
+
|
|
292
|
+
with st.container(border=True):
|
|
293
|
+
st.caption("The DRS (Data Requirement Specification) defines dataset requirements. Select a snapshot to use its DRS in the export package.")
|
|
294
|
+
|
|
295
|
+
selected_snapshot_id = st.selectbox(
|
|
296
|
+
"Snapshot (DRS Source)",
|
|
297
|
+
options=list(snapshot_map.keys()),
|
|
298
|
+
format_func=lambda x: f"{x} — {snapshot_map[x].bundle_id}",
|
|
299
|
+
label_visibility="collapsed"
|
|
300
|
+
)
|
|
301
|
+
|
|
302
|
+
if not selected_snapshot_id:
|
|
303
|
+
st.stop()
|
|
304
|
+
|
|
305
|
+
snapshot_handle = snap_lib.get_snapshot(selected_snapshot_id)
|
|
306
|
+
|
|
307
|
+
# ==========================================
|
|
308
|
+
# 4. Export
|
|
309
|
+
# ==========================================
|
|
310
|
+
st.divider()
|
|
311
|
+
st.subheader("4. Export")
|
|
312
|
+
|
|
313
|
+
def build_zip() -> bytes:
    """Package the selected algorithm (with the snapshot's DRS) into a zip.

    Reads module-level page state chosen earlier: ``algo_dir``,
    ``manifest_path``, ``output_contract_path``, ``params_schema_path``,
    ``drs_path`` and ``snapshot_handle``.

    Returns:
        The zip archive contents as bytes (flattened: no top-level
        version folder inside the archive).
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir_path = Path(tmpdir)
        # Copy installed algorithm content into a scratch area we can mutate.
        target_root = tmpdir_path / algo_dir.name
        shutil.copytree(algo_dir, target_root, dirs_exist_ok=True)

        # Ensure manifest "files" paths cover dist outputs if present.
        manifest_data = json.loads(manifest_path.read_text())
        files = manifest_data.get("files") or {}

        if output_contract_path.exists():
            files["outputContractPath"] = "dist/output_contract.json"
        if params_schema_path.exists():
            files["paramsSchemaPath"] = "dist/params.schema.json"
        if drs_path.exists():
            files["drsPath"] = "dist/drs.json"

        if files:
            manifest_data["files"] = files

        (target_root / "manifest.json").write_text(json.dumps(manifest_data, indent=2), encoding="utf-8")

        # DRS override: copy the DRS from the selected snapshot into the
        # package, at the path the manifest declares (default dist/drs.json).
        drs_rel_path = manifest_data.get("files", {}).get("drsPath", "dist/drs.json")
        target_drs_path = target_root / drs_rel_path
        target_drs_path.parent.mkdir(parents=True, exist_ok=True)

        snap_drs_path = snapshot_handle.directory / snapshot_handle.manifest.specFiles.drsPath
        if snap_drs_path.exists():
            target_drs_path.write_bytes(snap_drs_path.read_bytes())
        # else: best-effort — keep whatever DRS (if any) shipped with the
        # algorithm directory; a missing snapshot DRS is rare.

        # Zip it up. make_archive's base_name is documented as a str; be
        # explicit rather than relying on path-like coercion. It returns
        # the archive path, so read the bytes directly — no BytesIO
        # round-trip needed.
        archive = shutil.make_archive(
            base_name=str(tmpdir_path / "algorithm_export"),
            format="zip",
            root_dir=target_root,
            base_dir=".",
        )
        return Path(archive).read_bytes()
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
# Check if there are validation errors
|
|
366
|
+
has_validation_errors = any(not r.get("valid", True) for r in validation_results.values())
|
|
367
|
+
|
|
368
|
+
_, col_export_btn = st.columns([3, 1])
|
|
369
|
+
with col_export_btn:
|
|
370
|
+
if has_validation_errors:
|
|
371
|
+
st.error("Fix validation errors to export")
|
|
372
|
+
st.button("📦 Build & Export", type="primary", disabled=True, key="export_disabled")
|
|
373
|
+
else:
|
|
374
|
+
if st.button("📦 Build & Export", type="primary", key="export_enabled"):
|
|
375
|
+
try:
|
|
376
|
+
with st.spinner("Packaging..."):
|
|
377
|
+
zip_bytes = build_zip()
|
|
378
|
+
|
|
379
|
+
st.download_button(
|
|
380
|
+
label="⬇️ Download Zip",
|
|
381
|
+
data=zip_bytes,
|
|
382
|
+
file_name=f"{selected_algo.algorithm_id}-{selected_algo.version}.zip",
|
|
383
|
+
mime="application/zip",
|
|
384
|
+
)
|
|
385
|
+
except Exception as e:
|
|
386
|
+
st.error(f"Export failed: {e}")
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Streamlit multipage registry for the workbench."""
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
"""Shared UI styling utility for Streamlit pages."""
|
|
2
|
+
|
|
3
|
+
import streamlit as st
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def apply_global_styles():
|
|
7
|
+
"""Inject global CSS to hide Streamlit UI elements and apply common styling."""
|
|
8
|
+
st.markdown("""
|
|
9
|
+
<style>
|
|
10
|
+
/* =================================
|
|
11
|
+
1. Hide Streamlit UI Elements
|
|
12
|
+
================================= */
|
|
13
|
+
|
|
14
|
+
/* Hide top header bar */
|
|
15
|
+
header[data-testid="stHeader"] {
|
|
16
|
+
display: none !important;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
/* Hide Deploy button; keep toolbar for sidebar toggle */
|
|
20
|
+
.stDeployButton {
|
|
21
|
+
display: none !important;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
/* Force sidebar visible and disable collapse control */
|
|
25
|
+
[data-testid="stSidebar"] {
|
|
26
|
+
transform: none !important;
|
|
27
|
+
min-width: 260px;
|
|
28
|
+
}
|
|
29
|
+
[data-testid="stSidebarNav"] {
|
|
30
|
+
min-width: 240px;
|
|
31
|
+
}
|
|
32
|
+
[data-testid="collapsedControl"] {
|
|
33
|
+
display: none !important;
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/* Hide footer (Made with Streamlit) */
|
|
37
|
+
footer {
|
|
38
|
+
display: none !important;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/* Hide element toolbar on tables/dataframes */
|
|
42
|
+
[data-testid="stElementToolbar"] {
|
|
43
|
+
display: none !important;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
/* Adjust main content padding (header hidden, content needs padding) */
|
|
47
|
+
.main .block-container {
|
|
48
|
+
padding-top: 2rem !important;
|
|
49
|
+
padding-bottom: 2rem !important;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
/* =================================
|
|
53
|
+
2. Button Styling
|
|
54
|
+
================================= */
|
|
55
|
+
|
|
56
|
+
div[data-testid="stButton"] button {
|
|
57
|
+
white-space: nowrap !important;
|
|
58
|
+
border-radius: 6px !important;
|
|
59
|
+
min-width: 60px !important;
|
|
60
|
+
font-weight: 500 !important;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
/* =================================
|
|
64
|
+
3. Anti-Copy Protection
|
|
65
|
+
================================= */
|
|
66
|
+
|
|
67
|
+
/* Disable text selection on key areas */
|
|
68
|
+
.element-container,
|
|
69
|
+
[data-testid="stDataFrame"],
|
|
70
|
+
[data-testid="stDataEditor"],
|
|
71
|
+
[data-testid="stCode"],
|
|
72
|
+
[data-testid="stJson"] {
|
|
73
|
+
user-select: none;
|
|
74
|
+
-webkit-user-select: none;
|
|
75
|
+
-moz-user-select: none;
|
|
76
|
+
-ms-user-select: none;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/* Hide copy buttons on st.code / st.json */
|
|
80
|
+
[data-testid="stCode"] button,
|
|
81
|
+
[data-testid="stJson"] button {
|
|
82
|
+
display: none !important;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/* =================================
|
|
86
|
+
4. Component Styling
|
|
87
|
+
================================= */
|
|
88
|
+
|
|
89
|
+
/* Expander header styling */
|
|
90
|
+
.streamlit-expanderHeader {
|
|
91
|
+
font-weight: 600;
|
|
92
|
+
background-color: #f9f9fb;
|
|
93
|
+
border-radius: 6px;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
/* Data Editor border styling */
|
|
97
|
+
[data-testid="stDataEditor"] {
|
|
98
|
+
border: 1px solid #e6e9ef;
|
|
99
|
+
border-radius: 6px;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
</style>
|
|
103
|
+
""", unsafe_allow_html=True)
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Shared helpers for the Streamlit workbench."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import subprocess
|
|
7
|
+
import sys
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from fraclab_sdk.config import SDKConfig
|
|
11
|
+
|
|
12
|
+
# Keep a dedicated workspace separate from the installed algorithm library.
WORKSPACE_ALGOS_SUBDIR = "workspace_algorithms"


def get_workspace_dir(config: SDKConfig) -> Path:
    """Return the workspace directory for editable algorithms, creating it on demand."""
    workspace_dir = config.sdk_home / WORKSPACE_ALGOS_SUBDIR
    workspace_dir.mkdir(parents=True, exist_ok=True)
    return workspace_dir
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def run_workspace_script(workspace: Path, script: str, timeout: int = 30) -> subprocess.CompletedProcess:
    """Execute *script* via ``sys.executable -c`` from inside *workspace*.

    The workspace directory is prepended to PYTHONPATH (any existing value
    is preserved after it) and stdout/stderr are captured as text.
    """
    existing_pythonpath = os.environ.get("PYTHONPATH")
    path_entries = [str(workspace)]
    if existing_pythonpath:
        path_entries.append(existing_pythonpath)

    env = dict(os.environ)
    env["PYTHONPATH"] = os.pathsep.join(path_entries)
    env["PYTHONUNBUFFERED"] = "1"

    return subprocess.run(
        [sys.executable, "-c", script],
        cwd=workspace,
        env=env,
        capture_output=True,
        text=True,
        timeout=timeout,
    )
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: fraclab-sdk
|
|
3
|
-
Version: 0.1.0
|
|
3
|
+
Version: 0.1.2
|
|
4
4
|
Summary: SDK for managing snapshots, algorithms, and run execution
|
|
5
5
|
Requires-Python: >=3.11,<4.0
|
|
6
6
|
Classifier: Programming Language :: Python :: 3
|
|
@@ -8,19 +8,22 @@ Classifier: Programming Language :: Python :: 3.11
|
|
|
8
8
|
Classifier: Programming Language :: Python :: 3.12
|
|
9
9
|
Classifier: Programming Language :: Python :: 3.13
|
|
10
10
|
Classifier: Programming Language :: Python :: 3.14
|
|
11
|
+
Provides-Extra: workbench
|
|
11
12
|
Requires-Dist: fastapi (>=0.115.0,<0.116.0)
|
|
12
13
|
Requires-Dist: matplotlib (>=3.9.2,<4.0.0)
|
|
13
14
|
Requires-Dist: numpy (>=2.1.1,<3.0.0)
|
|
14
15
|
Requires-Dist: pandas (>=2.2.3,<3.0.0)
|
|
16
|
+
Requires-Dist: pyarrow (>=16.0.0) ; extra == "workbench"
|
|
15
17
|
Requires-Dist: pydantic (>=2.10.0,<3.0.0)
|
|
16
18
|
Requires-Dist: rich (>=13.9.0,<14.0.0)
|
|
17
19
|
Requires-Dist: scipy (>=1.13.1,<2.0.0)
|
|
20
|
+
Requires-Dist: streamlit (>=1.30) ; extra == "workbench"
|
|
18
21
|
Requires-Dist: typer (>=0.15.0,<0.16.0)
|
|
19
22
|
Description-Content-Type: text/markdown
|
|
20
23
|
|
|
21
24
|
# Fraclab SDK Reference
|
|
22
25
|
|
|
23
|
-
> 版本: 0.1.0
|
|
26
|
+
> 版本: 0.1.1
|
|
24
27
|
> Python: >=3.11
|
|
25
28
|
|
|
26
29
|
Fraclab SDK 是一个算法开发与执行框架,帮助算法开发者快速构建、测试和部署数据处理算法。
|
|
@@ -43,12 +46,26 @@ Fraclab SDK 是一个算法开发与执行框架,帮助算法开发者快速
|
|
|
43
46
|
|
|
44
47
|
## 安装
|
|
45
48
|
|
|
46
|
-
|
|
49
|
+
轻量安装(核心 SDK / CLI,自动带上科学计算依赖):
|
|
47
50
|
|
|
48
51
|
```bash
|
|
49
|
-
pip install
|
|
52
|
+
pip install fraclab-sdk
|
|
50
53
|
```
|
|
51
54
|
|
|
55
|
+
安装并启用 Workbench UI:
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
pip install "fraclab-sdk[workbench]"
|
|
59
|
+
fraclab-workbench # CLI entry point
|
|
60
|
+
# 或
|
|
61
|
+
python -m fraclab_sdk.workbench
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
### 依赖说明
|
|
65
|
+
|
|
66
|
+
- 核心安装会自动安装并锁定:`numpy>=2.1.1`, `pandas>=2.2.3`, `scipy>=1.13.1`, `matplotlib>=3.9.2`, `fastapi>=0.115.0`, `rich>=13.9.0`。无需手动再装,避免版本冲突。
|
|
67
|
+
- 可选 `workbench` 额外安装 UI 依赖:`streamlit>=1.30`, `pyarrow>=16.0.0`(`pandas` 已在核心里)。
|
|
68
|
+
|
|
52
69
|
---
|
|
53
70
|
|
|
54
71
|
## 快速开始:编写你的第一个算法
|
|
@@ -66,13 +83,63 @@ from fraclab_sdk.runtime import DataClient, ArtifactWriter
|
|
|
66
83
|
|
|
67
84
|
### 2. 编写算法入口
|
|
68
85
|
|
|
69
|
-
创建 `main.py`
|
|
86
|
+
创建 `main.py` 作为算法入口文件。
|
|
87
|
+
|
|
88
|
+
#### 入口函数签名约定
|
|
89
|
+
|
|
90
|
+
**算法入口函数必须严格遵循以下签名:**
|
|
91
|
+
|
|
92
|
+
```python
|
|
93
|
+
def run(ctx):
|
|
94
|
+
...
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
| 约定 | 要求 | 说明 |
|
|
98
|
+
|------|------|------|
|
|
99
|
+
| 文件名 | `main.py` | 必须是 `main.py`,不能是其他名称 |
|
|
100
|
+
| 函数名 | `run` | 必须是 `run`,区分大小写 |
|
|
101
|
+
| 参数 | 仅 `ctx` 一个参数 | SDK 使用 `module.run(ctx)` 调用,**不支持**其他签名 |
|
|
102
|
+
| 返回值 | 无要求 | 返回值会被忽略 |
|
|
103
|
+
|
|
104
|
+
> **警告**: 以下写法都会导致运行失败:
|
|
105
|
+
> ```python
|
|
106
|
+
> # ❌ 错误: 参数名不影响,但参数个数必须是 1
|
|
107
|
+
> def run(ctx, extra_arg): # TypeError: run() missing required argument
|
|
108
|
+
>
|
|
109
|
+
> # ❌ 错误: 函数名错误
|
|
110
|
+
> def execute(ctx): # AttributeError: module has no attribute 'run'
|
|
111
|
+
>
|
|
112
|
+
> # ❌ 错误: 放在其他文件
|
|
113
|
+
> # algorithm.py 中定义 run() # 不会被加载
|
|
114
|
+
> ```
|
|
115
|
+
|
|
116
|
+
#### 最小可运行模板
|
|
70
117
|
|
|
71
118
|
```python
|
|
72
119
|
# main.py
|
|
73
|
-
|
|
74
|
-
|
|
120
|
+
def run(ctx):
|
|
121
|
+
"""算法入口函数 - 最小模板。
|
|
75
122
|
|
|
123
|
+
Args:
|
|
124
|
+
ctx: RunContext,包含:
|
|
125
|
+
- ctx.data_client: DataClient 实例
|
|
126
|
+
- ctx.params: dict[str, Any],用户参数
|
|
127
|
+
- ctx.artifacts: ArtifactWriter 实例
|
|
128
|
+
- ctx.logger: logging.Logger 实例
|
|
129
|
+
- ctx.run_context: dict,运行上下文
|
|
130
|
+
"""
|
|
131
|
+
logger = ctx.logger
|
|
132
|
+
logger.info("算法开始执行")
|
|
133
|
+
|
|
134
|
+
# 你的逻辑...
|
|
135
|
+
|
|
136
|
+
logger.info("算法执行完成")
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
#### 完整示例
|
|
140
|
+
|
|
141
|
+
```python
|
|
142
|
+
# main.py
|
|
76
143
|
def run(ctx):
|
|
77
144
|
"""算法入口函数。
|
|
78
145
|
|
|
@@ -333,7 +400,7 @@ aw.write_scalar(
|
|
|
333
400
|
"drsPath": "dist/drs.json"
|
|
334
401
|
},
|
|
335
402
|
"requires": {
|
|
336
|
-
"sdk": "0.1.0",
|
|
403
|
+
"sdk": "0.1.1",
|
|
337
404
|
"core": "1.0.0"
|
|
338
405
|
},
|
|
339
406
|
"repository": "https://github.com/example/my-algorithm",
|
|
@@ -846,6 +913,8 @@ $ fraclab-sdk run tail f9e8d7c6
|
|
|
846
913
|
$ fraclab-sdk run tail f9e8d7c6 --stderr
|
|
847
914
|
```
|
|
848
915
|
|
|
916
|
+
> Workbench 提示:结果页面会展示本次运行的输出目录路径(含 `_logs` 日志),即使运行失败也能点开路径定位调试。
|
|
917
|
+
|
|
849
918
|
#### 6. 运行目录结构
|
|
850
919
|
|
|
851
920
|
执行完成后,`~/.fraclab/runs/<run_id>/` 目录结构:
|