expops-0.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- expops-0.1.3.dist-info/METADATA +826 -0
- expops-0.1.3.dist-info/RECORD +86 -0
- expops-0.1.3.dist-info/WHEEL +5 -0
- expops-0.1.3.dist-info/entry_points.txt +3 -0
- expops-0.1.3.dist-info/licenses/LICENSE +674 -0
- expops-0.1.3.dist-info/top_level.txt +1 -0
- mlops/__init__.py +0 -0
- mlops/__main__.py +11 -0
- mlops/_version.py +34 -0
- mlops/adapters/__init__.py +12 -0
- mlops/adapters/base.py +86 -0
- mlops/adapters/config_schema.py +89 -0
- mlops/adapters/custom/__init__.py +3 -0
- mlops/adapters/custom/custom_adapter.py +447 -0
- mlops/adapters/plugin_manager.py +113 -0
- mlops/adapters/sklearn/__init__.py +3 -0
- mlops/adapters/sklearn/adapter.py +94 -0
- mlops/cluster/__init__.py +3 -0
- mlops/cluster/controller.py +496 -0
- mlops/cluster/process_runner.py +91 -0
- mlops/cluster/providers.py +258 -0
- mlops/core/__init__.py +95 -0
- mlops/core/custom_model_base.py +38 -0
- mlops/core/dask_networkx_executor.py +1265 -0
- mlops/core/executor_worker.py +1239 -0
- mlops/core/experiment_tracker.py +81 -0
- mlops/core/graph_types.py +64 -0
- mlops/core/networkx_parser.py +135 -0
- mlops/core/payload_spill.py +278 -0
- mlops/core/pipeline_utils.py +162 -0
- mlops/core/process_hashing.py +216 -0
- mlops/core/step_state_manager.py +1298 -0
- mlops/core/step_system.py +956 -0
- mlops/core/workspace.py +99 -0
- mlops/environment/__init__.py +10 -0
- mlops/environment/base.py +43 -0
- mlops/environment/conda_manager.py +307 -0
- mlops/environment/factory.py +70 -0
- mlops/environment/pyenv_manager.py +146 -0
- mlops/environment/setup_env.py +31 -0
- mlops/environment/system_manager.py +66 -0
- mlops/environment/utils.py +105 -0
- mlops/environment/venv_manager.py +134 -0
- mlops/main.py +527 -0
- mlops/managers/project_manager.py +400 -0
- mlops/managers/reproducibility_manager.py +575 -0
- mlops/platform.py +996 -0
- mlops/reporting/__init__.py +16 -0
- mlops/reporting/context.py +187 -0
- mlops/reporting/entrypoint.py +292 -0
- mlops/reporting/kv_utils.py +77 -0
- mlops/reporting/registry.py +50 -0
- mlops/runtime/__init__.py +9 -0
- mlops/runtime/context.py +34 -0
- mlops/runtime/env_export.py +113 -0
- mlops/storage/__init__.py +12 -0
- mlops/storage/adapters/__init__.py +9 -0
- mlops/storage/adapters/gcp_kv_store.py +778 -0
- mlops/storage/adapters/gcs_object_store.py +96 -0
- mlops/storage/adapters/memory_store.py +240 -0
- mlops/storage/adapters/redis_store.py +438 -0
- mlops/storage/factory.py +199 -0
- mlops/storage/interfaces/__init__.py +6 -0
- mlops/storage/interfaces/kv_store.py +118 -0
- mlops/storage/path_utils.py +38 -0
- mlops/templates/premier-league/charts/plot_metrics.js +70 -0
- mlops/templates/premier-league/charts/plot_metrics.py +145 -0
- mlops/templates/premier-league/charts/requirements.txt +6 -0
- mlops/templates/premier-league/configs/cluster_config.yaml +13 -0
- mlops/templates/premier-league/configs/project_config.yaml +207 -0
- mlops/templates/premier-league/data/England CSV.csv +12154 -0
- mlops/templates/premier-league/models/premier_league_model.py +638 -0
- mlops/templates/premier-league/requirements.txt +8 -0
- mlops/templates/sklearn-basic/README.md +22 -0
- mlops/templates/sklearn-basic/charts/plot_metrics.py +85 -0
- mlops/templates/sklearn-basic/charts/requirements.txt +3 -0
- mlops/templates/sklearn-basic/configs/project_config.yaml +64 -0
- mlops/templates/sklearn-basic/data/train.csv +14 -0
- mlops/templates/sklearn-basic/models/model.py +62 -0
- mlops/templates/sklearn-basic/requirements.txt +10 -0
- mlops/web/__init__.py +3 -0
- mlops/web/server.py +585 -0
- mlops/web/ui/index.html +52 -0
- mlops/web/ui/mlops-charts.js +357 -0
- mlops/web/ui/script.js +1244 -0
- mlops/web/ui/styles.css +248 -0
mlops/storage/adapters/gcs_object_store.py
@@ -0,0 +1,96 @@
+from __future__ import annotations
+
+from typing import Optional
+
+from ..interfaces.kv_store import ObjectStore
+
+
+class GCSObjectStore(ObjectStore):
+    """Google Cloud Storage implementation of ObjectStore.
+
+    URIs use the form gs://bucket/path/to/object
+    The instance is initialized with a default bucket and optional prefix.
+    If a provided uri already includes gs://bucket, that bucket is used.
+    """
+
+    def __init__(self, bucket: str, prefix: Optional[str] = None) -> None:
+        from google.cloud import storage  # type: ignore
+
+        self._client = storage.Client()
+        self._bucket = self._client.bucket(bucket)
+        self._prefix = prefix.strip("/") if prefix else None
+
+    # ----- helpers -----
+    def _split_gs_uri(self, uri: str) -> tuple[str, str]:
+        assert uri.startswith("gs://"), f"Not a GCS URI: {uri}"
+        without = uri[len("gs://") :]
+        parts = without.split("/", 1)
+        bucket = parts[0]
+        key = parts[1] if len(parts) > 1 else ""
+        return bucket, key
+
+    def _blob_for(self, uri_or_key: str):
+        if uri_or_key.startswith("gs://"):
+            bkt, key = self._split_gs_uri(uri_or_key)
+            return self._client.bucket(bkt).blob(key)
+        key = uri_or_key.lstrip("/")
+        if self._prefix:
+            if not key:
+                key = self._prefix
+            elif key == self._prefix or key.startswith(f"{self._prefix}/"):
+                pass
+            else:
+                key = f"{self._prefix}/{key}"
+        return self._bucket.blob(key)
+
+    def put_bytes(self, uri: str, data: bytes, content_type: Optional[str] = None) -> None:
+        blob = self._blob_for(uri)
+        blob.upload_from_string(data, content_type=content_type)
+
+    def get_bytes(self, uri: str) -> bytes:
+        blob = self._blob_for(uri)
+        return blob.download_as_bytes()
+
+    def put_file(self, uri: str, file_path: str, content_type: Optional[str] = None) -> None:
+        blob = self._blob_for(uri)
+        # Use streaming upload directly from filename to avoid large in-memory buffers
+        blob.upload_from_filename(file_path, content_type=content_type)
+
+    def exists(self, uri: str) -> bool:
+        blob = self._blob_for(uri)
+        return bool(blob.exists())
+
+    def build_uri(self, *parts: str) -> str:
+        if not parts:
+            return f"gs://{self._bucket.name}/{self._prefix}" if self._prefix else f"gs://{self._bucket.name}"
+        # If first part is already a gs:// prefix, treat rest as path
+        if parts[0].startswith("gs://"):
+            base = parts[0].rstrip("/")
+            rest = "/".join([p.strip("/") for p in parts[1:]])
+            return f"{base}/{rest}" if rest else base
+        key = "/".join([p.strip("/") for p in parts])
+        if self._prefix:
+            # Avoid double-prefix if caller-provided parts already start with the prefix
+            if not key:
+                key = self._prefix
+            elif key == self._prefix or key.startswith(f"{self._prefix}/"):
+                pass
+            else:
+                key = f"{self._prefix}/{key}"
+        return f"gs://{self._bucket.name}/{key}"
+
+    # --------- pickling support (avoid shipping live clients) ---------
+    def __getstate__(self) -> dict:
+        return {
+            "_bucket_name": getattr(self._bucket, "name", None),
+            "_prefix": self._prefix,
+        }
+
+    def __setstate__(self, state: dict) -> None:
+        from google.cloud import storage  # type: ignore
+        self._prefix = state.get("_prefix")
+        bucket_name = state.get("_bucket_name")
+        self._client = storage.Client()
+        self._bucket = self._client.bucket(bucket_name) if bucket_name else None
+
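The prefix logic above nests relative keys under the configured prefix while passing fully qualified `gs://` URIs through untouched, and the `__getstate__`/`__setstate__` pair drops the live client before pickling so instances can be shipped to remote workers and rebuilt there. A minimal usage sketch (bucket, prefix, and object names are hypothetical; actually running this requires `google-cloud-storage` and valid credentials):

```python
# Hypothetical names throughout; needs google-cloud-storage and credentials.
store = GCSObjectStore(bucket="example-artifacts", prefix="team-a")

# Relative parts are joined and nested under the prefix exactly once:
uri = store.build_uri("runs", "run-42", "model.pkl")
# -> "gs://example-artifacts/team-a/runs/run-42/model.pkl"

store.put_bytes(uri, b"payload", content_type="application/octet-stream")
assert store.exists(uri)
assert store.get_bytes(uri) == b"payload"

# A fully qualified URI bypasses both the default bucket and the prefix:
other = store.get_bytes("gs://another-bucket/shared/config.json")
```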
mlops/storage/adapters/memory_store.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+import numbers
+import json
+import time
+from ..interfaces.kv_store import KeyValueEventStore
+from ..path_utils import encode_probe_path
+
+
+class InMemoryStore(KeyValueEventStore):
+    """Simple in-memory implementation for dev/tests.
+    Not persistent and no real pub/sub; events are appended to a list.
+    """
+
+    def __init__(self, project_id: str) -> None:
+        self.project_id = project_id
+        self._kv: Dict[str, Any] = {}
+        self._events: list[Dict[str, Any]] = []
+
+    # Helpers
+    def _json_set(self, key: str, value: Dict[str, Any]) -> None:
+        self._kv[key] = json.loads(json.dumps(value, default=str))
+
+    def _json_get(self, key: str) -> Optional[Dict[str, Any]]:
+        val = self._kv.get(key)
+        if val is None:
+            return None
+        return json.loads(json.dumps(val))
+
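The `_json_set`/`_json_get` helpers round-trip every record through `json.dumps`/`json.loads`, which both deep-copies it and coerces non-JSON values to strings via `default=str`, approximating what a real backing store would hand back. A small illustration of that normalization (the values are made up):

```python
import datetime
import json

record = {"status": "completed", "at": datetime.datetime(2024, 1, 1)}
stored = json.loads(json.dumps(record, default=str))
# stored == {"status": "completed", "at": "2024-01-01 00:00:00"}
# `stored` is also a deep copy: later mutation of `record` cannot leak in.
```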
+    # Cache indices
+    def set_step_cache_record(self, process_name: str, step_name: str, input_hash: str, config_hash: str,
+                              function_hash: Optional[str], record: Dict[str, Any], ttl_seconds: Optional[int] = None) -> None:
+        key = f"steps:{process_name}:{step_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        self._json_set(key, record)
+
+    def get_step_cache_path(self, process_name: str, step_name: str, input_hash: Optional[str], config_hash: Optional[str], function_hash: Optional[str]) -> Optional[str]:
+        if not input_hash or not config_hash:
+            return None
+        key = f"steps:{process_name}:{step_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        rec = self._json_get(key)
+        if rec and rec.get("status") in ("completed", "cached") and rec.get("cache_path"):
+            return rec["cache_path"]
+        return None
+
+    def get_step_cache_record(self, process_name: str, step_name: str, input_hash: Optional[str], config_hash: Optional[str], function_hash: Optional[str]) -> Optional[Dict[str, Any]]:
+        if not input_hash or not config_hash:
+            return None
+        key = f"steps:{process_name}:{step_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        return self._json_get(key)
+
+    def set_process_cache_record(self, process_name: str, input_hash: str, config_hash: str, function_hash: Optional[str], record: Dict[str, Any], ttl_seconds: Optional[int] = None) -> None:
+        key = f"process:{process_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        self._json_set(key, record)
+
+    def get_process_cache_path(self, process_name: str, input_hash: Optional[str], config_hash: Optional[str], function_hash: Optional[str]) -> Optional[str]:
+        if not input_hash or not config_hash:
+            return None
+        key = f"process:{process_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        rec = self._json_get(key)
+        if rec and rec.get("status") in ("completed", "cached") and rec.get("cache_path"):
+            return rec["cache_path"]
+        return None
+
+    def get_process_cache_record(self, process_name: str, input_hash: Optional[str], config_hash: Optional[str], function_hash: Optional[str]) -> Optional[Dict[str, Any]]:
+        if not input_hash or not config_hash:
+            return None
+        key = f"process:{process_name}:{input_hash}:{config_hash}:{function_hash or 'none'}"
+        return self._json_get(key)
+
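Cache records are addressed by colon-joined identity hashes, with a missing function hash normalized to `'none'`, and a path lookup only hits when the record's status is `completed`/`cached` and a `cache_path` is present. A sketch of the round trip (all hash and path values are made up):

```python
store = InMemoryStore(project_id="demo")

store.set_step_cache_record(
    process_name="train", step_name="fit",
    input_hash="abc123", config_hash="def456", function_hash=None,
    record={"status": "completed", "cache_path": "/tmp/cache/fit.pkl"},
)

# Hit: same hashes, completed status, cache_path present.
assert store.get_step_cache_path("train", "fit", "abc123", "def456", None) == "/tmp/cache/fit.pkl"
# Miss: absent hashes short-circuit to None before any lookup.
assert store.get_step_cache_path("train", "fit", None, "def456", None) is None
```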
+    def get_process_cache_paths_batch(self, lookups: list[tuple[str, Optional[str], Optional[str], Optional[str]]]) -> dict[str, Optional[str]]:
+        """In-memory batched lookup by iterating local dict; returns composite-key map."""
+        out: dict[str, Optional[str]] = {}
+        for process_name, ih, ch, fh in lookups or []:
+            fhash = (fh or 'none') if (ih and ch) else (fh or 'none')
+            comp = f"{process_name}|{ih}|{ch}|{fhash}"
+            if not ih or not ch:
+                out[comp] = None
+                continue
+            key = f"process:{process_name}:{ih}:{ch}:{fhash}"
+            rec = self._json_get(key)
+            if rec and rec.get("status") in ("completed", "cached") and rec.get("cache_path"):
+                out[comp] = rec.get("cache_path")
+            else:
+                out[comp] = None
+        return out
+
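The batched variant returns a map keyed by a pipe-joined composite (`process|input_hash|config_hash|function_hash`) rather than positional results, so entries with missing hashes still appear as explicit `None` misses. Continuing the sketch above (hashes remain made up):

```python
store.set_process_cache_record(
    process_name="train", input_hash="abc123", config_hash="def456",
    function_hash=None, record={"status": "cached", "cache_path": "/tmp/cache/train.pkl"},
)

hits = store.get_process_cache_paths_batch([
    ("train", "abc123", "def456", None),  # recorded above -> cache path
    ("score", None, "def456", None),      # missing input hash -> None
])
# hits == {"train|abc123|def456|none": "/tmp/cache/train.pkl",
#          "score|None|def456|none": None}
```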
+    # Run lifecycle + metrics
+    def mark_pipeline_started(self, run_id: str) -> None:
+        self._kv[f"runs:{run_id}:status"] = "running"
+        self._json_set(f"runs:{run_id}:timestamps", {"start": time.time(), "end": None})
+        self.publish_event({"type": "pipeline.started", "run_id": run_id, "status": "running"})
+
+    def mark_pipeline_completed(self, run_id: str, success: bool) -> None:
+        self._kv[f"runs:{run_id}:status"] = "completed" if success else "failed"
+        self._json_set(f"runs:{run_id}:timestamps", {"start": None, "end": time.time()})
+        self.publish_event({"type": "pipeline.completed", "run_id": run_id, "status": self._kv[f'runs:{run_id}:status']})
+
+
+    # Events
+    def publish_event(self, event: Dict[str, Any]) -> None:
+        self._events.append(json.loads(json.dumps(event, default=str)))
+
+    def get_run_status(self, run_id: str) -> Optional[str]:
+        status = self._kv.get(f"runs:{run_id}:status")
+        if status is None:
+            return None
+        if isinstance(status, (bytes, bytearray)):
+            try:
+                status = status.decode()
+            except Exception:
+                return None
+        return str(status).lower() if isinstance(status, str) else None
+
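Lifecycle transitions write a status key and a timestamps record and emit an event, so a poller can read `get_run_status` while `_events` keeps the history (a plain list here, standing in for real pub/sub). Continuing the sketch, and peeking at the private `_events` list as a test would:

```python
store.mark_pipeline_started("run-42")
assert store.get_run_status("run-42") == "running"

store.mark_pipeline_completed("run-42", success=True)
assert store.get_run_status("run-42") == "completed"

# Both transitions were also published to the in-memory event list:
assert [e["type"] for e in store._events] == ["pipeline.started", "pipeline.completed"]
```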
+    # Per-run step bookkeeping
+    def record_run_step(self, run_id: str, process_name: str, step_name: str, record: Dict[str, Any]) -> None:
+        self._json_set(f"runs:{run_id}:steps:{process_name}:{step_name}", record)
+
+    def list_run_steps(self, run_id: str) -> Dict[str, Dict[str, Any]]:
+        prefix = f"runs:{run_id}:steps:"
+        out: Dict[str, Dict[str, Any]] = {}
+        for key, val in self._kv.items():
+            if isinstance(key, str) and key.startswith(prefix):
+                _, _, _, process, step = key.split(":", 4)
+                out[f"{process}.{step}"] = self._json_get(key) or {}
+        return out
+
+    # Stats
+    def increment_stat(self, run_id: str, name: str, amount: int = 1) -> None:
+        hkey = f"runs:{run_id}:stats:{name}"
+        self._kv[hkey] = int(self._kv.get(hkey, 0)) + amount
+
+    def get_pipeline_stats(self, run_id: str) -> Dict[str, Any]:
+        prefix = f"runs:{run_id}:stats:"
+        return { key[len(prefix):]: int(val) for key, val in self._kv.items() if isinstance(key, str) and key.startswith(prefix) }
+
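Step records and stats are namespaced per run; `list_run_steps` re-derives `process.step` names by splitting the key, and stats are plain integer counters. Continuing the sketch:

```python
store.record_run_step("run-42", "train", "fit", {"status": "completed"})
store.increment_stat("run-42", "steps_completed")
store.increment_stat("run-42", "steps_completed")

assert store.list_run_steps("run-42") == {"train.fit": {"status": "completed"}}
assert store.get_pipeline_stats("run-42") == {"steps_completed": 2}
```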
+    # Charts index
+    def record_run_chart_artifacts(self, run_id: str, chart_name: str, artifacts: list[dict[str, Any]]) -> None:
+        idx_key = f"runs:{run_id}:charts:{chart_name}"
+        self._json_set(idx_key, {"items": artifacts})
+
+    def list_run_charts(self, run_id: str) -> Dict[str, Any]:
+        # Debug trace
+        try:
+            import logging as _logging
+            _logging.getLogger(__name__).info(f"[InMemoryStore] list_run_charts(run_id={run_id})")
+        except Exception:
+            pass
+        prefix = f"runs:{run_id}:charts:"
+        out: Dict[str, Any] = {}
+        for key, val in self._kv.items():
+            if isinstance(key, str) and key.startswith(prefix):
+                name = key[len(prefix):]
+                data = self._json_get(key) or {}
+                items = data.get("items", [])
+                # Derive chart type from first item's chart_type if available
+                ctype = None
+                try:
+                    if isinstance(items, list) and items and isinstance(items[0], dict):
+                        ctype = items[0].get("chart_type")
+                except Exception:
+                    ctype = None
+                out[name] = {"type": (ctype or "static"), "items": items}
+        try:
+            import logging as _logging
+            _logging.getLogger(__name__).info(f"[InMemoryStore] list_run_charts -> {list(out.keys())}")
+        except Exception:
+            pass
+        return out
+
+    def copy_run_chart_artifacts(self, from_run_id: str, to_run_id: str, chart_name: str) -> bool:
+        """Copy chart artifacts from one run to another.
+
+        Args:
+            from_run_id: Source run ID
+            to_run_id: Destination run ID
+            chart_name: Name of the chart to copy
+
+        Returns:
+            True if copy was successful, False otherwise
+        """
+        try:
+            # Read chart artifacts from source run
+            from_key = f"runs:{from_run_id}:charts:{chart_name}"
+            from_data = self._json_get(from_key)
+
+            if not from_data:
+                try:
+                    import logging as _logging
+                    _logging.getLogger(__name__).info(f"[InMemoryStore] copy_run_chart_artifacts: chart {chart_name} not found in run {from_run_id}")
+                except Exception:
+                    pass
+                return False
+
+            # Write to destination run
+            to_key = f"runs:{to_run_id}:charts:{chart_name}"
+            self._json_set(to_key, from_data)
+
+            try:
+                import logging as _logging
+                _logging.getLogger(__name__).info(f"[InMemoryStore] copy_run_chart_artifacts: copied chart {chart_name} from {from_run_id} to {to_run_id}")
+            except Exception:
+                pass
+
+            return True
+
+        except Exception as e:
+            try:
+                import logging as _logging
+                _logging.getLogger(__name__).warning(f"[InMemoryStore] copy_run_chart_artifacts failed: {e}")
+            except Exception:
+                pass
+            return False
+
+
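Each chart is stored as an `{"items": [...]}` record, `list_run_charts` derives a display type from the first item's `chart_type` (defaulting to `"static"`), and `copy_run_chart_artifacts` lets a rerun reuse artifacts without regenerating them. Continuing the sketch (the artifact's `path` field is illustrative; only `chart_type` is read by the code above):

```python
store.record_run_chart_artifacts("run-42", "metrics", [
    {"chart_type": "interactive", "path": "/tmp/charts/metrics.html"},
])

charts = store.list_run_charts("run-42")
assert charts["metrics"]["type"] == "interactive"
assert len(charts["metrics"]["items"]) == 1

# Reuse the same artifacts under a new run id:
assert store.copy_run_chart_artifacts("run-42", "run-43", "metrics") is True
assert store.copy_run_chart_artifacts("run-42", "run-43", "missing-chart") is False
```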
+    def save_probe_metrics_by_path(self, run_id: str, probe_path: str, metrics: Dict[str, Any]) -> None:
+        enc = encode_probe_path(probe_path)
+        self._json_set(f"metric:{run_id}:probe_path:{enc}", metrics)
+        try:
+            self.publish_event({"type": "probe_metrics.updated", "run_id": run_id, "probe_path": probe_path, "metrics": metrics})
+        except Exception:
+            pass
+
+    def get_probe_metrics_by_path(self, run_id: str, probe_path: str) -> Dict[str, Any]:
+        enc = encode_probe_path(probe_path)
+        return self._json_get(f"metric:{run_id}:probe_path:{enc}") or {}
+
+
+    # Run listing (for UI)
+    def list_runs(self, limit: int = 100) -> list[str]:
+        prefix = "runs:"
+        ids: list[str] = []
+        for key in self._kv.keys():
+            if isinstance(key, str) and key.startswith(prefix) and key.endswith(":status"):
+                rid = key[len(prefix):-len(":status")]
+                ids.append(rid)
+        # Return insertion order approximation
+        return ids[:limit]
+
+
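Probe metrics are keyed by an encoded probe path (`encode_probe_path` lives in `mlops/storage/path_utils.py`; its exact encoding is not shown in this diff), and every save also emits a `probe_metrics.updated` event. A final sketch to close the round trip (the probe path and metric values are hypothetical):

```python
store.save_probe_metrics_by_path("run-42", "train/fit/accuracy", {"accuracy": 0.93})
assert store.get_probe_metrics_by_path("run-42", "train/fit/accuracy") == {"accuracy": 0.93}

# Runs are discovered by scanning for "runs:<id>:status" keys:
assert "run-42" in store.list_runs()
```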