celltype-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celltype_cli-0.1.0.dist-info/METADATA +267 -0
- celltype_cli-0.1.0.dist-info/RECORD +89 -0
- celltype_cli-0.1.0.dist-info/WHEEL +4 -0
- celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
- celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- ct/__init__.py +3 -0
- ct/agent/__init__.py +0 -0
- ct/agent/case_studies.py +426 -0
- ct/agent/config.py +523 -0
- ct/agent/doctor.py +544 -0
- ct/agent/knowledge.py +523 -0
- ct/agent/loop.py +99 -0
- ct/agent/mcp_server.py +478 -0
- ct/agent/orchestrator.py +733 -0
- ct/agent/runner.py +656 -0
- ct/agent/sandbox.py +481 -0
- ct/agent/session.py +145 -0
- ct/agent/system_prompt.py +186 -0
- ct/agent/trace_store.py +228 -0
- ct/agent/trajectory.py +169 -0
- ct/agent/types.py +182 -0
- ct/agent/workflows.py +462 -0
- ct/api/__init__.py +1 -0
- ct/api/app.py +211 -0
- ct/api/config.py +120 -0
- ct/api/engine.py +124 -0
- ct/cli.py +1448 -0
- ct/data/__init__.py +0 -0
- ct/data/compute_providers.json +59 -0
- ct/data/cro_database.json +395 -0
- ct/data/downloader.py +238 -0
- ct/data/loaders.py +252 -0
- ct/kb/__init__.py +5 -0
- ct/kb/benchmarks.py +147 -0
- ct/kb/governance.py +106 -0
- ct/kb/ingest.py +415 -0
- ct/kb/reasoning.py +129 -0
- ct/kb/schema_monitor.py +162 -0
- ct/kb/substrate.py +387 -0
- ct/models/__init__.py +0 -0
- ct/models/llm.py +370 -0
- ct/tools/__init__.py +195 -0
- ct/tools/_compound_resolver.py +297 -0
- ct/tools/biomarker.py +368 -0
- ct/tools/cellxgene.py +282 -0
- ct/tools/chemistry.py +1371 -0
- ct/tools/claude.py +390 -0
- ct/tools/clinical.py +1153 -0
- ct/tools/clue.py +249 -0
- ct/tools/code.py +1069 -0
- ct/tools/combination.py +397 -0
- ct/tools/compute.py +402 -0
- ct/tools/cro.py +413 -0
- ct/tools/data_api.py +2114 -0
- ct/tools/design.py +295 -0
- ct/tools/dna.py +575 -0
- ct/tools/experiment.py +604 -0
- ct/tools/expression.py +655 -0
- ct/tools/files.py +957 -0
- ct/tools/genomics.py +1387 -0
- ct/tools/http_client.py +146 -0
- ct/tools/imaging.py +319 -0
- ct/tools/intel.py +223 -0
- ct/tools/literature.py +743 -0
- ct/tools/network.py +422 -0
- ct/tools/notification.py +111 -0
- ct/tools/omics.py +3330 -0
- ct/tools/ops.py +1230 -0
- ct/tools/parity.py +649 -0
- ct/tools/pk.py +245 -0
- ct/tools/protein.py +678 -0
- ct/tools/regulatory.py +643 -0
- ct/tools/remote_data.py +179 -0
- ct/tools/report.py +181 -0
- ct/tools/repurposing.py +376 -0
- ct/tools/safety.py +1280 -0
- ct/tools/shell.py +178 -0
- ct/tools/singlecell.py +533 -0
- ct/tools/statistics.py +552 -0
- ct/tools/structure.py +882 -0
- ct/tools/target.py +901 -0
- ct/tools/translational.py +123 -0
- ct/tools/viability.py +218 -0
- ct/ui/__init__.py +0 -0
- ct/ui/markdown.py +31 -0
- ct/ui/status.py +258 -0
- ct/ui/suggestions.py +567 -0
- ct/ui/terminal.py +1456 -0
- ct/ui/traces.py +112 -0
ct/tools/compute.py
ADDED
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
"""
|
|
2
|
+
GPU compute tools: list providers, estimate costs, submit jobs, check status.
|
|
3
|
+
|
|
4
|
+
PLACEHOLDER IMPLEMENTATION: Provider listings and pricing come from a static JSON
|
|
5
|
+
file bundled with ct. Prices may be outdated. submit_job and job_status make real
|
|
6
|
+
API calls when dry_run=False, but list_providers and estimate_cost use static data.
|
|
7
|
+
A real implementation would query live provider APIs for current pricing.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
import time
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from ct.tools import registry
|
|
14
|
+
from ct.tools.http_client import request_json
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# Module-level cache for provider data.
# None means "not loaded yet"; populated lazily on first _load_providers() call.
_providers_data = None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _load_providers() -> dict:
    """Return the bundled compute-provider directory, loading it on first use.

    The parsed JSON is memoized in the module-level ``_providers_data`` so the
    file is read from disk at most once per process.
    """
    global _providers_data
    if _providers_data is None:
        # Directory layout: ct/tools/compute.py -> ct/data/compute_providers.json
        data_file = Path(__file__).resolve().parent.parent / "data" / "compute_providers.json"
        _providers_data = json.loads(data_file.read_text())
    return _providers_data
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _provider_info(provider_id: str) -> dict | None:
    """Look up a provider record by its ID; return None when no entry matches."""
    catalog = _load_providers().get("providers", [])
    return next((entry for entry in catalog if entry.get("id") == provider_id), None)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _provider_api_key_config(provider: str) -> str:
|
|
41
|
+
"""Map provider IDs to config keys, with a predictable fallback."""
|
|
42
|
+
key_map = {"lambda": "compute.lambda_api_key", "runpod": "compute.runpod_api_key"}
|
|
43
|
+
return key_map.get(provider, f"compute.{provider}_api_key")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _resolve_provider(provider: str | None, cfg) -> str:
|
|
47
|
+
"""Resolve provider from argument or config default."""
|
|
48
|
+
if provider:
|
|
49
|
+
return provider
|
|
50
|
+
resolved = cfg.get("compute.default_provider", "lambda")
|
|
51
|
+
return resolved or "lambda"
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _request_json(
    method: str,
    url: str,
    *,
    headers: dict,
    payload: dict | None = None,
    timeout: int = 30,
    retries: int = 2,
) -> tuple[dict | None, str | None]:
    """Thin wrapper over the shared HTTP helper, keeping its retry/backoff semantics.

    Returns the ``(parsed_json, error_message)`` pair produced by
    ``ct.tools.http_client.request_json``.
    """
    return request_json(
        method,
        url,
        headers=headers,
        json=payload,
        timeout=timeout,
        retries=retries,
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _find_cheapest_gpu(min_vram_gb: int, provider_id: str | None = None) -> dict | None:
    """Find the cheapest GPU with at least *min_vram_gb* GB of VRAM.

    Args:
        min_vram_gb: Minimum VRAM requirement in GB.
        provider_id: If given, restrict the search to that provider.

    Returns:
        A dict with provider/GPU IDs and names, ``vram_gb`` and
        ``price_per_hour`` for the cheapest match, or None when no GPU
        qualifies.
    """
    data = _load_providers()
    candidates = []
    # Use .get(...) defensively, consistent with _provider_info, so a
    # malformed directory yields "no match" rather than a KeyError.
    for provider in data.get("providers", []):
        if provider_id and provider["id"] != provider_id:
            continue
        for gpu in provider["gpu_types"]:
            if gpu["vram_gb"] >= min_vram_gb:
                candidates.append({
                    "provider_id": provider["id"],
                    "provider_name": provider["name"],
                    "gpu_id": gpu["id"],
                    "gpu_name": gpu["name"],
                    "vram_gb": gpu["vram_gb"],
                    "price_per_hour": gpu["price_per_hour"],
                })
    if not candidates:
        return None
    return min(candidates, key=lambda c: c["price_per_hour"])
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
@registry.register(
    name="compute.list_providers",
    description="List available GPU cloud providers with reference pricing for each GPU type (from built-in directory)",
    category="compute",
    parameters={},
    usage_guide="You need to see available GPU cloud providers and their pricing before submitting a compute job.",
)
def list_providers(**kwargs) -> dict:
    """List every GPU cloud provider in the bundled directory with pricing.

    Static placeholder data — prices come from the packaged JSON file, not a
    live provider API, and may be outdated.
    """
    catalog = _load_providers()
    providers = [
        {
            "id": entry["id"],
            "name": entry["name"],
            "website": entry["website"],
            "gpu_types": [
                {
                    "id": gpu["id"],
                    "name": gpu["name"],
                    "vram_gb": gpu["vram_gb"],
                    "price_per_hour": gpu["price_per_hour"],
                }
                for gpu in entry["gpu_types"]
            ],
        }
        for entry in catalog["providers"]
    ]

    # One human-readable line per provider for the summary text.
    summary_lines = []
    for entry in providers:
        priced = ", ".join(
            f"{gpu['name']} (${gpu['price_per_hour']:.2f}/hr)" for gpu in entry["gpu_types"]
        )
        summary_lines.append(f"{entry['name']}: {priced}")

    return {
        "summary": f"[PLACEHOLDER] {len(providers)} GPU cloud providers (static reference pricing — may be outdated):\n" + "\n".join(summary_lines),
        "placeholder": True,
        "providers": providers,
    }
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
@registry.register(
    name="compute.estimate_cost",
    description="Estimate cost and time for a GPU computation job based on built-in templates (Boltz-2, AlphaFold, MD, virtual screening, training)",
    category="compute",
    parameters={
        "job_type": "Type of job: boltz2, alphafold, molecular_dynamics, virtual_screening, model_training",
        "n_samples": "Number of samples/structures to process (default 1)",
        "gpu_type": "Specific GPU type to use (optional, auto-selects cheapest if omitted)",
        "provider": "Provider ID: lambda or runpod (optional, auto-selects cheapest if omitted)",
    },
    usage_guide="You need to estimate the cost and time for a GPU computation (Boltz-2, AlphaFold, MD simulation, etc.) before deciding whether to proceed.",
)
def estimate_cost(job_type: str, n_samples: int = 1, gpu_type: str | None = None, provider: str | None = None, **kwargs) -> dict:
    """Estimate cost and time for a compute job.

    Looks up the job template, resolves the GPU to use, and calculates
    estimated_hours and estimated_cost from per-sample time and hourly price.

    Args:
        job_type: Key into the ``job_templates`` section of the directory.
        n_samples: Number of samples/structures to process.
        gpu_type: Explicit GPU ID to use; cheapest matching offering is chosen.
        provider: Restrict GPU selection to this provider.

    Returns:
        A result dict with the estimate, or an ``{"error": ..., "summary": ...}``
        dict on invalid input.
    """
    data = _load_providers()
    templates = data["job_templates"]

    if job_type not in templates:
        valid = ", ".join(templates.keys())
        return {"error": f"Unknown job type '{job_type}'. Valid types: {valid}", "summary": f"Unknown job type '{job_type}'. Valid types: {valid}"}
    template = templates[job_type]
    min_vram = template["gpu_requirement_vram_gb"]

    # Resolve GPU selection.
    if gpu_type:
        # Honor an explicit GPU request even without a provider: search all
        # (or the one requested) providers for that GPU ID and take the
        # cheapest offering. Previously gpu_type was silently ignored unless
        # provider was also given, contradicting the parameter contract.
        selected = None
        for p in data["providers"]:
            if provider and p["id"] != provider:
                continue
            for g in p["gpu_types"]:
                if g["id"] != gpu_type:
                    continue
                candidate = {
                    "provider_id": p["id"],
                    "provider_name": p["name"],
                    "gpu_id": g["id"],
                    "gpu_name": g["name"],
                    "vram_gb": g["vram_gb"],
                    "price_per_hour": g["price_per_hour"],
                }
                if selected is None or candidate["price_per_hour"] < selected["price_per_hour"]:
                    selected = candidate
        if not selected:
            where = f" at provider '{provider}'" if provider else ""
            msg = f"GPU '{gpu_type}' not found{where}"
            return {"error": msg, "summary": msg}
        if selected["vram_gb"] < min_vram:
            msg = f"GPU {gpu_type} has {selected['vram_gb']}GB VRAM but {job_type} requires {min_vram}GB"
            return {"error": msg, "summary": msg}
    else:
        selected = _find_cheapest_gpu(min_vram, provider_id=provider)
        if not selected:
            msg = f"No GPU found with >= {min_vram}GB VRAM" + (f" at provider '{provider}'" if provider else "")
            return {"error": msg, "summary": msg}

    # Calculate cost from the template's per-sample runtime and hourly price.
    time_per_sample = template["estimated_time_per_sample_minutes"]
    total_minutes = time_per_sample * n_samples
    total_hours = total_minutes / 60.0
    estimated_cost = total_hours * selected["price_per_hour"]

    return {
        "summary": (
            f"[PLACEHOLDER] {template['description']}: {n_samples} sample(s) on {selected['gpu_name']} ({selected['provider_name']})\n"
            f"Estimated time: {total_hours:.1f} hours | Estimated cost: ${estimated_cost:.2f} (based on static reference pricing — may be outdated)"
        ),
        "placeholder": True,
        "job_type": job_type,
        "n_samples": n_samples,
        "estimated_hours": round(total_hours, 2),
        "estimated_cost": round(estimated_cost, 2),
        "gpu": selected["gpu_id"],
        "gpu_name": selected["gpu_name"],
        "vram_gb": selected["vram_gb"],
        "provider": selected["provider_id"],
        "provider_name": selected["provider_name"],
        "price_per_hour": selected["price_per_hour"],
        "time_per_sample_minutes": time_per_sample,
    }
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
@registry.register(
    name="compute.submit_job",
    description="Submit a GPU compute job to a cloud provider (dry_run=True by default)",
    category="compute",
    parameters={
        "job_type": "Type of job: boltz2, alphafold, molecular_dynamics, virtual_screening, model_training",
        "params": "Job-specific parameters (dict with input files, config, etc.)",
        "provider": "Provider ID: lambda or runpod (default: compute.default_provider)",
        "gpu_type": "GPU type to use (optional, auto-selects if omitted)",
        "dry_run": "If True (default), only show what would be submitted without actually submitting",
    },
    usage_guide="You want to submit a GPU computation job. Always runs in dry_run mode unless explicitly overridden. Use after compute.estimate_cost.",
)
def submit_job(job_type: str, params: dict | None = None, provider: str | None = None, gpu_type: str | None = None, dry_run: bool = True, **kwargs) -> dict:
    """Submit a compute job to a cloud GPU provider.

    By default runs in dry_run mode, returning what would be submitted.
    When dry_run=False, POSTs the job to the provider API (requires a
    configured API key and an API-enabled provider).

    Args:
        job_type: Key into the ``job_templates`` section of the directory.
        params: Job-specific parameters; ``params["n_samples"]`` feeds the
            cost estimate (defaults to 1).
        provider: Provider ID; falls back to the configured default.
        gpu_type: Explicit GPU ID; auto-selected when omitted.
        dry_run: When True, no network request is made.

    Returns:
        A result dict describing the (would-be) submission, or an
        ``{"error": ..., "summary": ...}`` dict on failure.
    """
    params = params or {}
    from ct.agent.config import Config
    cfg = Config.load()
    provider = _resolve_provider(provider, cfg)

    data = _load_providers()
    templates = data["job_templates"]

    if job_type not in templates:
        valid = ", ".join(templates.keys())
        return {"error": f"Unknown job type '{job_type}'. Valid types: {valid}", "summary": f"Unknown job type '{job_type}'. Valid types: {valid}"}
    template = templates[job_type]

    # Get cost estimate; bail out early if it reports a problem.
    cost_est = estimate_cost(job_type=job_type, n_samples=params.get("n_samples", 1), gpu_type=gpu_type, provider=provider)
    if "error" in cost_est:
        return cost_est

    # Find provider API base.
    provider_info = _provider_info(provider)
    if not provider_info:
        return {"error": f"Unknown provider '{provider}'", "summary": f"Unknown provider '{provider}'"}
    job_payload = {
        "job_type": job_type,
        "description": template["description"],
        "gpu_type": cost_est["gpu"],
        "provider": provider,
        "params": params,
        "estimated_hours": cost_est["estimated_hours"],
        "estimated_cost": cost_est["estimated_cost"],
    }

    if dry_run:
        return {
            "summary": (
                f"[DRY RUN] Would submit {template['description']} to {provider_info['name']}\n"
                f"GPU: {cost_est['gpu_name']} | Est. time: {cost_est['estimated_hours']:.1f}h | Est. cost: ${cost_est['estimated_cost']:.2f}\n"
                f"Set dry_run=False to actually submit."
            ),
            "dry_run": True,
            "job_payload": job_payload,
        }

    # Actual submission — validate API key first.
    config_key = _provider_api_key_config(provider)
    api_key = cfg.get(config_key)
    if not api_key:
        return {
            "error": (
                f"No API key configured for {provider_info['name']}. "
                f"Set it with: ct config set {config_key} <your-key>\n"
                f"Or: export {config_key.upper().replace('.', '_')}=<your-key>\n"
                f"Sign up at: {provider_info.get('website', 'N/A')}\n"
                f"Run 'ct keys' to see all API key status."
            ),
            "summary": f"Job submission failed: no API key for {provider_info['name']}",
        }

    api_base = provider_info.get("api_base_url")
    if not api_base:
        return {
            "error": f"Provider '{provider}' does not have an API endpoint configured",
            "summary": f"Job submission failed: provider '{provider_info.get('name', provider)}' is not API-enabled",
        }

    api_url = f"{api_base}/jobs"
    result, request_error = _request_json(
        "POST",
        api_url,
        payload=job_payload,
        timeout=30,
        retries=2,
        headers={"Authorization": f"Bearer {api_key}"},
    )
    if request_error:
        return {
            "error": f"Job submission failed: {request_error}",
            "summary": f"Job submission failed for {provider_info['name']}: {request_error}",
        }

    # Guard against a (None, None) return from the HTTP helper — the old
    # code would raise AttributeError on result.get when result was None.
    result = result or {}
    job_id = result.get("id") or result.get("job_id") or "unknown"

    return {
        "summary": (
            f"Job submitted to {provider_info['name']}: {job_id}\n"
            f"GPU: {cost_est['gpu_name']} | Est. time: {cost_est['estimated_hours']:.1f}h | Est. cost: ${cost_est['estimated_cost']:.2f}"
        ),
        "job_id": job_id,
        "provider": provider,
        "job_payload": job_payload,
        "raw_response": result,
    }
|
|
327
|
+
|
|
328
|
+
|
|
329
|
+
@registry.register(
    name="compute.job_status",
    description="Check the status of a previously submitted compute job",
    category="compute",
    parameters={
        "job_id": "Job ID returned from compute.submit_job",
        "provider": "Provider ID: lambda or runpod (default: compute.default_provider)",
    },
    usage_guide="You want to check the status of a previously submitted compute job.",
)
def job_status(job_id: str, provider: str | None = None, **kwargs) -> dict:
    """Check the status of a compute job via the provider API.

    Args:
        job_id: Job ID returned from compute.submit_job.
        provider: Provider ID; falls back to the configured default.

    Returns:
        A dict with ``status`` (and ``progress``/``elapsed_seconds`` when the
        provider reports them), or an ``{"error": ..., "summary": ...}`` dict
        on failure.
    """
    from ct.agent.config import Config
    cfg = Config.load()
    provider = _resolve_provider(provider, cfg)

    provider_info = _provider_info(provider)
    if not provider_info:
        return {"error": f"Unknown provider '{provider}'", "summary": f"Unknown provider '{provider}'"}
    config_key = _provider_api_key_config(provider)
    api_key = cfg.get(config_key)
    if not api_key:
        return {
            "error": (
                f"No API key configured for {provider_info['name']}. "
                f"Set it with: ct config set {config_key} <your-key>\n"
                f"Run 'ct keys' to see all API key status."
            ),
            "summary": f"Job status check failed: no API key for {provider_info['name']}",
        }

    api_base = provider_info.get("api_base_url")
    if not api_base:
        return {
            "error": f"Provider '{provider}' does not have an API endpoint configured",
            "summary": f"Job status check failed: provider '{provider_info.get('name', provider)}' is not API-enabled",
        }

    api_url = f"{api_base}/jobs/{job_id}"
    result, request_error = _request_json(
        "GET",
        api_url,
        timeout=30,
        retries=2,
        headers={"Authorization": f"Bearer {api_key}"},
    )
    if request_error:
        return {
            "error": f"Failed to check job status: {request_error}",
            "summary": f"Job status check failed for {provider_info['name']}: {request_error}",
        }

    # Guard against a (None, None) return from the HTTP helper — the old
    # code would raise AttributeError on result.get when result was None.
    result = result or {}
    status = result.get("status", "unknown")
    progress = result.get("progress", None)
    elapsed = result.get("elapsed_seconds", None)

    elapsed_str = ""
    if elapsed is not None:
        hours = elapsed / 3600
        elapsed_str = f" | Elapsed: {hours:.1f}h"

    progress_str = ""
    if progress is not None:
        progress_str = f" | Progress: {progress}%"

    return {
        "summary": f"Job {job_id} ({provider_info['name']}): {status}{progress_str}{elapsed_str}",
        "job_id": job_id,
        "provider": provider,
        "status": status,
        "progress": progress,
        "elapsed_seconds": elapsed,
        "raw_response": result,
    }
|