text4q-cortex 0.1.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cortex/__init__.py +16 -0
- cortex/cli.py +486 -0
- cortex/cloud/__init__.py +0 -0
- cortex/cloud/models.py +122 -0
- cortex/cloud/queue.py +202 -0
- cortex/cloud/server.py +254 -0
- cortex/connectors/__init__.py +0 -0
- cortex/connectors/ibm.py +147 -0
- cortex/core.py +119 -0
- cortex/models.py +58 -0
- cortex/nlp/__init__.py +0 -0
- cortex/nlp/engine.py +250 -0
- cortex/nlp/llm_engine.py +339 -0
- cortex/scheduler/__init__.py +0 -0
- cortex/scheduler/integration.py +183 -0
- cortex/scheduler/optimizer.py +311 -0
- cortex/scheduler/problem.py +236 -0
- cortex/scheduler/qaoa.py +207 -0
- text4q_cortex-0.1.0a0.dist-info/METADATA +131 -0
- text4q_cortex-0.1.0a0.dist-info/RECORD +24 -0
- text4q_cortex-0.1.0a0.dist-info/WHEEL +5 -0
- text4q_cortex-0.1.0a0.dist-info/entry_points.txt +2 -0
- text4q_cortex-0.1.0a0.dist-info/licenses/LICENSE +96 -0
- text4q_cortex-0.1.0a0.dist-info/top_level.txt +1 -0
cortex/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""
|
|
2
|
+
text4q Cortex
|
|
3
|
+
=============
|
|
4
|
+
Natural language interface for quantum computing infrastructure.
|
|
5
|
+
|
|
6
|
+
Quick start:
|
|
7
|
+
from cortex import Cortex
|
|
8
|
+
cx = Cortex(backend="ibm_quantum")
|
|
9
|
+
result = cx.run("Bell state with 2 qubits, 1024 shots")
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from cortex.core import Cortex
|
|
13
|
+
from cortex.models import CortexResult, CircuitIntent
|
|
14
|
+
|
|
15
|
+
__version__ = "0.1.0-alpha"
|
|
16
|
+
__all__ = ["Cortex", "CortexResult", "CircuitIntent"]
|
cortex/cli.py
ADDED
|
@@ -0,0 +1,486 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cortex.cli
|
|
3
|
+
==========
|
|
4
|
+
Command-line interface for text4q Cortex.
|
|
5
|
+
|
|
6
|
+
Commands:
|
|
7
|
+
cortex run "Bell state 2 qubits" — compile + execute locally
|
|
8
|
+
cortex compile "GHZ 3 qubits" — show QASM without executing
|
|
9
|
+
cortex submit "..." --server URL — submit to cloud server
|
|
10
|
+
cortex status <job-id> — check job status
|
|
11
|
+
cortex jobs — list recent jobs
|
|
12
|
+
cortex server — start the cloud API server
|
|
13
|
+
cortex info — show config + versions
|
|
14
|
+
|
|
15
|
+
Run with:
|
|
16
|
+
python -m cortex.cli --help
|
|
17
|
+
cortex --help (after pip install)
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from __future__ import annotations
|
|
21
|
+
|
|
22
|
+
import json
|
|
23
|
+
import os
|
|
24
|
+
import sys
|
|
25
|
+
import time
|
|
26
|
+
from typing import Optional
|
|
27
|
+
|
|
28
|
+
import typer
|
|
29
|
+
from rich.console import Console
|
|
30
|
+
from rich.panel import Panel
|
|
31
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
|
|
32
|
+
from rich.syntax import Syntax
|
|
33
|
+
from rich.table import Table
|
|
34
|
+
from rich import print as rprint
|
|
35
|
+
|
|
36
|
+
# Root Typer application. `no_args_is_help` makes a bare `cortex` invocation
# print usage; rich markup mode lets help strings use [bold]/[color] tags.
app = typer.Typer(
    name="cortex",
    help="[bold purple]text4q Cortex[/] — natural language quantum computing",
    rich_markup_mode="rich",
    no_args_is_help=True,
)
# Shared Rich console used by every command for styled output.
console = Console()
|
|
43
|
+
|
|
44
|
+
# ── Config helpers ────────────────────────────────────────────────────────────
|
|
45
|
+
|
|
46
|
+
def _get_server() -> str:
    """Return the Cortex server base URL (CORTEX_SERVER env var, else localhost)."""
    return os.getenv("CORTEX_SERVER", "http://localhost:8000")
|
|
48
|
+
|
|
49
|
+
def _get_api_key() -> str:
    """Return the API key from CORTEX_API_KEY, falling back to the dev key.

    NOTE(review): the hard-coded "dev-key-0000" fallback is convenient for
    local development but must never be accepted by a production server.
    """
    return os.getenv("CORTEX_API_KEY", "dev-key-0000")
|
|
52
|
+
|
|
53
|
+
def _get_backend() -> str:
    """Return the default execution backend (CORTEX_BACKEND env var, else aer)."""
    return os.getenv("CORTEX_BACKEND", "aer")
|
|
55
|
+
|
|
56
|
+
def _headers() -> dict:
    """Build the HTTP headers for authenticated JSON requests to the server."""
    return {
        "x-api-key": _get_api_key(),
        "Content-Type": "application/json",
    }
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# ── cortex run ────────────────────────────────────────────────────────────────
|
|
61
|
+
|
|
62
|
+
@app.command()
def run(
    text: str = typer.Argument(..., help="Natural language circuit description"),
    backend: str = typer.Option(None, "--backend", "-b", help="aer | ibm_quantum"),
    shots: Optional[int] = typer.Option(None, "--shots", "-s", help="Number of measurement shots (overrides any count parsed from TEXT)"),
    nlp: str = typer.Option("pattern", "--nlp", help="NLP engine: pattern | llm"),
    llm_backend: str = typer.Option("anthropic", "--llm-backend", help="anthropic | openai"),
    show_qasm: bool = typer.Option(False, "--qasm", help="Print the generated QASM circuit"),
):
    """
    [bold]Compile and execute[/] a quantum circuit locally.

    Examples:
        cortex run "Bell state with 2 qubits"
        cortex run "GHZ 5 qubits 2048 shots" --backend aer --qasm
        cortex run "VQE para H2" --nlp llm
    """
    from cortex.core import Cortex

    backend = backend or _get_backend()

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        TimeElapsedColumn(),
        console=console,
        transient=True,
    ) as progress:
        task = progress.add_task("Parsing intent…", total=None)
        cx = Cortex(backend=backend, nlp=nlp, llm_backend=llm_backend)

        progress.update(task, description="Compiling to OpenQASM…")
        intent = cx.parse(text)
        # BUG FIX: --shots used to be accepted but silently ignored. Apply it
        # only when explicitly given, so a shot count parsed out of TEXT
        # (e.g. "2048 shots") still wins by default.
        if shots is not None:
            intent.shots = shots
        qasm = cx.compile(intent)

        progress.update(task, description=f"Executing on [bold]{backend}[/]…")
        # NOTE(review): reaches into the private connector attribute; consider
        # exposing a public Cortex.execute(intent, qasm) instead.
        result = cx._connector.execute(intent, qasm)

    _print_result(result, text, show_qasm=show_qasm)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
# ── cortex compile ────────────────────────────────────────────────────────────
|
|
104
|
+
|
|
105
|
+
@app.command()
def compile(
    text: str = typer.Argument(..., help="Natural language circuit description"),
    nlp: str = typer.Option("pattern", "--nlp", help="NLP engine: pattern | llm"),
    output: Optional[str] = typer.Option(None, "--output", "-o", help="Save QASM to file"),
):
    """
    [bold]Translate[/] natural language to OpenQASM 3.0 (no execution).

    Examples:
        cortex compile "Bell state"
        cortex compile "QFT 4 qubits" --output circuit.qasm
    """
    # NOTE: this function shadows the builtin `compile` — unavoidable here,
    # since Typer derives the CLI command name ("cortex compile") from it.
    from cortex.core import Cortex

    cx = Cortex(backend="aer", nlp=nlp)

    with console.status("Compiling…"):
        intent = cx.parse(text)
        qasm = cx.compile(intent)

    console.print(f"\n[dim]Circuit:[/] [bold]{intent.circuit_type}[/] "
                  f"[dim]Qubits:[/] {intent.num_qubits} "
                  f"[dim]Shots:[/] {intent.shots}\n")

    syntax = Syntax(qasm, "qasm", theme="monokai", line_numbers=True)
    console.print(Panel(syntax, title="[bold]OpenQASM 3.0[/]", border_style="purple"))

    if output:
        # BUG FIX: write with an explicit UTF-8 encoding; the platform default
        # (e.g. cp1252 on Windows) can fail on non-ASCII characters in QASM.
        with open(output, "w", encoding="utf-8") as f:
            f.write(qasm)
        console.print(f"\n[green]✓[/] Saved to [bold]{output}[/]")
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
# ── cortex submit ─────────────────────────────────────────────────────────────
|
|
140
|
+
|
|
141
|
+
@app.command()
def submit(
    text: str = typer.Argument(..., help="Natural language circuit description"),
    backend: str = typer.Option("aer", "--backend", "-b"),
    shots: int = typer.Option(1024, "--shots", "-s"),
    priority: int = typer.Option(5, "--priority", "-p", help="1=low 5=normal 10=high"),
    nlp: str = typer.Option("pattern", "--nlp"),
    wait: bool = typer.Option(False, "--wait", "-w", help="Wait for completion"),
    server: str = typer.Option(None, "--server", help="Server URL"),
    tags: str = typer.Option("", "--tags", help="Comma-separated tags"),
):
    """
    [bold]Submit[/] a job to the Cortex cloud server.

    Examples:
        cortex submit "GHZ 5 qubits"
        cortex submit "Bell state" --wait --priority 10
        cortex submit "VQE" --server http://my-lab-server:8000
    """
    import httpx

    server_url = server or _get_server()

    payload = {
        "text": text,
        "backend": backend,
        "shots": shots,
        "priority": priority,
        "nlp_mode": nlp,
        # Empty segments (trailing commas, blanks) are dropped.
        "tags": [tag.strip() for tag in tags.split(",") if tag.strip()],
    }

    with console.status(f"Submitting to [bold]{server_url}[/]…"):
        try:
            resp = httpx.post(
                f"{server_url}/jobs",
                json=payload,
                headers=_headers(),
                timeout=10,
            )
            resp.raise_for_status()
        except httpx.ConnectError:
            console.print(f"[red]✗[/] Cannot reach server at [bold]{server_url}[/]")
            console.print(" Start with: [dim]cortex server[/]")
            raise typer.Exit(1)
        except httpx.HTTPStatusError as e:
            console.print(f"[red]✗[/] Server error {e.response.status_code}: {e.response.text}")
            raise typer.Exit(1)

    job_id = resp.json()["job_id"]
    console.print(f"\n[green]✓[/] Job submitted: [bold cyan]{job_id}[/]")

    if not wait:
        console.print(f" Track with: [dim]cortex status {job_id}[/]")
        return

    # --wait: block until the job reaches a terminal state, then print it.
    _poll_job(job_id, server_url)
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
# ── cortex status ─────────────────────────────────────────────────────────────
|
|
204
|
+
|
|
205
|
+
@app.command()
def status(
    job_id: str = typer.Argument(..., help="Job ID to check"),
    server: str = typer.Option(None, "--server"),
    watch: bool = typer.Option(False, "--watch", "-w", help="Poll until done"),
):
    """
    [bold]Check status[/] of a submitted job.

    Examples:
        cortex status abc123
        cortex status abc123 --watch
    """
    server_url = server or _get_server()

    # --watch keeps polling until a terminal state; otherwise one-shot print.
    if not watch:
        _print_job_status(job_id, server_url)
    else:
        _poll_job(job_id, server_url)
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
# ── cortex jobs ───────────────────────────────────────────────────────────────
|
|
227
|
+
|
|
228
|
+
@app.command()
def jobs(
    status_filter: Optional[str] = typer.Option(None, "--status", help="queued|running|done|failed"),
    limit: int = typer.Option(15, "--limit", "-n"),
    server: str = typer.Option(None, "--server"),
):
    """
    [bold]List[/] recent jobs from the cloud server.

    Examples:
        cortex jobs
        cortex jobs --status done --limit 5
    """
    import httpx
    server_url = server or _get_server()

    params = {"limit": limit}
    if status_filter:
        params["status"] = status_filter

    try:
        r = httpx.get(
            f"{server_url}/jobs",
            params=params,
            headers=_headers(),
            timeout=10,
        )
        r.raise_for_status()
    except Exception as e:
        # CLI boundary: report any transport/HTTP failure and exit non-zero.
        console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1)

    data = r.json()
    job_list = data["jobs"]

    if not job_list:
        console.print("[dim]No jobs found.[/]")
        return

    table = Table(title="Recent Jobs", border_style="dim", show_lines=False)
    table.add_column("ID", style="cyan dim", no_wrap=True, max_width=10)
    table.add_column("Status", no_wrap=True)
    table.add_column("Circuit", style="dim")
    table.add_column("Qubits", justify="right")
    table.add_column("Shots", justify="right", style="dim")
    table.add_column("Time (ms)", justify="right", style="dim")
    table.add_column("Input", style="dim", max_width=35)

    status_colors = {
        "queued": "yellow", "running": "blue",
        "done": "green", "failed": "red", "cancelled": "dim",
    }

    for j in job_list:
        s = j["status"]
        color = status_colors.get(s, "white")
        # BUG FIX: compare against None, not truthiness — a legitimate 0 ms
        # duration used to render as "—".
        dur_ms = j.get("duration_ms")
        dur = f"{dur_ms:.0f}" if dur_ms is not None else "—"
        table.add_row(
            j["id"][:8],
            f"[{color}]{s}[/]",
            j.get("circuit_type") or "—",
            str(j.get("num_qubits") or "—"),
            str(j["shots"]),
            dur,
            j["text"],
        )

    console.print()
    console.print(table)
    console.print(f"\n[dim]Total: {data['total']} jobs[/]")
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
# ── cortex server ─────────────────────────────────────────────────────────────
|
|
301
|
+
|
|
302
|
+
@app.command()
def server(
    host: str = typer.Option("127.0.0.1", "--host"),
    port: int = typer.Option(8000, "--port"),
    workers: int = typer.Option(2, "--workers", help="Number of QPU worker threads"),
    reload: bool = typer.Option(False, "--reload", help="Auto-reload on code changes"),
):
    """
    [bold]Start[/] the Cortex cloud API server.

    Examples:
        cortex server
        cortex server --port 9000 --workers 4
    """
    import uvicorn

    # The worker count is handed to the server process via the environment.
    os.environ["CORTEX_WORKERS"] = str(workers)

    banner = (
        f"[bold purple]text4q Cortex Cloud[/]\n\n"
        f" URL: [cyan]http://{host}:{port}[/]\n"
        f" Workers: {workers}\n"
        f" Docs: [cyan]http://{host}:{port}/docs[/]\n\n"
        f"[dim]Press Ctrl+C to stop[/]"
    )
    console.print(Panel(banner, border_style="purple"))

    uvicorn.run(
        "cortex.cloud.server:app",
        host=host,
        port=port,
        reload=reload,
        log_level="warning",
    )
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# ── cortex info ───────────────────────────────────────────────────────────────
|
|
340
|
+
|
|
341
|
+
@app.command()
def info(
    server: str = typer.Option(None, "--server"),
):
    """Show local config and server health."""
    import httpx
    from cortex import __version__

    server_url = server or _get_server()

    config = Table(show_header=False, box=None, padding=(0, 2))
    config.add_column("Key", style="dim")
    config.add_column("Value", style="bold")
    for key, value in (
        ("Version", __version__),
        ("Backend", _get_backend()),
        ("Server", server_url),
        ("API Key", _get_api_key()[:8] + "…"),
    ):
        config.add_row(key, value)

    console.print(Panel(config, title="[bold]text4q Cortex[/]", border_style="purple"))

    # Best-effort health probe — an unreachable server is only a warning.
    try:
        resp = httpx.get(f"{server_url}/health", headers=_headers(), timeout=3)
        q = resp.json()["queue"]
        console.print(
            f"\n[green]✓[/] Server reachable — "
            f"queued={q['queued']} running={q['running']} "
            f"done={q['done']} workers={q['workers']}"
        )
    except Exception:
        console.print(f"\n[yellow]⚠[/] Server at [bold]{server_url}[/] not reachable")
        console.print(" Start with: [dim]cortex server[/]")
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
# ── Internal helpers ──────────────────────────────────────────────────────────
|
|
378
|
+
|
|
379
|
+
def _print_result(result, text: str, show_qasm: bool = False):
    """Render a CortexResult: summary panel, histogram, optional QASM, error."""
    status_color = "green" if result.success else "red"
    status_icon = "✓" if result.success else "✗"

    header = (
        f"[dim]Input:[/] {text}\n"
        f"[dim]Circuit:[/] [bold]{result.intent.circuit_type}[/] "
        f"({result.intent.num_qubits} qubits)\n"
        f"[dim]Backend:[/] {result.backend}\n"
        f"[dim]Time:[/] {result.execution_time_ms:.1f} ms"
    )

    console.print()
    console.print(Panel(
        header,
        title=f"[{status_color}]{status_icon} Result[/]",
        border_style=status_color,
    ))

    # Histogram only makes sense for a successful run with measurement data.
    if result.success and result.counts:
        _print_histogram(result.counts, result.shots)

    if show_qasm and result.qasm:
        console.print()
        qasm_view = Syntax(result.qasm, "qasm", theme="monokai", line_numbers=True)
        console.print(Panel(qasm_view, title="OpenQASM 3.0", border_style="dim"))

    if result.error:
        console.print(f"\n[red]Error:[/] {result.error}")
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def _print_histogram(counts: dict, shots: int, max_bars: int = 10):
    """Render an ASCII histogram of measurement results to the console.

    Shows at most ``max_bars`` states, most frequent first. Each row prints
    the bitstring, a bar scaled to the most frequent state, the raw count,
    and the percentage of total shots.
    """
    sorted_counts = sorted(counts.items(), key=lambda kv: -kv[1])[:max_bars]
    if not sorted_counts:
        return
    # `or 1` guards against all-zero counts (would divide by zero below).
    max_count = sorted_counts[0][1] or 1
    bar_width = 30

    console.print("\n[dim]Measurement results:[/]")
    for state, count in sorted_counts:
        # BUG FIX: guard shots == 0 — previously a ZeroDivisionError.
        pct = (count / shots * 100) if shots else 0.0
        filled = int(bar_width * count / max_count)
        bar = "█" * filled + "░" * (bar_width - filled)
        console.print(f" |{state}⟩ {bar} {count:>5} ({pct:>5.1f}%)")
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
def _print_job_status(job_id: str, server_url: str):
    """Fetch one job from the server and pretty-print its current state."""
    import httpx

    try:
        resp = httpx.get(f"{server_url}/jobs/{job_id}", headers=_headers(), timeout=10)
        resp.raise_for_status()
    except Exception as e:
        console.print(f"[red]✗[/] {e}")
        raise typer.Exit(1)

    job = resp.json()
    palette = {
        "queued": "yellow", "running": "blue",
        "done": "green", "failed": "red", "cancelled": "dim",
    }
    state = job["status"]
    color = palette.get(state, "white")

    console.print(f"\nJob [cyan]{job_id[:8]}[/] [{color}]{state}[/]")

    if job.get("circuit_type"):
        console.print(f" Circuit: {job['circuit_type']} ({job.get('num_qubits')}q)")

    if job.get("counts"):
        _print_histogram(job["counts"], job["shots"])

    if job.get("error"):
        console.print(f" [red]Error:[/] {job['error']}")
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
def _poll_job(job_id: str, server_url: str, poll_interval: float = 0.5):
    """Poll the server until the job reaches a terminal state, then print it.

    Any polling failure (network or HTTP error) aborts with exit code 1.
    """
    import httpx

    terminal_states = {"done", "failed", "cancelled"}

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        TimeElapsedColumn(),
        console=console,
        transient=True,
    ) as progress:
        task_id = progress.add_task(f"Waiting for job [cyan]{job_id[:8]}[/]…", total=None)

        while True:
            try:
                resp = httpx.get(f"{server_url}/jobs/{job_id}", headers=_headers(), timeout=10)
                resp.raise_for_status()
                state = resp.json()["status"]
            except Exception as e:
                console.print(f"[red]✗[/] Poll error: {e}")
                raise typer.Exit(1)
            progress.update(task_id, description=f"Job [cyan]{job_id[:8]}[/] — {state}…")
            if state in terminal_states:
                break
            time.sleep(poll_interval)

    _print_job_status(job_id, server_url)
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
# ── Entry point ───────────────────────────────────────────────────────────────
|
|
481
|
+
|
|
482
|
+
def main():
    """Console-script entry point (wired up via the wheel's entry_points.txt)."""
    app()

if __name__ == "__main__":
    main()
|
cortex/cloud/__init__.py
ADDED
|
File without changes
|
cortex/cloud/models.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
"""
|
|
2
|
+
cortex.cloud.models
|
|
3
|
+
===================
|
|
4
|
+
Data models for the Cortex cloud layer (job queue, users, results).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
from enum import Enum
|
|
10
|
+
from typing import Any
|
|
11
|
+
from pydantic import BaseModel, Field
|
|
12
|
+
import uuid
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _now() -> datetime:
    """Return the current UTC time as a timezone-aware datetime."""
    return datetime.now(tz=timezone.utc)
|
|
17
|
+
|
|
18
|
+
def _uid() -> str:
    """Return a fresh random UUID4 as a string (used for ids and API keys)."""
    return f"{uuid.uuid4()}"
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# ── Enums ─────────────────────────────────────────────────────────────────────
|
|
23
|
+
|
|
24
|
+
class JobStatus(str, Enum):
    """Lifecycle states of a job; the string values are the wire/API format."""
    QUEUED = "queued"        # accepted, waiting for a worker
    RUNNING = "running"      # picked up and executing
    DONE = "done"            # finished successfully
    FAILED = "failed"        # finished with an error
    CANCELLED = "cancelled"  # stopped before completion
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class JobPriority(int, Enum):
    """Job scheduling priority (CLI --priority: 1=low, 5=normal, 10=high)."""
    LOW = 1
    NORMAL = 5
    HIGH = 10
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# ── Job models ────────────────────────────────────────────────────────────────
|
|
39
|
+
|
|
40
|
+
class JobSubmit(BaseModel):
    """Payload sent by the client to submit a new job (POST /jobs)."""
    # Free-form circuit description, e.g. "Bell state with 2 qubits".
    text: str = Field(..., description="Natural language circuit description")
    # Measurement shots; validated server-side to the 1..100_000 range.
    shots: int = Field(1024, ge=1, le=100_000)
    backend: str = Field("aer", description="Target backend: aer | ibm_quantum")
    nlp_mode: str = Field("pattern", description="NLP engine: pattern | llm")
    priority: JobPriority = JobPriority.NORMAL
    # Optional client-supplied labels (CLI --tags, comma-separated).
    tags: list[str] = Field(default_factory=list)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class Job(BaseModel):
    """A job stored in the queue.

    Input fields mirror JobSubmit; output fields stay at their defaults
    until a worker completes the job.
    """
    id: str = Field(default_factory=_uid)
    user_id: str
    status: JobStatus = JobStatus.QUEUED
    priority: JobPriority = JobPriority.NORMAL

    # Input
    text: str
    shots: int = 1024
    backend: str = "aer"
    nlp_mode: str = "pattern"
    tags: list[str] = Field(default_factory=list)

    # Output (filled on completion)
    qasm: str | None = None
    counts: dict[str, int] = Field(default_factory=dict)
    circuit_type: str | None = None
    num_qubits: int | None = None
    execution_time_ms: float | None = None
    error: str | None = None

    # Timestamps
    created_at: datetime = Field(default_factory=_now)
    started_at: datetime | None = None
    finished_at: datetime | None = None

    @property
    def duration_ms(self) -> float | None:
        """Wall-clock run time in milliseconds, or None until the job finishes."""
        if self.started_at and self.finished_at:
            return (self.finished_at - self.started_at).total_seconds() * 1000
        return None

    def summary(self) -> dict[str, Any]:
        """Compact JSON-friendly view of the job (used by the jobs listing).

        The input text is truncated to 80 characters with an ellipsis.
        """
        return {
            "id": self.id,
            "status": self.status,
            "text": self.text[:80] + ("…" if len(self.text) > 80 else ""),
            "circuit_type": self.circuit_type,
            "num_qubits": self.num_qubits,
            "shots": self.shots,
            "backend": self.backend,
            "created_at": self.created_at.isoformat(),
            "duration_ms": self.duration_ms,
        }
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
# ── User models ───────────────────────────────────────────────────────────────
|
|
98
|
+
|
|
99
|
+
class User(BaseModel):
    """A registered API user.

    The api_key is generated on creation; clients present it in the
    x-api-key request header (see cortex.cli._headers).
    """
    id: str = Field(default_factory=_uid)
    username: str
    # Random UUID4 string used as the bearer credential.
    api_key: str = Field(default_factory=_uid)
    is_admin: bool = False
    # Rate-limit ceiling — presumably enforced by the queue/server layer;
    # TODO(review): confirm where this quota is checked.
    quota_jobs_per_hour: int = 60
    created_at: datetime = Field(default_factory=_now)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# ── API response models ───────────────────────────────────────────────────────
|
|
109
|
+
|
|
110
|
+
class JobResponse(BaseModel):
    """Minimal acknowledgement returned by job endpoints (e.g. POST /jobs)."""
    job_id: str
    status: JobStatus
    # Optional human-readable note; empty by default.
    message: str = ""
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class QueueStats(BaseModel):
    """Queue counters reported under the /health endpoint's "queue" key
    (consumed by the `cortex info` command)."""
    queued: int
    running: int
    done: int
    failed: int
    total: int
    # Number of active QPU worker threads (see `cortex server --workers`).
    workers: int
|