advay-platform 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,70 @@
1
+ """Advay Labs Platform — customer-facing circuit ingestion and SQPU projection.
2
+
3
+ The primary entry point is `run_circuit`. See README for the API contract.
4
+
5
+ Example:
6
+ from advay_platform import run_circuit, RunOptions
7
+ from qiskit import QuantumCircuit
8
+
9
+ qc = QuantumCircuit(2, 2)
10
+ qc.h(0); qc.cx(0, 1); qc.measure([0, 1], [0, 1])
11
+
12
+ response = run_circuit(qc, RunOptions(shots=1024))
13
+ print(response.to_json())
14
+
15
+ For the digital twin connection:
16
+ from advay_platform import is_live_mode, twin_info
17
+ print(is_live_mode()) # True if sqpu_digital_twin is importable
18
+ print(twin_info()) # full twin connection descriptor
19
+
20
+ CLI: `advay-platform info` or `advay-platform run <circuit.qasm>`.
21
+ """
22
+ from .api import run_circuit, get_platform_info, PLATFORM_VERSION
23
+ from .schemas import (
24
+ RunOptions,
25
+ Response,
26
+ CircuitSummary,
27
+ IdealResult,
28
+ ResourceProjection,
29
+ NoiseProjection,
30
+ Provenance,
31
+ Bound,
32
+ )
33
+ from .ingestion import CircuitIR, Operation, ingest
34
+ from .twin_bridge import (
35
+ load_profile,
36
+ twin_version,
37
+ twin_info,
38
+ is_live_mode,
39
+ force_refresh as refresh_twin_detection,
40
+ )
41
+
42
# The package version is sourced from the API module so there is a single
# source of truth (PLATFORM_VERSION in advay_platform/api.py).
__version__ = PLATFORM_VERSION

# Explicit public API: the names exported by `from advay_platform import *`
# and the supported surface for customer code.
__all__ = [
    # Public API
    "run_circuit",
    "get_platform_info",
    # Schemas
    "RunOptions",
    "Response",
    "CircuitSummary",
    "IdealResult",
    "ResourceProjection",
    "NoiseProjection",
    "Provenance",
    "Bound",
    # IR
    "CircuitIR",
    "Operation",
    "ingest",
    # Twin connection
    "load_profile",
    "twin_version",
    "twin_info",
    "is_live_mode",
    "refresh_twin_detection",
    # Meta
    "PLATFORM_VERSION",
    "__version__",
]
@@ -0,0 +1,5 @@
1
"""Entry point for `python -m advay_platform`."""
import sys
from .cli import main

# Delegate to the CLI and propagate its integer return value to the shell
# as the process exit status.
sys.exit(main())
advay_platform/api.py ADDED
@@ -0,0 +1,409 @@
1
+ """advay_platform.run_circuit — the single internal API entry point.
2
+
3
+ Customer programs call run_circuit(circuit, options) and get a Response.
4
+ This function:
5
+ 1. Validates options
6
+ 2. Ingests the circuit (Qiskit / OpenQASM / IR → CircuitIR)
7
+ 3. Decides which components to compute (execute / resources / noise)
8
+ 4. Routes to the right simulator if executing — with thread-based timeout
9
+ 5. Builds the resource projection (from algorithm template OR from circuit)
10
+ 6. Builds the noise projection
11
+ 7. Assembles a Response with status reflecting partial/declined components
12
+
13
+ Statelessness contract: this function performs ZERO disk writes, no
14
+ network calls, and no mutation of module-level state. Every call is
15
+ independent; customer circuits are never persisted by the platform.
16
+ """
17
+ from __future__ import annotations
18
+ import hashlib
19
+ import json
20
+ import threading
21
+ import time
22
+ import uuid
23
+ from datetime import datetime, timezone
24
+ from typing import Optional
25
+
26
+ from .schemas.request import RunOptions
27
+ from .schemas.response import (
28
+ Response, CircuitSummary, Provenance, IdealResult,
29
+ ResourceProjection, NoiseProjection, EvidenceClass,
30
+ )
31
+ from .ingestion import ingest, CircuitIR
32
+ from .execution.router import execute as run_execution, can_execute
33
+ from .resources.estimator import (
34
+ estimate_circuit_resources, estimate_algorithm_resources, select_template,
35
+ )
36
+ from .noise.projector import project_noise
37
+ from .twin_bridge import load_profile, twin_version, twin_info, is_live_mode
38
+
39
+
40
# Single source of truth for the platform version; re-exported as
# advay_platform.__version__ by the package __init__.
PLATFORM_VERSION = "0.2.0"
41
+
42
+
43
+ def _options_hash(opts: RunOptions) -> str:
44
+ """Stable hash of the options for provenance."""
45
+ import dataclasses
46
+ d = dataclasses.asdict(opts)
47
+ s = json.dumps(d, sort_keys=True, default=str)
48
+ return hashlib.sha256(s.encode()).hexdigest()[:16]
49
+
50
+
51
def _circuit_summary(ir: CircuitIR) -> CircuitSummary:
    """Condense a CircuitIR into the customer-facing CircuitSummary.

    `has_classical_control` is true when any non-measurement operation
    touches classical bits, i.e. a classically conditioned gate.
    """
    counts = ir.gate_counts()
    classically_controlled = any(
        op.clbits for op in ir.operations if not op.is_measurement
    )
    return CircuitSummary(
        num_qubits=ir.num_qubits,
        num_clbits=ir.num_clbits,
        depth=ir.depth,
        total_operations=len(ir.operations),
        is_clifford=ir.is_clifford,
        gate_counts=counts,
        non_clifford_count=ir.non_clifford_count(),
        two_qubit_count=ir.two_qubit_count(),
        measurement_count=ir.measurement_count(),
        has_classical_control=classically_controlled,
        source_format=ir.source_format,
    )
66
+
67
+
68
def _build_provenance(opts: RunOptions, profile: dict, run_id: str) -> Provenance:
    """Assemble the Provenance record attached to every Response.

    Captures platform/twin versions, a UTC timestamp, the caller identity,
    and a stable hash of the options so runs can be audited and reproduced.
    """
    stamped_at = datetime.now(timezone.utc).isoformat()
    return Provenance(
        platform_version=PLATFORM_VERSION,
        twin_version=twin_version(),
        twin_profile_version=profile.get("schema_version", "unknown"),
        timestamp_utc=stamped_at,
        run_id=run_id,
        caller_id=opts.caller_id,
        options_hash=_options_hash(opts),
        # Copy so the Response never aliases the caller's mutable tags dict.
        tags=dict(opts.tags),
    )
79
+
80
+
81
+ def _aggregate_classification(*items) -> EvidenceClass:
82
+ """Most conservative classification among the *populated* components.
83
+
84
+ Order from least to most conservative:
85
+ measured < stitched_composition < projection < extrapolation < literature_estimate
86
+
87
+ `declined` is a component status, not an evidence class — we ignore it
88
+ if any real classification is present. If everything declined, we
89
+ return 'declined' for the aggregate.
90
+ """
91
+ order = {
92
+ "measured": 0, "stitched_composition": 1, "projection": 2,
93
+ "extrapolation": 3, "literature_estimate": 4,
94
+ }
95
+ real = [c for c in items if c and c != "declined"]
96
+ if real:
97
+ return max(real, key=lambda c: order.get(c, 10))
98
+ return "declined"
99
+
100
+
101
class _ExecutionThread(threading.Thread):
    """Run execution in a daemon thread so we can enforce timeout.

    The caller joins with a timeout and then reads `result` / `error`:
    `result` holds the IdealResult when run() returned normally, `error`
    holds the exception when it raised. daemon=True ensures a timed-out
    worker cannot keep the interpreter alive at shutdown.
    """
    def __init__(self, ir, shots, seed, max_qubits):
        super().__init__(daemon=True)
        self.ir = ir
        self.shots = shots
        self.seed = seed
        self.max_qubits = max_qubits
        # Outcome slots populated by run(); read after join() by the caller.
        self.result: Optional[IdealResult] = None
        self.error: Optional[BaseException] = None

    def run(self):
        try:
            self.result = run_execution(
                self.ir,
                shots=self.shots,
                seed=self.seed,
                max_qubits_non_clifford=self.max_qubits,
            )
        # BaseException on purpose: an exception raised in a background
        # thread would otherwise be lost; capture everything (including
        # MemoryError-class failures) and surface it to the caller.
        except BaseException as e:  # noqa: BLE001
            self.error = e
122
+
123
+
124
def _execute_with_timeout(ir, shots, seed, max_qubits, timeout_seconds):
    """Run execution in a worker thread; return (result, error, timed_out).

    On timeout the worker thread is not forcibly killed — it is daemonised
    and continues in the background until the process exits — but the
    caller receives a timely (None, None, True). This 'logical timeout'
    honors the API contract without unsafe thread termination.
    """
    worker = _ExecutionThread(ir, shots, seed, max_qubits)
    worker.start()
    worker.join(timeout=timeout_seconds)
    if not worker.is_alive():
        # join() completed: exactly the worker's recorded outcome applies.
        return worker.result, worker.error, False
    # join() returned but the thread is still running → logical timeout.
    return None, None, True
138
+
139
+
140
def run_circuit(circuit, options: RunOptions = None, **kwargs) -> Response:
    """Run a customer circuit against the SQPU platform.

    Args:
        circuit: One of
            - qiskit.QuantumCircuit
            - OpenQASM 3.0 string
            - advay_platform.CircuitIR
        options: A RunOptions. If None, defaults are used.
        **kwargs: Convenience — passed directly to RunOptions if options is None.

    Returns:
        Response — JSON-serializable structured result.

    Raises:
        ValueError: if both `options` and keyword arguments are supplied.

    The function does not raise on customer-input errors; it returns
    status='declined' with structured reasons. It only raises on internal
    bugs.

    Statelessness: this function does not write to disk, mutate any
    module-level state, or persist the customer circuit anywhere.
    """
    # job_id is generated up-front so the caller can correlate even if
    # the function returns early (declined). In future async mode, this
    # is the identifier the caller polls with.
    job_id = uuid.uuid4().hex

    # Build options — `options` and **kwargs are mutually exclusive.
    if options is None:
        options = RunOptions(**kwargs)
    elif kwargs:
        raise ValueError("Pass either `options` OR keyword args, not both")

    # Invalid options decline before any work is done (no summary available).
    err = options.validate()
    if err:
        return _early_decline("invalid_options", err, options,
                              job_id=job_id, circuit_summary=None)

    # Ingest. User input can fail in many ways (parse errors from various
    # backends, type mismatches, missing dependencies). Catch broadly because
    # this is a hard boundary between caller-supplied data and our code —
    # we owe the caller a structured declined response, not a stack trace.
    try:
        ir = ingest(circuit)
    except Exception as e:
        return _early_decline("ingestion_failed", f"{type(e).__name__}: {e}",
                              options, job_id=job_id, circuit_summary=None)

    summary = _circuit_summary(ir)
    notes: list = []            # human-readable annotations, returned verbatim
    declined: list = []         # structured per-component decline records
    classifications: list = []  # evidence classes, aggregated at the end

    # Surface any ingestion-time preprocessor notes (e.g. "Renamed register
    # 's' → 's_reg' to avoid OpenQASM 3 symbol-table collision") so the
    # customer can see what we did before parsing their QASM. These come
    # from advay_platform.ingestion.qasm_adapter.preprocess_qasm3 via
    # ir.metadata["ingestion_notes"].
    for n in ir.metadata.get("ingestion_notes", []):
        notes.append(n)

    # Load profile. A bad profile name is a caller error → structured decline
    # (here the circuit summary IS available, unlike the earlier declines).
    try:
        profile = load_profile(options.profile)
    except (ValueError, FileNotFoundError) as e:
        return _early_decline("profile_load_failed", str(e), options,
                              job_id=job_id, circuit_summary=summary)

    # --- Execution ---
    # Three ways execution can fail without failing the request as a whole:
    # too large to simulate, timeout, or a runtime error. Each records a
    # decline entry + note and contributes "declined" to the classifications.
    ideal_result = None
    if options.execute:
        ok, reason = can_execute(ir, options.max_simulatable_qubits_non_clifford)
        if not ok:
            notes.append(f"Execution declined: {reason}")
            declined.append({
                "component": "execution",
                "code": "circuit_too_large",
                "message": reason,
                "suggestion": (
                    "Increase max_simulatable_qubits_non_clifford (up to 25), "
                    "decompose into Clifford-only segments, or rely on resource estimation only."
                ),
            })
            classifications.append("declined")
        else:
            result, error, timed_out = _execute_with_timeout(
                ir,
                shots=options.shots,
                seed=options.seed,
                max_qubits=options.max_simulatable_qubits_non_clifford,
                timeout_seconds=options.timeout_seconds,
            )

            if timed_out:
                notes.append(
                    f"Execution exceeded timeout ({options.timeout_seconds}s). "
                    "Resource and noise projections may still proceed."
                )
                declined.append({
                    "component": "execution",
                    "code": "execution_timeout",
                    "message": f"Did not complete within {options.timeout_seconds}s",
                    "suggestion": (
                        "Increase timeout_seconds, reduce shots, or use a smaller circuit. "
                        "For Clifford-only circuits, stim should be fast — check that "
                        "is_clifford is True in circuit_summary."
                    ),
                })
                classifications.append("declined")
            elif error is not None:
                notes.append(f"Execution failed: {error}")
                declined.append({
                    "component": "execution",
                    "code": "execution_failed",
                    "message": str(error),
                    "suggestion": "Check circuit for unsupported gates or memory pressure.",
                })
                classifications.append("declined")
            else:
                # Simulation completed: the strongest evidence class.
                ideal_result = result
                classifications.append("measured")

    # --- Resource projection ---
    # Known algorithm templates get a literature-based estimate; anything
    # else is projected from the ingested circuit itself.
    resource_projection = None
    if options.resource_estimate:
        try:
            if options.algorithm in ("shor", "grover", "qaoa"):
                algo_est = select_template(options.algorithm, options.algorithm_input_bits)
                resource_projection = estimate_algorithm_resources(algo_est, options, profile)
                classifications.append("literature_estimate")
            else:
                resource_projection = estimate_circuit_resources(ir, options, profile)
                classifications.append("projection")
        except ValueError as e:
            notes.append(f"Resource estimation failed: {e}")
            declined.append({
                "component": "resource_estimate",
                "code": "estimation_failed",
                "message": str(e),
                "suggestion": "Check algorithm_input_bits and algorithm parameters.",
            })
            classifications.append("declined")

    # --- Noise projection ---
    noise_projection = None
    if options.noise_projection:
        try:
            if resource_projection is not None:
                # Derive parameters from the resource projection: logical
                # qubit count, code distance, and total logical operations
                # approximated as qubits × surface-code cycles.
                n_logical = int(resource_projection.logical_qubits.value)
                d = int(resource_projection.code_distance.value)
                cycle_s = profile.get("cycle_time_s", 1e-6)
                total_cycles = max(int(resource_projection.wall_clock_seconds.value / cycle_s), 1)
                num_logical_ops = n_logical * total_cycles
            else:
                # No resource projection available: fall back to circuit
                # width/depth and a fixed default code distance.
                n_logical = ir.num_qubits
                d = 11
                num_logical_ops = max(n_logical * ir.depth, 1)
                notes.append(
                    "Noise projection used default code distance d=11 because "
                    "resource estimation was not requested."
                )

            # Always project the "full" control mode in addition to the
            # requested one; a request for "full" expands to all three modes.
            modes_to_project = [options.control_mode]
            if "full" not in modes_to_project:
                modes_to_project.insert(0, "full")
            if options.control_mode == "full":
                modes_to_project = ["full", "hybrid", "reduced"]

            noise_projection = project_noise(
                num_logical_qubits=n_logical,
                num_logical_operations=num_logical_ops,
                code_distance=d,
                sqpu_profile=profile,
                requested_modes=modes_to_project,
            )
            classifications.append("projection")
        except (ValueError, ZeroDivisionError) as e:
            notes.append(f"Noise projection failed: {e}")
            declined.append({
                "component": "noise_projection",
                "code": "projection_failed",
                "message": str(e),
                "suggestion": "Ensure resource_estimate is enabled or supply explicit parameters.",
            })
            classifications.append("declined")

    # Decide overall status: 'succeeded' only when every requested component
    # was produced; 'partial' when some were; 'declined' when none were.
    requested = []
    if options.execute: requested.append("execution")
    if options.resource_estimate: requested.append("resources")
    if options.noise_projection: requested.append("noise")
    produced = []
    if ideal_result is not None: produced.append("execution")
    if resource_projection is not None: produced.append("resources")
    if noise_projection is not None: produced.append("noise")

    if not produced:
        status = "declined"
    elif set(produced) == set(requested):
        status = "succeeded"
    else:
        status = "partial"

    return Response(
        status=status,
        job_id=job_id,
        circuit_summary=summary,
        ideal_result=ideal_result,
        resource_projection=resource_projection,
        noise_projection=noise_projection,
        evidence_classification=_aggregate_classification(*classifications),
        notes=notes,
        declined_reasons=declined,
        provenance=_build_provenance(options, profile, run_id=job_id),
    )
354
+
355
+
356
def _early_decline(code: str, message: str, opts: RunOptions, *,
                   job_id: str, circuit_summary=None) -> Response:
    """Build a declined-status Response when we couldn't even start.

    Args:
        code: Machine-readable decline code (e.g. 'invalid_options').
        message: Human-readable explanation, echoed in notes and reasons.
        opts: The (possibly invalid) RunOptions, used only for provenance.
        job_id: Correlation id generated by run_circuit before declining.
        circuit_summary: Summary if ingestion succeeded; a zeroed placeholder
            is substituted when it did not.

    Returns:
        Response with status='declined' and a single structured reason.
    """
    # Best-effort provenance: the profile may itself be the reason we are
    # declining, so any failure to load it falls back to a stub rather than
    # masking the original decline with a second error.
    try:
        prov_profile = load_profile(opts.profile)
    except Exception:
        prov_profile = {"schema_version": "unknown"}

    return Response(
        status="declined",
        job_id=job_id,
        circuit_summary=circuit_summary or CircuitSummary(
            num_qubits=0, num_clbits=0, depth=0, total_operations=0,
            is_clifford=False,
        ),
        ideal_result=None,
        resource_projection=None,
        noise_projection=None,
        evidence_classification="declined",
        notes=[f"Request declined before processing: {message}"],
        declined_reasons=[{
            "component": "request",
            "code": code,
            "message": message,
            "suggestion": "Review options.validate() and the input circuit.",
        }],
        provenance=_build_provenance(opts, prov_profile, run_id=job_id),
    )
385
+
386
+
387
def get_platform_info() -> dict:
    """Return platform metadata (version, twin connection, capabilities).

    Stable, JSON-serializable. Suitable for an /info endpoint or the CLI
    `info` sub-command. Surfaces twin live/bundled status so the caller
    knows which mode is active.
    """
    input_formats = [
        "qiskit.QuantumCircuit", "openqasm3", "advay_platform.CircuitIR",
    ]
    algorithms = ["shor", "grover", "qaoa", "generic"]
    control_modes = ["full", "hybrid", "reduced"]
    return {
        "platform_version": PLATFORM_VERSION,
        "twin": twin_info(),
        "supported_profiles": ["advay_sqpu_v2_1_4"],
        "supported_input_formats": input_formats,
        "supported_algorithms": algorithms,
        "max_simulatable_qubits_non_clifford": 25,
        "max_simulatable_qubits_clifford": 10_000,
        "supported_control_modes": control_modes,
        "async_supported": False,
        "auth_required": False,
        "persistent_storage": False,
        "rest_api_available": False,
    }