quantumflow-sdk 0.2.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -0,0 +1,578 @@
+ """
+ Pipeline API Routes.
+
+ REST endpoints for managing data pipelines:
+ - Create/start/stop pipelines
+ - Checkpoint management
+ - Rollback operations
+ - Anomaly tracking
+
+ Endpoints:
+     POST   /v1/pipelines/create/protein     - Create protein folding pipeline
+     POST   /v1/pipelines/create/portfolio   - Create portfolio optimization pipeline
+     POST   /v1/pipelines/{id}/start         - Start pipeline execution
+     POST   /v1/pipelines/{id}/pause         - Pause pipeline
+     POST   /v1/pipelines/{id}/resume        - Resume pipeline
+     POST   /v1/pipelines/{id}/stop          - Stop pipeline
+     GET    /v1/pipelines/{id}               - Get pipeline status
+     GET    /v1/pipelines                    - List pipelines
+     GET    /v1/pipelines/{id}/checkpoints   - List checkpoints
+     POST   /v1/pipelines/{id}/rollback/{cp} - Rollback to checkpoint
+     GET    /v1/pipelines/{id}/anomalies     - List anomalies
+     DELETE /v1/pipelines/{id}               - Delete pipeline
+ """
+
+ from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks
+ from pydantic import BaseModel, Field
+ from typing import Any, Dict, List, Optional
+ from datetime import datetime
+ import uuid
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ router = APIRouter(prefix="/v1/pipelines", tags=["pipelines"])
+
+ # In-memory pipeline storage (replace with DB in production)
+ _active_pipelines: Dict[str, Any] = {}
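+ # NOTE: this dict is per-process state; if the app runs with multiple worker
+ # processes (e.g. "uvicorn --workers 4"), each worker sees only the pipelines
+ # it created itself.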
+
+
+ # ============================================================
+ # Request/Response Models
+ # ============================================================
+
+
+ class PipelineConfigRequest(BaseModel):
+     """Pipeline configuration."""
+
+     checkpoint_interval_steps: int = Field(default=10, ge=1)
+     checkpoint_interval_seconds: int = Field(default=300, ge=60)
+     max_checkpoints: int = Field(default=5, ge=1)
+     enable_quantum_compression: bool = False
+     enable_anomaly_detection: bool = True
+     auto_rollback_on_critical: bool = True
+     backend: str = "simulator"
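+
+ # Example JSON for the "config" field (illustrative; the values shown are the
+ # defaults above):
+ #   {"checkpoint_interval_steps": 10, "checkpoint_interval_seconds": 300,
+ #    "max_checkpoints": 5, "enable_quantum_compression": false,
+ #    "enable_anomaly_detection": true, "auto_rollback_on_critical": true,
+ #    "backend": "simulator"}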
+
+
+ class CreateProteinPipelineRequest(BaseModel):
+     """Request to create protein folding pipeline."""
+
+     name: str = Field(..., min_length=1, max_length=255)
+     pipeline_type: str = "protein_folding"
+     sequence: str = Field(..., min_length=1, description="Amino acid sequence")
+     reference_structure: Optional[List[List[float]]] = None
+     config: Optional[PipelineConfigRequest] = None
+
+     # VQE settings
+     n_qubits: int = Field(default=8, ge=2, le=20)
+     ansatz_depth: int = Field(default=2, ge=1, le=10)
+     max_rmsd: float = Field(default=10.0, gt=0)
+
+
+ class CreatePortfolioPipelineRequest(BaseModel):
+     """Request to create portfolio optimization pipeline."""
+
+     name: str = Field(..., min_length=1, max_length=255)
+     pipeline_type: str = "portfolio_optimization"
+     assets: List[str] = Field(..., min_items=2)
+     expected_returns: List[float] = Field(..., min_items=2)
+     covariance_matrix: Optional[List[List[float]]] = None
+     initial_capital: float = Field(default=100000.0, gt=0)
+     config: Optional[PipelineConfigRequest] = None
+
+     # QAOA settings
+     n_qubits: int = Field(default=8, ge=2, le=20)
+     qaoa_depth: int = Field(default=2, ge=1, le=10)
+
+     # Risk settings
+     max_var: float = Field(default=0.05, gt=0, le=1)
+     max_drawdown: float = Field(default=0.20, gt=0, le=1)
+     target_return: float = Field(default=0.10)
+
+
+ class StartPipelineRequest(BaseModel):
+     """Request to start pipeline execution."""
+
+     total_steps: int = Field(..., ge=1, le=10000)
+     resume_from_checkpoint: Optional[int] = None
+
+
+ class RollbackRequest(BaseModel):
+     """Request to rollback to checkpoint."""
+
+     checkpoint_step: Optional[int] = None  # None = latest valid
+
+
+ class PipelineResponse(BaseModel):
+     """Pipeline information response."""
+
+     id: str
+     name: str
+     pipeline_type: str
+     status: str
+     current_step: int
+     total_steps: Optional[int]
+     created_at: str
+     started_at: Optional[str]
+     completed_at: Optional[str]
+     metrics: Dict[str, Any]
+     checkpoints_count: int
+     anomalies_count: int
+     rollbacks_count: int
+
+
+ class CheckpointResponse(BaseModel):
+     """Checkpoint information."""
+
+     id: str
+     step_number: int
+     checkpoint_name: Optional[str]
+     created_at: str
+     is_valid: bool
+     state_size_bytes: Optional[int]
+     compression_ratio: Optional[float]
+     metrics: Dict[str, Any]
+
+
+ class AnomalyResponse(BaseModel):
+     """Anomaly event information."""
+
+     id: str
+     anomaly_type: str
+     severity: str
+     message: str
+     step_number: int
+     detector_name: Optional[str]
+     threshold: Optional[float]
+     actual_value: Optional[float]
+     was_auto_resolved: bool
+     created_at: str
+
+
+ # ============================================================
+ # Helper Functions
+ # ============================================================
+
+
+ def _get_pipeline(pipeline_id: str):
+     """Get pipeline by ID."""
+     if pipeline_id not in _active_pipelines:
+         raise HTTPException(status_code=404, detail="Pipeline not found")
+     return _active_pipelines[pipeline_id]
+
+
+ def _run_pipeline_async(pipeline_id: str, total_steps: int):
+     """Run pipeline in background."""
+     pipeline_info = _active_pipelines.get(pipeline_id)
+     if not pipeline_info:
+         return
+
+     pipeline = pipeline_info["pipeline"]
+
+     try:
+         pipeline_info["status"] = "running"
+         pipeline_info["started_at"] = datetime.utcnow().isoformat()
+
+         result = pipeline.run(total_steps=total_steps)
+
+         pipeline_info["status"] = result.status
+         pipeline_info["completed_at"] = datetime.utcnow().isoformat()
+         pipeline_info["result"] = result.to_dict()
+         pipeline_info["current_step"] = result.total_steps
+         pipeline_info["metrics"] = result.final_state.metrics
+         pipeline_info["checkpoints_count"] = result.checkpoints_created
+         pipeline_info["anomalies_count"] = result.anomalies_detected
+         pipeline_info["rollbacks_count"] = result.rollbacks_performed
+
+     except Exception as e:
+         logger.error(f"Pipeline {pipeline_id} failed: {e}")
+         pipeline_info["status"] = "failed"
+         pipeline_info["error"] = str(e)
+
+
+ # ============================================================
+ # Endpoints
+ # ============================================================
+
+
+ @router.post("/create/protein", response_model=PipelineResponse)
+ async def create_protein_pipeline(request: CreateProteinPipelineRequest):
+     """Create a new protein folding pipeline."""
+     from quantumflow.pipeline.healthcare.protein_folding import (
+         ProteinFoldingPipeline,
+         ProteinConfig,
+     )
+     from quantumflow.pipeline.checkpoint_manager import CheckpointManager
+     from quantumflow.pipeline.temporal_memory import TemporalMemoryStore
+
+     pipeline_id = str(uuid.uuid4())
+
+     # Create config
+     config = ProteinConfig(
+         sequence=request.sequence,
+         reference_structure=request.reference_structure,
+         n_qubits=request.n_qubits,
+         ansatz_depth=request.ansatz_depth,
+         max_rmsd=request.max_rmsd,
+         backend=request.config.backend if request.config else "simulator",
+         checkpoint_interval_steps=request.config.checkpoint_interval_steps if request.config else 10,
+         checkpoint_interval_seconds=request.config.checkpoint_interval_seconds if request.config else 300,
+         max_checkpoints=request.config.max_checkpoints if request.config else 5,
+         enable_quantum_compression=request.config.enable_quantum_compression if request.config else False,
+         enable_anomaly_detection=request.config.enable_anomaly_detection if request.config else True,
+         auto_rollback_on_critical=request.config.auto_rollback_on_critical if request.config else True,
+     )
+
+     # Create pipeline
+     pipeline = ProteinFoldingPipeline(
+         name=request.name,
+         sequence=request.sequence,
+         reference_structure=request.reference_structure,
+         config=config,
+         pipeline_id=pipeline_id,
+     )
+
+     # Setup checkpoint manager and temporal memory
+     checkpoint_manager = CheckpointManager(backend=config.backend, use_database=False)
+     temporal_memory = TemporalMemoryStore(use_database=False)
+
+     pipeline.set_checkpoint_manager(checkpoint_manager)
+     pipeline.set_temporal_memory(temporal_memory)
+
+     # Store pipeline
+     created_at = datetime.utcnow().isoformat()
+     _active_pipelines[pipeline_id] = {
+         "pipeline": pipeline,
+         "checkpoint_manager": checkpoint_manager,
+         "temporal_memory": temporal_memory,
+         "name": request.name,
+         "pipeline_type": "protein_folding",
+         "status": "created",
+         "current_step": 0,
+         "total_steps": None,
+         "created_at": created_at,
+         "started_at": None,
+         "completed_at": None,
+         "metrics": {},
+         "checkpoints_count": 0,
+         "anomalies_count": 0,
+         "rollbacks_count": 0,
+     }
+
+     return PipelineResponse(
+         id=pipeline_id,
+         name=request.name,
+         pipeline_type="protein_folding",
+         status="created",
+         current_step=0,
+         total_steps=None,
+         created_at=created_at,
+         started_at=None,
+         completed_at=None,
+         metrics={},
+         checkpoints_count=0,
+         anomalies_count=0,
+         rollbacks_count=0,
+     )
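+
+ # Example request body (illustrative):
+ #   {"name": "insulin-fold", "sequence": "MALWMRLLPLLALLALWGPDPAAA",
+ #    "n_qubits": 8, "ansatz_depth": 2}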
+
+
+ @router.post("/create/portfolio", response_model=PipelineResponse)
+ async def create_portfolio_pipeline(request: CreatePortfolioPipelineRequest):
+     """Create a new portfolio optimization pipeline."""
+     from quantumflow.pipeline.finance.portfolio_optimization import (
+         PortfolioOptimizationPipeline,
+         PortfolioConfig,
+     )
+     from quantumflow.pipeline.checkpoint_manager import CheckpointManager
+     from quantumflow.pipeline.temporal_memory import TemporalMemoryStore
+
+     pipeline_id = str(uuid.uuid4())
+
+     # Create config
+     config = PortfolioConfig(
+         assets=request.assets,
+         expected_returns=request.expected_returns,
+         covariance_matrix=request.covariance_matrix or [],
+         initial_capital=request.initial_capital,
+         n_qubits=request.n_qubits,
+         qaoa_depth=request.qaoa_depth,
+         max_var=request.max_var,
+         max_drawdown=request.max_drawdown,
+         target_return=request.target_return,
+         backend=request.config.backend if request.config else "simulator",
+         checkpoint_interval_steps=request.config.checkpoint_interval_steps if request.config else 10,
+         checkpoint_interval_seconds=request.config.checkpoint_interval_seconds if request.config else 300,
+         max_checkpoints=request.config.max_checkpoints if request.config else 5,
+         enable_quantum_compression=request.config.enable_quantum_compression if request.config else False,
+         enable_anomaly_detection=request.config.enable_anomaly_detection if request.config else True,
+         auto_rollback_on_critical=request.config.auto_rollback_on_critical if request.config else True,
+     )
+
+     # Create pipeline
+     pipeline = PortfolioOptimizationPipeline(
+         name=request.name,
+         assets=request.assets,
+         expected_returns=request.expected_returns,
+         covariance_matrix=request.covariance_matrix,
+         initial_capital=request.initial_capital,
+         config=config,
+         pipeline_id=pipeline_id,
+     )
+
+     # Setup managers
+     checkpoint_manager = CheckpointManager(backend=config.backend, use_database=False)
+     temporal_memory = TemporalMemoryStore(use_database=False)
+
+     pipeline.set_checkpoint_manager(checkpoint_manager)
+     pipeline.set_temporal_memory(temporal_memory)
+
+     # Store pipeline
+     created_at = datetime.utcnow().isoformat()
+     _active_pipelines[pipeline_id] = {
+         "pipeline": pipeline,
+         "checkpoint_manager": checkpoint_manager,
+         "temporal_memory": temporal_memory,
+         "name": request.name,
+         "pipeline_type": "portfolio_optimization",
+         "status": "created",
+         "current_step": 0,
+         "total_steps": None,
+         "created_at": created_at,
+         "started_at": None,
+         "completed_at": None,
+         "metrics": {},
+         "checkpoints_count": 0,
+         "anomalies_count": 0,
+         "rollbacks_count": 0,
+     }
+
+     return PipelineResponse(
+         id=pipeline_id,
+         name=request.name,
+         pipeline_type="portfolio_optimization",
+         status="created",
+         current_step=0,
+         total_steps=None,
+         created_at=created_at,
+         started_at=None,
+         completed_at=None,
+         metrics={},
+         checkpoints_count=0,
+         anomalies_count=0,
+         rollbacks_count=0,
+     )
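+
+ # Example request body (illustrative):
+ #   {"name": "tech-portfolio", "assets": ["AAPL", "MSFT", "GOOG"],
+ #    "expected_returns": [0.12, 0.10, 0.11], "initial_capital": 100000.0}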
+
+
+ @router.post("/{pipeline_id}/start", response_model=PipelineResponse)
+ async def start_pipeline(
+     pipeline_id: str,
+     request: StartPipelineRequest,
+     background_tasks: BackgroundTasks,
+ ):
+     """Start pipeline execution."""
+     info = _get_pipeline(pipeline_id)
+
+     if info["status"] == "running":
+         raise HTTPException(status_code=400, detail="Pipeline is already running")
+
+     info["total_steps"] = request.total_steps
+     info["status"] = "starting"
+
+     # Run in background
+     background_tasks.add_task(_run_pipeline_async, pipeline_id, request.total_steps)
+
+     return PipelineResponse(
+         id=pipeline_id,
+         name=info["name"],
+         pipeline_type=info["pipeline_type"],
+         status="starting",
+         current_step=info["current_step"],
+         total_steps=request.total_steps,
+         created_at=info["created_at"],
+         started_at=info["started_at"],
+         completed_at=info["completed_at"],
+         metrics=info["metrics"],
+         checkpoints_count=info["checkpoints_count"],
+         anomalies_count=info["anomalies_count"],
+         rollbacks_count=info["rollbacks_count"],
+     )
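+
+ # The response returns while the run continues in the background; clients
+ # typically poll GET /v1/pipelines/{id} until "status" reaches a terminal value.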
+
+
+ @router.post("/{pipeline_id}/pause")
+ async def pause_pipeline(pipeline_id: str):
+     """Pause pipeline execution."""
+     info = _get_pipeline(pipeline_id)
+     pipeline = info["pipeline"]
+
+     if info["status"] != "running":
+         raise HTTPException(status_code=400, detail="Pipeline is not running")
+
+     pipeline.pause()
+     info["status"] = "paused"
+
+     return {"status": "paused", "pipeline_id": pipeline_id}
+
+
+ @router.post("/{pipeline_id}/resume")
+ async def resume_pipeline(pipeline_id: str):
+     """Resume paused pipeline."""
+     info = _get_pipeline(pipeline_id)
+     pipeline = info["pipeline"]
+
+     if info["status"] != "paused":
+         raise HTTPException(status_code=400, detail="Pipeline is not paused")
+
+     pipeline.resume()
+     info["status"] = "running"
+
+     return {"status": "running", "pipeline_id": pipeline_id}
+
+
+ @router.post("/{pipeline_id}/stop")
+ async def stop_pipeline(pipeline_id: str):
+     """Stop pipeline execution."""
+     info = _get_pipeline(pipeline_id)
+     pipeline = info["pipeline"]
+
+     pipeline.stop()
+     info["status"] = "stopped"
+
+     return {"status": "stopped", "pipeline_id": pipeline_id}
+
+
+ @router.get("/{pipeline_id}", response_model=PipelineResponse)
+ async def get_pipeline(pipeline_id: str):
+     """Get pipeline status and information."""
+     info = _get_pipeline(pipeline_id)
+     pipeline = info["pipeline"]
+
+     return PipelineResponse(
+         id=pipeline_id,
+         name=info["name"],
+         pipeline_type=info["pipeline_type"],
+         status=info["status"],
+         current_step=pipeline.state.step if pipeline.state else info["current_step"],
+         total_steps=info["total_steps"],
+         created_at=info["created_at"],
+         started_at=info["started_at"],
+         completed_at=info["completed_at"],
+         metrics=pipeline.state.metrics if pipeline.state else info["metrics"],
+         checkpoints_count=info["checkpoints_count"],
+         anomalies_count=info["anomalies_count"],
+         rollbacks_count=info["rollbacks_count"],
+     )
+
+
+ @router.get("", response_model=List[PipelineResponse])
+ async def list_pipelines():
+     """List all pipelines."""
+     return [
+         PipelineResponse(
+             id=pid,
+             name=info["name"],
+             pipeline_type=info["pipeline_type"],
+             status=info["status"],
+             current_step=info["current_step"],
+             total_steps=info["total_steps"],
+             created_at=info["created_at"],
+             started_at=info["started_at"],
+             completed_at=info["completed_at"],
+             metrics=info["metrics"],
+             checkpoints_count=info["checkpoints_count"],
+             anomalies_count=info["anomalies_count"],
+             rollbacks_count=info["rollbacks_count"],
+         )
+         for pid, info in _active_pipelines.items()
+     ]
+
+
+ @router.get("/{pipeline_id}/checkpoints", response_model=List[CheckpointResponse])
+ async def list_checkpoints(pipeline_id: str, limit: int = 10):
+     """List checkpoints for a pipeline."""
+     info = _get_pipeline(pipeline_id)
+     checkpoint_manager = info["checkpoint_manager"]
+
+     checkpoints = checkpoint_manager.list_checkpoints(pipeline_id, limit=limit)
+
+     return [
+         CheckpointResponse(
+             id=cp["id"],
+             step_number=cp["step_number"],
+             checkpoint_name=cp.get("checkpoint_name"),
+             created_at=cp["created_at"],
+             is_valid=cp["is_valid"],
+             state_size_bytes=cp.get("state_size_bytes"),
+             compression_ratio=cp.get("compression_ratio"),
+             metrics=cp.get("metrics", {}),
+         )
+         for cp in checkpoints
+     ]
+
+
+ @router.post("/{pipeline_id}/rollback/{checkpoint_step}")
+ async def rollback_to_checkpoint(pipeline_id: str, checkpoint_step: int):
+     """Rollback pipeline to a specific checkpoint."""
+     info = _get_pipeline(pipeline_id)
+     pipeline = info["pipeline"]
+     checkpoint_manager = info["checkpoint_manager"]
+
+     # Load checkpoint
+     checkpoint = checkpoint_manager.load(pipeline_id, checkpoint_step)
+     if not checkpoint:
+         raise HTTPException(status_code=404, detail="Checkpoint not found")
+
+     if not checkpoint["is_valid"]:
+         raise HTTPException(status_code=400, detail="Checkpoint is invalid")
+
+     # Restore state
+     restored_state = pipeline.restore_state_from_checkpoint(checkpoint["state_data"])
+     pipeline._state = restored_state
+     info["current_step"] = checkpoint_step
+
+     return {
+         "status": "rolled_back",
+         "pipeline_id": pipeline_id,
+         "rolled_back_to_step": checkpoint_step,
+     }
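+
+ # Example (illustrative): POST /v1/pipelines/<id>/rollback/40 restores the
+ # checkpoint captured at step 40 and rewinds current_step to match.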
+
+
+ @router.get("/{pipeline_id}/anomalies", response_model=List[AnomalyResponse])
+ async def list_anomalies(pipeline_id: str, limit: int = 50):
+     """List anomaly events for a pipeline."""
+     info = _get_pipeline(pipeline_id)
+
+     # Get anomalies from pipeline's anomaly detector history
+     # Note: In production, this would come from the database
+     anomalies = info.get("anomalies", [])
+
+     return [
+         AnomalyResponse(
+             id=str(uuid.uuid4()),
+             anomaly_type=a.get("anomaly_type", "unknown"),
+             severity=a.get("severity", "warning"),
+             message=a.get("message", ""),
+             step_number=a.get("step", 0),
+             detector_name=a.get("detector_name"),
+             threshold=a.get("threshold"),
+             actual_value=a.get("actual_value"),
+             was_auto_resolved=a.get("was_auto_resolved", False),
+             created_at=a.get("created_at", datetime.utcnow().isoformat()),
+         )
+         for a in anomalies[-limit:]
+     ]
+
+
+ @router.delete("/{pipeline_id}")
+ async def delete_pipeline(pipeline_id: str):
+     """Delete a pipeline."""
+     if pipeline_id not in _active_pipelines:
+         raise HTTPException(status_code=404, detail="Pipeline not found")
+
+     info = _active_pipelines[pipeline_id]
+     pipeline = info["pipeline"]
+
+     # Stop if running
+     if info["status"] == "running":
+         pipeline.stop()
+
+     del _active_pipelines[pipeline_id]
+
+     return {"status": "deleted", "pipeline_id": pipeline_id}