quantumflow-sdk 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantumflow/__init__.py +15 -2
- quantumflow/api/routes/__init__.py +2 -1
- quantumflow/api/routes/workflow_routes.py +227 -0
- quantumflow/core/workflow.py +311 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.0.dist-info}/METADATA +1 -1
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.0.dist-info}/RECORD +9 -7
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.0.dist-info}/WHEEL +0 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.0.dist-info}/entry_points.txt +0 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.0.dist-info}/top_level.txt +0 -0
quantumflow/__init__.py
CHANGED
|
@@ -6,10 +6,11 @@ Core Features:
|
|
|
6
6
|
- O(log n) memory through quantum entanglement
|
|
7
7
|
- Quantum teleportation for secure messaging
|
|
8
8
|
- BB84 QKD for unconditionally secure key exchange
|
|
9
|
+
- Workflow orchestration for chaining quantum operations
|
|
9
10
|
- Multi-backend support (IBM, AWS Braket, Simulator)
|
|
10
11
|
|
|
11
12
|
Installation:
|
|
12
|
-
pip install quantumflow
|
|
13
|
+
pip install quantumflow-sdk
|
|
13
14
|
|
|
14
15
|
Quick Start:
|
|
15
16
|
from quantumflow import QuantumCompressor
|
|
@@ -17,6 +18,14 @@ Quick Start:
|
|
|
17
18
|
compressor = QuantumCompressor(backend="simulator")
|
|
18
19
|
result = compressor.compress([100, 200, 150, 175])
|
|
19
20
|
print(f"Compression: {result.compression_percentage}%")
|
|
21
|
+
|
|
22
|
+
Workflow Example:
|
|
23
|
+
from quantumflow import QuantumWorkflow
|
|
24
|
+
|
|
25
|
+
workflow = QuantumWorkflow()
|
|
26
|
+
workflow.add_step("compress", params={"tokens": [100, 200, 150]})
|
|
27
|
+
workflow.add_step("qkd", params={"key_length": 256})
|
|
28
|
+
result = workflow.execute()
|
|
20
29
|
"""
|
|
21
30
|
|
|
22
31
|
from quantumflow.core.quantum_compressor import QuantumCompressor, CompressedResult
|
|
@@ -28,8 +37,9 @@ from quantumflow.core.teleportation import (
|
|
|
28
37
|
QKDExchange,
|
|
29
38
|
SecureMessenger,
|
|
30
39
|
)
|
|
40
|
+
from quantumflow.core.workflow import QuantumWorkflow, WorkflowResult
|
|
31
41
|
|
|
32
|
-
__version__ = "0.1.0"
|
|
42
|
+
__version__ = "0.2.0"
|
|
33
43
|
__all__ = [
|
|
34
44
|
# Core compression
|
|
35
45
|
"QuantumCompressor",
|
|
@@ -44,4 +54,7 @@ __all__ = [
|
|
|
44
54
|
"QuantumTeleporter",
|
|
45
55
|
"QKDExchange",
|
|
46
56
|
"SecureMessenger",
|
|
57
|
+
# Workflow
|
|
58
|
+
"QuantumWorkflow",
|
|
59
|
+
"WorkflowResult",
|
|
47
60
|
]
|
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow API Routes.
|
|
3
|
+
|
|
4
|
+
Provides REST API endpoints for quantum workflow orchestration.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any, Dict, List, Optional
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class WorkflowStepRequest:
    """Request model for a workflow step.

    NOTE(review): plain-dataclass mirror of the pydantic ``WorkflowStepModel``
    defined inside ``create_workflow_routes``; it is not referenced by the
    visible routes — presumably kept for callers that do not use FastAPI.
    Confirm before removing.
    """
    # Step type name, e.g. "compress", "qkd", "teleport".
    type: str
    # Parameters forwarded to the step implementation.
    params: Optional[Dict[str, Any]] = None
    # IDs of steps that must complete before this one runs.
    depends_on: Optional[List[str]] = None
    # Optional custom step ID; auto-generated by the workflow when omitted.
    id: Optional[str] = None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class WorkflowRequest:
    """Request model for creating a workflow.

    NOTE(review): unused by the visible routes (they use the pydantic
    ``CreateWorkflowModel`` instead); verify intended audience.
    """
    # Step definitions, each shaped like WorkflowStepRequest.
    steps: List[Dict[str, Any]]
    # Quantum backend to run on ("simulator", "ibm", "braket").
    backend: str = "simulator"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class WorkflowResponse:
    """Response model for workflow execution.

    Mirrors the fields of ``quantumflow.core.workflow.WorkflowResult``.
    NOTE(review): the visible routes return plain dicts rather than this
    dataclass — confirm whether it is still needed.
    """
    # ID of the executed workflow.
    workflow_id: str
    # Overall status, e.g. "completed" or "failed".
    status: str
    # Per-step summaries (id, type, status, result, error, duration).
    steps: List[Dict[str, Any]]
    # Wall-clock duration of the whole run in milliseconds.
    total_duration_ms: float
    # Results of completed steps keyed by step id.
    outputs: Dict[str, Any]
    # Error message when the workflow failed, else None.
    error: Optional[str] = None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def create_workflow_routes():
    """
    Create workflow API routes.

    Returns a FastAPI ``APIRouter`` that can be mounted on an app, or
    ``None`` when FastAPI / pydantic are not installed.

    Example usage with FastAPI:
        from fastapi import FastAPI
        from quantumflow.api.routes.workflow_routes import create_workflow_routes

        app = FastAPI()
        workflow_router = create_workflow_routes()
        app.include_router(workflow_router, prefix="/v1/workflow")
    """
    try:
        from fastapi import APIRouter, HTTPException
        from pydantic import BaseModel
    except ImportError:
        # FastAPI is an optional dependency: signal "no routes" with None
        # instead of raising at import time.
        # Return a dummy router if FastAPI is not installed
        return None

    router = APIRouter(tags=["workflow"])

    # Pydantic request models, defined locally so this module imports
    # cleanly even when pydantic is absent.
    class WorkflowStepModel(BaseModel):
        type: str
        params: Optional[Dict[str, Any]] = None
        depends_on: Optional[List[str]] = None
        id: Optional[str] = None

    class CreateWorkflowModel(BaseModel):
        steps: List[WorkflowStepModel]
        backend: str = "simulator"

    class ExecuteWorkflowModel(BaseModel):
        workflow_id: Optional[str] = None
        steps: Optional[List[WorkflowStepModel]] = None
        backend: str = "simulator"

    @router.post("/create")
    async def create_workflow(request: CreateWorkflowModel) -> Dict[str, Any]:
        """
        Create a new workflow without executing it.

        Returns the workflow definition (id, backend, steps) that can be
        executed later.

        NOTE(review): the definition is returned but not persisted anywhere
        visible here — confirm how /execute is expected to find it by id.
        """
        from quantumflow.core.workflow import QuantumWorkflow

        workflow = QuantumWorkflow(backend=request.backend)

        for step in request.steps:
            workflow.add_step(
                step_type=step.type,
                params=step.params,
                depends_on=step.depends_on,
                step_id=step.id,
            )

        return workflow.to_dict()

    @router.post("/execute")
    async def execute_workflow(request: ExecuteWorkflowModel) -> Dict[str, Any]:
        """
        Execute a quantum workflow built from the supplied steps.

        Returns execution results for all steps.

        NOTE(review): the request model accepts ``workflow_id``, but the field
        is ignored below — ``steps`` are always required (400 otherwise).
        Confirm whether executing a previously created workflow by id is
        intended to be supported.
        """
        from quantumflow.core.workflow import QuantumWorkflow

        if not request.steps:
            raise HTTPException(
                status_code=400,
                detail="Steps are required to execute a workflow"
            )

        workflow = QuantumWorkflow(backend=request.backend)

        for step in request.steps:
            workflow.add_step(
                step_type=step.type,
                params=step.params,
                depends_on=step.depends_on,
                step_id=step.id,
            )

        result = workflow.execute()

        return {
            "workflow_id": result.workflow_id,
            "status": result.status,
            "steps": result.steps,
            "total_duration_ms": result.total_duration_ms,
            "outputs": result.outputs,
            "error": result.error,
        }

    @router.post("/compress-and-teleport")
    async def compress_and_teleport(
        tokens: List[int],
        recipient: Optional[str] = None,
        backend: str = "simulator",
    ) -> Dict[str, Any]:
        """
        Convenience endpoint that combines compression and teleportation.

        This is a common workflow pattern for secure quantum messaging.

        NOTE(review): ``recipient`` is accepted but never used in the body —
        confirm whether it should be forwarded to one of the steps.
        """
        from quantumflow.core.workflow import QuantumWorkflow

        workflow = QuantumWorkflow(backend=backend)

        # Step 1: Compress tokens
        workflow.add_step(
            step_type="compress",
            params={"tokens": tokens},
            step_id="compression",
        )

        # Step 2: QKD key exchange
        workflow.add_step(
            step_type="qkd",
            params={"key_length": 256},
            step_id="key_exchange",
            depends_on=["compression"],
        )

        # Step 3: Create Bell pairs for teleportation
        workflow.add_step(
            step_type="teleport",
            params={"n_pairs": 10},
            step_id="teleportation",
            depends_on=["key_exchange"],
        )

        result = workflow.execute()

        return {
            "workflow_id": result.workflow_id,
            "status": result.status,
            "compression": result.outputs.get("compression", {}),
            "qkd": result.outputs.get("key_exchange", {}),
            "teleportation": result.outputs.get("teleportation", {}),
            "total_duration_ms": result.total_duration_ms,
        }

    @router.get("/templates")
    async def get_workflow_templates() -> List[Dict[str, Any]]:
        """
        Get available workflow templates.

        Returns predefined workflow configurations for common use cases,
        shaped like the ``steps`` payload accepted by /create and /execute.
        """
        return [
            {
                "name": "secure_messaging",
                "description": "Compress tokens, exchange QKD key, and teleport",
                "steps": [
                    {"type": "compress", "params": {"tokens": []}, "id": "compress"},
                    {"type": "qkd", "params": {"key_length": 256}, "id": "qkd", "depends_on": ["compress"]},
                    {"type": "teleport", "params": {"n_pairs": 10}, "id": "teleport", "depends_on": ["qkd"]},
                ],
            },
            {
                "name": "quantum_ml_training",
                "description": "Quantum backpropagation workflow",
                "steps": [
                    {"type": "compress", "params": {"tokens": []}, "id": "compress_input"},
                    {"type": "backprop", "params": {}, "id": "gradient", "depends_on": ["compress_input"]},
                ],
            },
            {
                "name": "optimization",
                "description": "QAOA optimization workflow",
                "steps": [
                    {"type": "qaoa", "params": {"p": 2}, "id": "optimize"},
                ],
            },
        ]

    return router
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
# Export router for use with FastAPI.
# ``router`` is None either when FastAPI/pydantic are not installed (the
# factory returns None on ImportError) or when route construction raises.
try:
    router = create_workflow_routes()
except Exception:
    router = None
|
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantum Workflow Orchestration Module.
|
|
3
|
+
|
|
4
|
+
Provides workflow orchestration for chaining quantum operations like:
|
|
5
|
+
- Token compression
|
|
6
|
+
- QKD key exchange
|
|
7
|
+
- Quantum teleportation
|
|
8
|
+
- Algorithm execution
|
|
9
|
+
|
|
10
|
+
Example:
|
|
11
|
+
from quantumflow.core.workflow import QuantumWorkflow
|
|
12
|
+
|
|
13
|
+
workflow = QuantumWorkflow()
|
|
14
|
+
workflow.add_step("compress", tokens=[100, 200, 150])
|
|
15
|
+
workflow.add_step("qkd", key_length=256)
|
|
16
|
+
workflow.add_step("teleport", use_compression=True)
|
|
17
|
+
result = workflow.execute()
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from dataclasses import dataclass, field
|
|
21
|
+
from typing import Any, Dict, List, Optional
|
|
22
|
+
from enum import Enum
|
|
23
|
+
import time
|
|
24
|
+
import uuid
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class StepType(Enum):
    """Types of workflow steps.

    The string values are what ``QuantumWorkflow.add_step`` accepts as its
    ``step_type`` argument.
    """
    COMPRESS = "compress"
    # NOTE(review): DECOMPRESS has no dedicated handler in _execute_step and
    # falls through to the params pass-through branch — confirm intended.
    DECOMPRESS = "decompress"
    QKD = "qkd"
    TELEPORT = "teleport"
    BACKPROP = "backprop"
    QAOA = "qaoa"
    VQE = "vqe"
    # Custom steps simply pass their params through as the result.
    CUSTOM = "custom"
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class StepStatus(Enum):
    """Status of a workflow step."""
    PENDING = "pending"        # not yet executed
    RUNNING = "running"        # currently executing
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # execution raised an exception
    SKIPPED = "skipped"        # not run because a dependency did not complete
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class WorkflowStep:
    """A single step in a quantum workflow.

    Instances are created by ``QuantumWorkflow.add_step`` and mutated in
    place during execution (status, result, error, timestamps).
    """
    # Unique step identifier within the workflow.
    id: str
    # Kind of operation this step performs.
    step_type: StepType
    # Parameters forwarded to the operation.
    params: Dict[str, Any]
    # Lifecycle state; updated by the workflow as the step runs.
    status: StepStatus = StepStatus.PENDING
    # Output of the operation once COMPLETED, else None.
    result: Optional[Dict[str, Any]] = None
    # Error message when FAILED or SKIPPED.
    error: Optional[str] = None
    # Wall-clock timestamps (time.time()) set around execution.
    started_at: Optional[float] = None
    completed_at: Optional[float] = None
    # IDs of steps that must complete before this one may run.
    depends_on: List[str] = field(default_factory=list)

    @property
    def duration_ms(self) -> Optional[float]:
        """Duration of step execution in milliseconds.

        Returns None unless the step has both started and finished.
        """
        if self.started_at and self.completed_at:
            return (self.completed_at - self.started_at) * 1000
        return None
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@dataclass
class WorkflowResult:
    """Result of workflow execution, returned by ``QuantumWorkflow.execute``."""
    # ID of the executed workflow.
    workflow_id: str
    # "completed" when no step raised, otherwise "failed".
    status: str
    # Per-step summaries: id, type, status, result, error, duration_ms.
    steps: List[Dict[str, Any]]
    # Wall-clock time for the whole run in milliseconds.
    total_duration_ms: float
    # Results of successfully executed steps keyed by step id.
    outputs: Dict[str, Any]
    # Message of the first exception raised during execution, if any.
    error: Optional[str] = None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class QuantumWorkflow:
    """
    Orchestrates quantum operations in a workflow.

    Supports:
    - Sequential step execution (steps run in the order they were added)
    - Dependency management between steps
    - Error handling and recovery
    - Step result passing to subsequent steps (via outputs/_step_results)
    """

    def __init__(self, backend: str = "simulator"):
        """
        Initialize a quantum workflow.

        Args:
            backend: Quantum backend to use (simulator, ibm, braket)
        """
        self.workflow_id = str(uuid.uuid4())
        self.backend = backend
        self.steps: List[WorkflowStep] = []
        # Results of completed steps keyed by step id.
        self._step_results: Dict[str, Any] = {}

    def add_step(
        self,
        step_type: str,
        params: Optional[Dict[str, Any]] = None,
        depends_on: Optional[List[str]] = None,
        step_id: Optional[str] = None,
    ) -> str:
        """
        Add a step to the workflow.

        Args:
            step_type: Type of step (compress, qkd, teleport, etc.)
            params: Parameters for the step
            depends_on: List of step IDs this step depends on
            step_id: Optional custom step ID

        Returns:
            The ID of the created step

        Raises:
            ValueError: If ``step_type`` is not a valid StepType value.
        """
        if step_id is None:
            step_id = f"step_{len(self.steps) + 1}"

        step = WorkflowStep(
            id=step_id,
            step_type=StepType(step_type),
            params=params or {},
            depends_on=depends_on or [],
        )
        self.steps.append(step)
        return step_id

    def _dependencies_met(self, step: WorkflowStep) -> bool:
        """
        Return True if every dependency of *step* has completed.

        On the first unmet or unknown dependency, mark the step SKIPPED,
        record the reason in ``step.error``, and return False.
        """
        for dep_id in step.depends_on:
            dep_step = next((s for s in self.steps if s.id == dep_id), None)
            if dep_step is None:
                # Fix: a dangling dependency id used to be silently ignored.
                step.status = StepStatus.SKIPPED
                step.error = f"Dependency {dep_id} not found"
                return False
            if dep_step.status != StepStatus.COMPLETED:
                step.status = StepStatus.SKIPPED
                step.error = f"Dependency {dep_id} not completed"
                return False
        return True

    def _execute_step(self, step: WorkflowStep) -> Dict[str, Any]:
        """
        Execute a single workflow step.

        Args:
            step: The step to execute

        Returns:
            Result of the step execution

        Raises:
            Exception: Re-raises whatever the underlying operation raised,
                after marking the step FAILED.
        """
        step.status = StepStatus.RUNNING
        step.started_at = time.time()

        try:
            if step.step_type == StepType.COMPRESS:
                # Imports are kept inside each branch so that a given step
                # type only requires the modules it actually uses.
                from quantumflow.core.quantum_compressor import QuantumCompressor

                compressor = QuantumCompressor(backend=self.backend)
                tokens = step.params.get("tokens", [])
                compressed = compressor.compress(tokens)
                amplitudes = compressed.amplitudes
                result = {
                    # Array-like amplitudes are converted to plain lists.
                    "amplitudes": amplitudes.tolist() if hasattr(amplitudes, "tolist") else amplitudes,
                    "n_qubits": compressed.n_qubits,
                    "compression_percentage": compressed.compression_percentage,
                    "input_token_count": compressed.input_token_count,
                }

            elif step.step_type == StepType.QKD:
                from quantumflow.core.teleportation import QKDExchange

                qkd = QKDExchange(backend=self.backend)
                key_length = step.params.get("key_length", 256)
                result = qkd.exchange(key_length=key_length)

            elif step.step_type == StepType.TELEPORT:
                from quantumflow.core.teleportation import QuantumTeleporter

                teleporter = QuantumTeleporter(backend=self.backend)
                state = step.params.get("state")
                if state:
                    teleport_result = teleporter.teleport_state(state)
                    result = {
                        "fidelity": teleport_result.fidelity,
                        "corrections_applied": teleport_result.corrections_applied,
                    }
                else:
                    # No explicit state: just create Bell pairs.
                    n_pairs = step.params.get("n_pairs", 10)
                    teleporter.create_bell_pairs(n_pairs)
                    result = {"bell_pairs_created": n_pairs}

            elif step.step_type == StepType.BACKPROP:
                from quantumflow.core.quantum_backprop import QuantumBackprop

                backprop = QuantumBackprop(backend=self.backend)
                bp_result = backprop.compute_gradient(
                    input_state=step.params.get("input_state", [0.5, 0.5]),
                    target_state=step.params.get("target_state", [0.8, 0.2]),
                    weights=step.params.get("weights", [0.3, 0.7]),
                )
                result = {
                    "gradients": bp_result.gradients,
                    "similarity": bp_result.similarity,
                }

            elif step.step_type == StepType.QAOA:
                from quantumflow.algorithms.optimization.qaoa import QuantumQAOA

                qaoa = QuantumQAOA(backend=self.backend)
                result = qaoa.optimize(
                    problem=step.params.get("problem", {}),
                    p=step.params.get("p", 2),
                )

            elif step.step_type == StepType.VQE:
                from quantumflow.algorithms.machine_learning.vqe import QuantumVQE

                vqe = QuantumVQE(backend=self.backend)
                result = vqe.find_ground_state(
                    hamiltonian=step.params.get("hamiltonian", {}),
                )

            else:
                # CUSTOM (and any type without a dedicated handler, e.g.
                # DECOMPRESS): just pass the params through unchanged.
                result = step.params

            step.status = StepStatus.COMPLETED
            step.result = result
            step.completed_at = time.time()
            return result

        except Exception as e:
            step.status = StepStatus.FAILED
            step.error = str(e)
            step.completed_at = time.time()
            raise

    def execute(self) -> WorkflowResult:
        """
        Execute all steps in the workflow.

        Steps run sequentially in insertion order. A step whose dependencies
        did not complete is marked SKIPPED and is NOT executed. The first
        exception aborts the run; remaining steps stay PENDING.

        Returns:
            WorkflowResult with all step results
        """
        start_time = time.time()
        outputs: Dict[str, Any] = {}
        error: Optional[str] = None

        try:
            for step in self.steps:
                # Fix: the previous implementation ran the dependency check
                # with ``continue`` inside the inner dependency loop, so a
                # step marked SKIPPED was still executed afterwards.
                if not self._dependencies_met(step):
                    continue

                result = self._execute_step(step)
                self._step_results[step.id] = result
                outputs[step.id] = result

        except Exception as e:
            error = str(e)

        end_time = time.time()

        return WorkflowResult(
            workflow_id=self.workflow_id,
            status="completed" if error is None else "failed",
            steps=[
                {
                    "id": s.id,
                    "type": s.step_type.value,
                    "status": s.status.value,
                    "result": s.result,
                    "error": s.error,
                    "duration_ms": s.duration_ms,
                }
                for s in self.steps
            ],
            total_duration_ms=(end_time - start_time) * 1000,
            outputs=outputs,
            error=error,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Convert workflow to dictionary representation (id, backend, steps)."""
        return {
            "workflow_id": self.workflow_id,
            "backend": self.backend,
            "steps": [
                {
                    "id": s.id,
                    "type": s.step_type.value,
                    "params": s.params,
                    "depends_on": s.depends_on,
                }
                for s in self.steps
            ],
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "QuantumWorkflow":
        """Create workflow from dictionary representation (inverse of to_dict)."""
        workflow = cls(backend=data.get("backend", "simulator"))
        workflow.workflow_id = data.get("workflow_id", workflow.workflow_id)

        for step_data in data.get("steps", []):
            workflow.add_step(
                step_type=step_data["type"],
                params=step_data.get("params", {}),
                depends_on=step_data.get("depends_on", []),
                step_id=step_data.get("id"),
            )

        return workflow
|
|
@@ -9,7 +9,7 @@ db/__init__.py,sha256=CLyI_3_AP7wQATuxLV2yHPvYDNikJYmH5BMQ3Oez1xw,280
|
|
|
9
9
|
db/crud.py,sha256=q_HpP0RzsI7Lz1EMhEWzaMxdxzGAdVTBD2LALqbGLGQ,8175
|
|
10
10
|
db/database.py,sha256=Md_e3z1t6NzKCCVdS4gRo0Pf6LNZvRClIX4QU8F8InI,2078
|
|
11
11
|
db/models.py,sha256=W-NLqT6kGtXTN3r4DqVPSPglt-_iFDBlqB6e3nzflrg,5975
|
|
12
|
-
quantumflow/__init__.py,sha256
|
|
12
|
+
quantumflow/__init__.py,sha256=-uhqJ01N-_CmulHeZleJraqd_52b7G3O8LFW42JgqPw,1731
|
|
13
13
|
quantumflow/algorithms/__init__.py,sha256=waXASb2jnbAcjV-xS1wx-bGmPJ5lCj111dJ14eB8KCo,916
|
|
14
14
|
quantumflow/algorithms/compression/__init__.py,sha256=rejDCdZJEy1tamQdDaZodGbo8wA1rszlXEtwvYgZO7A,361
|
|
15
15
|
quantumflow/algorithms/compression/amplitude_amplification.py,sha256=pebJiATIhwEdnAZY4CwInYPEKbtZufJ0Ka3OCdtvZRk,5440
|
|
@@ -30,8 +30,9 @@ quantumflow/algorithms/utility/__init__.py,sha256=O-pY9GQ3i5DspBg-pLXFKz2QpLobL-
|
|
|
30
30
|
quantumflow/algorithms/utility/circuit_optimizer.py,sha256=PsvSZA6teiO4soktMW2hiVKrTEieqw5ccKYjtV4T63I,5839
|
|
31
31
|
quantumflow/algorithms/utility/error_correction.py,sha256=1L4ikcgasFwKYFSCxkx9Bk-Jz9En1KqZI-uGUScjsFw,9398
|
|
32
32
|
quantumflow/api/__init__.py,sha256=WxcaYzyvrvOdWFlJn2AchfEm9Jy8ytFD9f0POaAF-f8,25
|
|
33
|
-
quantumflow/api/routes/__init__.py,sha256=
|
|
33
|
+
quantumflow/api/routes/__init__.py,sha256=qth1cwVYo7-9a1s44XjuusdHUoWHimgXjDkFW1sjCx8,226
|
|
34
34
|
quantumflow/api/routes/billing_routes.py,sha256=MDvwkbVaXZhFl4_j5pgJ72_049a14u3fgAXxh314OdQ,16955
|
|
35
|
+
quantumflow/api/routes/workflow_routes.py,sha256=4aiaOXPRq4MTQdbp2nHKEgcsoB6cugxKIuCKV1nDmog,6875
|
|
35
36
|
quantumflow/backends/__init__.py,sha256=Q2Oi3VMp-AMXYN3QffsYd9NFz-qOaxgK_iSVCpVIivI,829
|
|
36
37
|
quantumflow/backends/base_backend.py,sha256=cvvFcInPtzmzp1-uF796ciW3GpXgzaj4NJI2u_zsbL4,5408
|
|
37
38
|
quantumflow/backends/braket_backend.py,sha256=vpWH0w6sQa_56SEBk_kvomKsuOJlb1ILANS-jUGcDv4,11527
|
|
@@ -46,6 +47,7 @@ quantumflow/core/memory.py,sha256=0rB2Dks3XM1NEiL-v4G0P6jeL2BvcdJNljezRrVccdI,44
|
|
|
46
47
|
quantumflow/core/quantum_backprop.py,sha256=XozlPtwinegI5NosxlW4TrpP5Zh58_yI8WnxznwBHlU,12370
|
|
47
48
|
quantumflow/core/quantum_compressor.py,sha256=okzfF4-1uL22FcK9Xhng1DLzmIbz3ADN-R1xzFTv-tI,10094
|
|
48
49
|
quantumflow/core/teleportation.py,sha256=_T5rRItjFramqWdqBNLwu_fY7Yr9pmdUda4L7STcwcA,12420
|
|
50
|
+
quantumflow/core/workflow.py,sha256=HCazmlPns68TvIJytvcVoy0LSHb9sO-pUcDN5wPR2Zw,10116
|
|
49
51
|
quantumflow/integrations/__init__.py,sha256=_QS8M9XxmBzRspuBCly6qocCopPCPsykFt-QhHWmfQ8,2571
|
|
50
52
|
quantumflow/integrations/autogen_tools.py,sha256=sTTX7mGFjUKbmVatlcY9N9F2NeR6vkIj-VYduE0SNus,14672
|
|
51
53
|
quantumflow/integrations/crewai_agents.py,sha256=t62hukL0xg2FV54yczTAakNYQA-AOZ9AWWgzDnH0LGM,12604
|
|
@@ -53,8 +55,8 @@ quantumflow/integrations/crewai_tools.py,sha256=bY5uJyKmCegt6Kb9hvErhvaKcCDlk2_i
|
|
|
53
55
|
quantumflow/integrations/langchain_memory.py,sha256=wgYTdovncZNWpFwcNZjhNUqNRi661ys9GXaHYmbXP-Q,12608
|
|
54
56
|
quantumflow/integrations/langchain_tools.py,sha256=bDrKZDYSRQJJGSNc9iay1Q4NoIR8CHmtZLcybS5ub_w,12401
|
|
55
57
|
quantumflow/integrations/mcp_server.py,sha256=KJTAxJOyCVl7-whTD1iss9VZmyi0K1f4gNJCH8Cvl_0,21117
|
|
56
|
-
quantumflow_sdk-0.
|
|
57
|
-
quantumflow_sdk-0.
|
|
58
|
-
quantumflow_sdk-0.
|
|
59
|
-
quantumflow_sdk-0.
|
|
60
|
-
quantumflow_sdk-0.
|
|
58
|
+
quantumflow_sdk-0.2.0.dist-info/METADATA,sha256=wVYAu4ziVhtNxe2Fxachem29PJq4AnM6NGPRpBWPkWw,5461
|
|
59
|
+
quantumflow_sdk-0.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
60
|
+
quantumflow_sdk-0.2.0.dist-info/entry_points.txt,sha256=ebX2acoOLgym42XZEqym3OfKCYiPz-mFuPSSGsHFz4c,53
|
|
61
|
+
quantumflow_sdk-0.2.0.dist-info/top_level.txt,sha256=hEr_GRvoZ3-83naVIhNuJvoAND1aCvhBag_ynxQguIo,19
|
|
62
|
+
quantumflow_sdk-0.2.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|