quantumflow-sdk 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantumflow/__init__.py +15 -2
- quantumflow/api/routes/__init__.py +2 -1
- quantumflow/api/routes/workflow_routes.py +227 -0
- quantumflow/core/workflow.py +311 -0
- quantumflow/integrations/__init__.py +14 -0
- quantumflow/integrations/openai_functions.py +578 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/METADATA +1 -1
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/RECORD +11 -8
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/WHEEL +0 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/entry_points.txt +0 -0
- {quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/top_level.txt +0 -0
quantumflow/__init__.py
CHANGED
@@ -6,10 +6,11 @@ Core Features:
     - O(log n) memory through quantum entanglement
     - Quantum teleportation for secure messaging
     - BB84 QKD for unconditionally secure key exchange
+    - Workflow orchestration for chaining quantum operations
     - Multi-backend support (IBM, AWS Braket, Simulator)

 Installation:
-    pip install quantumflow
+    pip install quantumflow-sdk

 Quick Start:
     from quantumflow import QuantumCompressor
@@ -17,6 +18,14 @@ Quick Start:
     compressor = QuantumCompressor(backend="simulator")
     result = compressor.compress([100, 200, 150, 175])
     print(f"Compression: {result.compression_percentage}%")
+
+Workflow Example:
+    from quantumflow import QuantumWorkflow
+
+    workflow = QuantumWorkflow()
+    workflow.add_step("compress", params={"tokens": [100, 200, 150]})
+    workflow.add_step("qkd", params={"key_length": 256})
+    result = workflow.execute()
 """

 from quantumflow.core.quantum_compressor import QuantumCompressor, CompressedResult
@@ -28,8 +37,9 @@ from quantumflow.core.teleportation import (
     QKDExchange,
     SecureMessenger,
 )
+from quantumflow.core.workflow import QuantumWorkflow, WorkflowResult

-__version__ = "0.1.0"
+__version__ = "0.2.1"
 __all__ = [
     # Core compression
     "QuantumCompressor",
@@ -44,4 +54,7 @@ __all__ = [
     "QuantumTeleporter",
     "QKDExchange",
     "SecureMessenger",
+    # Workflow
+    "QuantumWorkflow",
+    "WorkflowResult",
 ]
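For reference, a minimal compatibility sketch (not part of the package): QuantumWorkflow and WorkflowResult are only exported from 0.2.1 onward, so code that may still run against a 0.1.x install can guard the import and fall back to the compressor API present in both releases.

    # Hypothetical shim for code that must run on either 0.1.x or 0.2.1.
    try:
        from quantumflow import QuantumWorkflow  # added in 0.2.1
    except ImportError:
        QuantumWorkflow = None

    def run_compression(tokens):
        # NB: the two branches return different result types
        # (CompressedResult vs WorkflowResult); this is a sketch only.
        if QuantumWorkflow is None:
            # 0.1.x fallback: call the compressor directly
            from quantumflow import QuantumCompressor
            return QuantumCompressor(backend="simulator").compress(tokens)
        workflow = QuantumWorkflow(backend="simulator")
        workflow.add_step("compress", params={"tokens": tokens})
        return workflow.execute()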
quantumflow/api/routes/workflow_routes.py
ADDED
@@ -0,0 +1,227 @@
+"""
+Workflow API Routes.
+
+Provides REST API endpoints for quantum workflow orchestration.
+"""
+
+from typing import Any, Dict, List, Optional
+from dataclasses import dataclass
+
+
+@dataclass
+class WorkflowStepRequest:
+    """Request model for a workflow step."""
+    type: str
+    params: Optional[Dict[str, Any]] = None
+    depends_on: Optional[List[str]] = None
+    id: Optional[str] = None
+
+
+@dataclass
+class WorkflowRequest:
+    """Request model for creating a workflow."""
+    steps: List[Dict[str, Any]]
+    backend: str = "simulator"
+
+
+@dataclass
+class WorkflowResponse:
+    """Response model for workflow execution."""
+    workflow_id: str
+    status: str
+    steps: List[Dict[str, Any]]
+    total_duration_ms: float
+    outputs: Dict[str, Any]
+    error: Optional[str] = None
+
+
+def create_workflow_routes():
+    """
+    Create workflow API routes.
+
+    Returns a router that can be mounted on a FastAPI app.
+
+    Example usage with FastAPI:
+        from fastapi import FastAPI
+        from quantumflow.api.routes.workflow_routes import create_workflow_routes
+
+        app = FastAPI()
+        workflow_router = create_workflow_routes()
+        app.include_router(workflow_router, prefix="/v1/workflow")
+    """
+    try:
+        from fastapi import APIRouter, HTTPException
+        from pydantic import BaseModel
+    except ImportError:
+        # Return a dummy router if FastAPI is not installed
+        return None
+
+    router = APIRouter(tags=["workflow"])
+
+    class WorkflowStepModel(BaseModel):
+        type: str
+        params: Optional[Dict[str, Any]] = None
+        depends_on: Optional[List[str]] = None
+        id: Optional[str] = None
+
+    class CreateWorkflowModel(BaseModel):
+        steps: List[WorkflowStepModel]
+        backend: str = "simulator"
+
+    class ExecuteWorkflowModel(BaseModel):
+        workflow_id: Optional[str] = None
+        steps: Optional[List[WorkflowStepModel]] = None
+        backend: str = "simulator"
+
+    @router.post("/create")
+    async def create_workflow(request: CreateWorkflowModel) -> Dict[str, Any]:
+        """
+        Create a new workflow without executing it.
+
+        Returns the workflow definition that can be executed later.
+        """
+        from quantumflow.core.workflow import QuantumWorkflow
+
+        workflow = QuantumWorkflow(backend=request.backend)
+
+        for step in request.steps:
+            workflow.add_step(
+                step_type=step.type,
+                params=step.params,
+                depends_on=step.depends_on,
+                step_id=step.id,
+            )
+
+        return workflow.to_dict()
+
+    @router.post("/execute")
+    async def execute_workflow(request: ExecuteWorkflowModel) -> Dict[str, Any]:
+        """
+        Execute a quantum workflow.
+
+        Can either:
+        - Execute a new workflow by providing steps
+        - Execute an existing workflow by providing workflow_id
+
+        Returns execution results for all steps.
+        """
+        from quantumflow.core.workflow import QuantumWorkflow
+
+        if not request.steps:
+            raise HTTPException(
+                status_code=400,
+                detail="Steps are required to execute a workflow"
+            )
+
+        workflow = QuantumWorkflow(backend=request.backend)
+
+        for step in request.steps:
+            workflow.add_step(
+                step_type=step.type,
+                params=step.params,
+                depends_on=step.depends_on,
+                step_id=step.id,
+            )
+
+        result = workflow.execute()
+
+        return {
+            "workflow_id": result.workflow_id,
+            "status": result.status,
+            "steps": result.steps,
+            "total_duration_ms": result.total_duration_ms,
+            "outputs": result.outputs,
+            "error": result.error,
+        }
+
+    @router.post("/compress-and-teleport")
+    async def compress_and_teleport(
+        tokens: List[int],
+        recipient: Optional[str] = None,
+        backend: str = "simulator",
+    ) -> Dict[str, Any]:
+        """
+        Convenience endpoint that combines compression and teleportation.
+
+        This is a common workflow pattern for secure quantum messaging.
+        """
+        from quantumflow.core.workflow import QuantumWorkflow
+
+        workflow = QuantumWorkflow(backend=backend)
+
+        # Step 1: Compress tokens
+        workflow.add_step(
+            step_type="compress",
+            params={"tokens": tokens},
+            step_id="compression",
+        )
+
+        # Step 2: QKD key exchange
+        workflow.add_step(
+            step_type="qkd",
+            params={"key_length": 256},
+            step_id="key_exchange",
+            depends_on=["compression"],
+        )
+
+        # Step 3: Create Bell pairs for teleportation
+        workflow.add_step(
+            step_type="teleport",
+            params={"n_pairs": 10},
+            step_id="teleportation",
+            depends_on=["key_exchange"],
+        )
+
+        result = workflow.execute()
+
+        return {
+            "workflow_id": result.workflow_id,
+            "status": result.status,
+            "compression": result.outputs.get("compression", {}),
+            "qkd": result.outputs.get("key_exchange", {}),
+            "teleportation": result.outputs.get("teleportation", {}),
+            "total_duration_ms": result.total_duration_ms,
+        }
+
+    @router.get("/templates")
+    async def get_workflow_templates() -> List[Dict[str, Any]]:
+        """
+        Get available workflow templates.
+
+        Returns predefined workflow configurations for common use cases.
+        """
+        return [
+            {
+                "name": "secure_messaging",
+                "description": "Compress tokens, exchange QKD key, and teleport",
+                "steps": [
+                    {"type": "compress", "params": {"tokens": []}, "id": "compress"},
+                    {"type": "qkd", "params": {"key_length": 256}, "id": "qkd", "depends_on": ["compress"]},
+                    {"type": "teleport", "params": {"n_pairs": 10}, "id": "teleport", "depends_on": ["qkd"]},
+                ],
+            },
+            {
+                "name": "quantum_ml_training",
+                "description": "Quantum backpropagation workflow",
+                "steps": [
+                    {"type": "compress", "params": {"tokens": []}, "id": "compress_input"},
+                    {"type": "backprop", "params": {}, "id": "gradient", "depends_on": ["compress_input"]},
+                ],
+            },
+            {
+                "name": "optimization",
+                "description": "QAOA optimization workflow",
+                "steps": [
+                    {"type": "qaoa", "params": {"p": 2}, "id": "optimize"},
+                ],
+            },
+        ]
+
+    return router
+
+
+# Export router for use with FastAPI
+try:
+    router = create_workflow_routes()
+except Exception:
+    router = None
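The route module above degrades gracefully when FastAPI is absent, so any sketch of exercising it assumes the optional dependencies are installed. A minimal in-process check, assuming fastapi (plus httpx for TestClient) is available; the mount prefix mirrors the module's own docstring example:

    from fastapi import FastAPI
    from fastapi.testclient import TestClient
    from quantumflow.api.routes.workflow_routes import create_workflow_routes

    app = FastAPI()
    router = create_workflow_routes()  # returns None if FastAPI/pydantic are missing
    if router is not None:
        app.include_router(router, prefix="/v1/workflow")

    client = TestClient(app)
    resp = client.post("/v1/workflow/execute", json={
        "backend": "simulator",
        "steps": [
            {"type": "compress", "params": {"tokens": [100, 200, 150]}, "id": "compress"},
            {"type": "qkd", "params": {"key_length": 256}, "depends_on": ["compress"]},
        ],
    })
    print(resp.status_code, resp.json().get("status"))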
quantumflow/core/workflow.py
ADDED
@@ -0,0 +1,311 @@
+"""
+Quantum Workflow Orchestration Module.
+
+Provides workflow orchestration for chaining quantum operations like:
+- Token compression
+- QKD key exchange
+- Quantum teleportation
+- Algorithm execution
+
+Example:
+    from quantumflow.core.workflow import QuantumWorkflow
+
+    workflow = QuantumWorkflow()
+    workflow.add_step("compress", tokens=[100, 200, 150])
+    workflow.add_step("qkd", key_length=256)
+    workflow.add_step("teleport", use_compression=True)
+    result = workflow.execute()
+"""
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional
+from enum import Enum
+import time
+import uuid
+
+
+class StepType(Enum):
+    """Types of workflow steps."""
+    COMPRESS = "compress"
+    DECOMPRESS = "decompress"
+    QKD = "qkd"
+    TELEPORT = "teleport"
+    BACKPROP = "backprop"
+    QAOA = "qaoa"
+    VQE = "vqe"
+    CUSTOM = "custom"
+
+
+class StepStatus(Enum):
+    """Status of a workflow step."""
+    PENDING = "pending"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+    SKIPPED = "skipped"
+
+
+@dataclass
+class WorkflowStep:
+    """A single step in a quantum workflow."""
+    id: str
+    step_type: StepType
+    params: Dict[str, Any]
+    status: StepStatus = StepStatus.PENDING
+    result: Optional[Dict[str, Any]] = None
+    error: Optional[str] = None
+    started_at: Optional[float] = None
+    completed_at: Optional[float] = None
+    depends_on: List[str] = field(default_factory=list)
+
+    @property
+    def duration_ms(self) -> Optional[float]:
+        """Duration of step execution in milliseconds."""
+        if self.started_at and self.completed_at:
+            return (self.completed_at - self.started_at) * 1000
+        return None
+
+
+@dataclass
+class WorkflowResult:
+    """Result of workflow execution."""
+    workflow_id: str
+    status: str
+    steps: List[Dict[str, Any]]
+    total_duration_ms: float
+    outputs: Dict[str, Any]
+    error: Optional[str] = None
+
+
+class QuantumWorkflow:
+    """
+    Orchestrates quantum operations in a workflow.
+
+    Supports:
+    - Sequential step execution
+    - Dependency management between steps
+    - Error handling and recovery
+    - Step result passing to subsequent steps
+    """
+
+    def __init__(self, backend: str = "simulator"):
+        """
+        Initialize a quantum workflow.
+
+        Args:
+            backend: Quantum backend to use (simulator, ibm, braket)
+        """
+        self.workflow_id = str(uuid.uuid4())
+        self.backend = backend
+        self.steps: List[WorkflowStep] = []
+        self._step_results: Dict[str, Any] = {}
+
+    def add_step(
+        self,
+        step_type: str,
+        params: Optional[Dict[str, Any]] = None,
+        depends_on: Optional[List[str]] = None,
+        step_id: Optional[str] = None,
+    ) -> str:
+        """
+        Add a step to the workflow.
+
+        Args:
+            step_type: Type of step (compress, qkd, teleport, etc.)
+            params: Parameters for the step
+            depends_on: List of step IDs this step depends on
+            step_id: Optional custom step ID
+
+        Returns:
+            The ID of the created step
+        """
+        if step_id is None:
+            step_id = f"step_{len(self.steps) + 1}"
+
+        step = WorkflowStep(
+            id=step_id,
+            step_type=StepType(step_type),
+            params=params or {},
+            depends_on=depends_on or [],
+        )
+        self.steps.append(step)
+        return step_id
+
+    def _execute_step(self, step: WorkflowStep) -> Dict[str, Any]:
+        """
+        Execute a single workflow step.
+
+        Args:
+            step: The step to execute
+
+        Returns:
+            Result of the step execution
+        """
+        from quantumflow.core.quantum_compressor import QuantumCompressor
+        from quantumflow.core.teleportation import QKDExchange, QuantumTeleporter
+
+        step.status = StepStatus.RUNNING
+        step.started_at = time.time()
+
+        try:
+            result = {}
+
+            if step.step_type == StepType.COMPRESS:
+                compressor = QuantumCompressor(backend=self.backend)
+                tokens = step.params.get("tokens", [])
+                compressed = compressor.compress(tokens)
+                result = {
+                    "amplitudes": compressed.amplitudes.tolist() if hasattr(compressed.amplitudes, 'tolist') else compressed.amplitudes,
+                    "n_qubits": compressed.n_qubits,
+                    "compression_percentage": compressed.compression_percentage,
+                    "input_token_count": compressed.input_token_count,
+                }
+
+            elif step.step_type == StepType.QKD:
+                qkd = QKDExchange(backend=self.backend)
+                key_length = step.params.get("key_length", 256)
+                qkd_result = qkd.exchange(key_length=key_length)
+                result = qkd_result
+
+            elif step.step_type == StepType.TELEPORT:
+                teleporter = QuantumTeleporter(backend=self.backend)
+                state = step.params.get("state")
+                if state:
+                    teleport_result = teleporter.teleport_state(state)
+                    result = {
+                        "fidelity": teleport_result.fidelity,
+                        "corrections_applied": teleport_result.corrections_applied,
+                    }
+                else:
+                    # Create Bell pairs
+                    n_pairs = step.params.get("n_pairs", 10)
+                    pairs = teleporter.create_bell_pairs(n_pairs)
+                    result = {"bell_pairs_created": n_pairs}
+
+            elif step.step_type == StepType.BACKPROP:
+                from quantumflow.core.quantum_backprop import QuantumBackprop
+                backprop = QuantumBackprop(backend=self.backend)
+                bp_result = backprop.compute_gradient(
+                    input_state=step.params.get("input_state", [0.5, 0.5]),
+                    target_state=step.params.get("target_state", [0.8, 0.2]),
+                    weights=step.params.get("weights", [0.3, 0.7]),
+                )
+                result = {
+                    "gradients": bp_result.gradients,
+                    "similarity": bp_result.similarity,
+                }
+
+            elif step.step_type == StepType.QAOA:
+                from quantumflow.algorithms.optimization.qaoa import QuantumQAOA
+                qaoa = QuantumQAOA(backend=self.backend)
+                qaoa_result = qaoa.optimize(
+                    problem=step.params.get("problem", {}),
+                    p=step.params.get("p", 2),
+                )
+                result = qaoa_result
+
+            elif step.step_type == StepType.VQE:
+                from quantumflow.algorithms.machine_learning.vqe import QuantumVQE
+                vqe = QuantumVQE(backend=self.backend)
+                vqe_result = vqe.find_ground_state(
+                    hamiltonian=step.params.get("hamiltonian", {}),
+                )
+                result = vqe_result
+
+            else:
+                # Custom step - just pass through params
+                result = step.params
+
+            step.status = StepStatus.COMPLETED
+            step.result = result
+            step.completed_at = time.time()
+
+            return result
+
+        except Exception as e:
+            step.status = StepStatus.FAILED
+            step.error = str(e)
+            step.completed_at = time.time()
+            raise
+
+    def execute(self) -> WorkflowResult:
+        """
+        Execute all steps in the workflow.
+
+        Returns:
+            WorkflowResult with all step results
+        """
+        start_time = time.time()
+        outputs = {}
+        error = None
+
+        try:
+            for step in self.steps:
+                # Check dependencies
+                for dep_id in step.depends_on:
+                    dep_step = next((s for s in self.steps if s.id == dep_id), None)
+                    if dep_step and dep_step.status != StepStatus.COMPLETED:
+                        step.status = StepStatus.SKIPPED
+                        step.error = f"Dependency {dep_id} not completed"
+                        continue
+
+                # Execute the step
+                result = self._execute_step(step)
+                self._step_results[step.id] = result
+                outputs[step.id] = result
+
+        except Exception as e:
+            error = str(e)
+
+        end_time = time.time()
+
+        return WorkflowResult(
+            workflow_id=self.workflow_id,
+            status="completed" if error is None else "failed",
+            steps=[
+                {
+                    "id": s.id,
+                    "type": s.step_type.value,
+                    "status": s.status.value,
+                    "result": s.result,
+                    "error": s.error,
+                    "duration_ms": s.duration_ms,
+                }
+                for s in self.steps
+            ],
+            total_duration_ms=(end_time - start_time) * 1000,
+            outputs=outputs,
+            error=error,
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert workflow to dictionary representation."""
+        return {
+            "workflow_id": self.workflow_id,
+            "backend": self.backend,
+            "steps": [
+                {
+                    "id": s.id,
+                    "type": s.step_type.value,
+                    "params": s.params,
+                    "depends_on": s.depends_on,
+                }
+                for s in self.steps
+            ],
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "QuantumWorkflow":
+        """Create workflow from dictionary representation."""
+        workflow = cls(backend=data.get("backend", "simulator"))
+        workflow.workflow_id = data.get("workflow_id", workflow.workflow_id)
+
+        for step_data in data.get("steps", []):
+            workflow.add_step(
+                step_type=step_data["type"],
+                params=step_data.get("params", {}),
+                depends_on=step_data.get("depends_on", []),
+                step_id=step_data.get("id"),
+            )
+
+        return workflow
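Driving the orchestrator directly looks like the sketch below; it assumes the simulator backend and that the core modules the steps dispatch to (QuantumCompressor, QKDExchange) behave as in this release. Step outputs are keyed by step id in result.outputs, and to_dict()/from_dict() round-trip only the definition, not execution state.

    from quantumflow.core.workflow import QuantumWorkflow

    wf = QuantumWorkflow(backend="simulator")
    wf.add_step("compress", params={"tokens": [100, 200, 150, 175]}, step_id="compress")
    wf.add_step("qkd", params={"key_length": 256}, depends_on=["compress"], step_id="qkd")

    result = wf.execute()
    print(result.status, f"{result.total_duration_ms:.2f} ms")
    for step in result.steps:
        print(step["id"], step["status"], step["duration_ms"])

    # Serialize the definition and rebuild an equivalent workflow.
    clone = QuantumWorkflow.from_dict(wf.to_dict())
    assert [s.id for s in clone.steps] == ["compress", "qkd"]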
quantumflow/integrations/__init__.py
CHANGED
@@ -2,12 +2,21 @@
 QuantumFlow Integrations.

 Provides integration with popular AI agent frameworks:
+- OpenAI: Function calling for GPT-4/GPT-3.5-turbo
 - LangChain: Tools, Memory, and Chains
 - CrewAI: Tools and Agents
 - AutoGen: Agents and Function Tools
 - MCP: Model Context Protocol Server
 """

+# OpenAI Integration
+from quantumflow.integrations.openai_functions import (
+    get_quantum_functions as get_openai_functions,
+    get_quantum_tools as get_openai_tools,
+    execute_quantum_function,
+    QuantumAssistant,
+)
+
 # LangChain Integration
 from quantumflow.integrations.langchain_tools import (
     QuantumCompressTool,
@@ -64,6 +73,11 @@ from quantumflow.integrations.mcp_server import (
 )

 __all__ = [
+    # OpenAI Functions
+    "get_openai_functions",
+    "get_openai_tools",
+    "execute_quantum_function",
+    "QuantumAssistant",
     # LangChain Tools
     "QuantumCompressTool",
     "QuantumGradientTool",
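With the re-exports above, callers can reach the OpenAI helpers from the package namespace under the aliased names; a short sketch, assuming the integrations package's other optional dependencies import cleanly:

    from quantumflow.integrations import (
        get_openai_functions,    # alias of openai_functions.get_quantum_functions
        get_openai_tools,        # alias of openai_functions.get_quantum_tools
        execute_quantum_function,
    )

    legacy_defs = get_openai_functions()   # legacy "functions" format
    tool_defs = get_openai_tools()         # newer "tools" format
    print(len(legacy_defs), len(tool_defs))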
quantumflow/integrations/openai_functions.py
ADDED
@@ -0,0 +1,578 @@
+"""
+OpenAI Function Calling Integration for QuantumFlow.
+
+Provides function definitions and execution handlers compatible with
+OpenAI's function calling API (GPT-4, GPT-3.5-turbo).
+
+Usage:
+    from openai import OpenAI
+    from quantumflow.integrations.openai_functions import (
+        get_quantum_functions,
+        execute_quantum_function,
+    )
+
+    client = OpenAI()
+    functions = get_quantum_functions()
+
+    response = client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Compress these tokens: 100, 200, 150"}],
+        functions=functions,
+        function_call="auto",
+    )
+
+    # Execute the function call
+    if response.choices[0].message.function_call:
+        result = execute_quantum_function(
+            response.choices[0].message.function_call.name,
+            json.loads(response.choices[0].message.function_call.arguments)
+        )
+"""
+
+import json
+from typing import Any, Dict, List, Optional, Callable
+from dataclasses import dataclass
+
+from quantumflow.core.quantum_compressor import QuantumCompressor
+from quantumflow.core.quantum_backprop import QuantumBackprop
+from quantumflow.core.teleportation import QuantumTeleporter, QKDExchange, SecureMessenger
+from quantumflow.core.entanglement import Entangler
+from quantumflow.core.memory import QuantumMemory
+
+
+# OpenAI Function Definitions
+QUANTUM_FUNCTIONS: List[Dict[str, Any]] = [
+    {
+        "name": "quantum_compress",
+        "description": "Compress a list of token values using quantum amplitude encoding. Achieves ~53% compression while preserving semantic information.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "tokens": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "List of token values (integers or floats) to compress",
+                },
+                "backend": {
+                    "type": "string",
+                    "enum": ["simulator", "ibm", "aws"],
+                    "description": "Quantum backend to use. Default: simulator",
+                },
+            },
+            "required": ["tokens"],
+        },
+    },
+    {
+        "name": "quantum_decompress",
+        "description": "Decompress previously quantum-compressed data back to original tokens.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "compressed_state": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "The compressed quantum state amplitudes",
+                },
+                "n_qubits": {
+                    "type": "integer",
+                    "description": "Number of qubits used in compression",
+                },
+                "original_length": {
+                    "type": "integer",
+                    "description": "Original number of tokens",
+                },
+            },
+            "required": ["compressed_state", "n_qubits", "original_length"],
+        },
+    },
+    {
+        "name": "quantum_gradient",
+        "description": "Compute gradients using quantum backpropagation via teleportation protocol. Achieves 97.78% gradient similarity with classical methods.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "weights": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "Neural network weights to compute gradients for",
+                },
+                "loss_value": {
+                    "type": "number",
+                    "description": "Current loss value",
+                },
+                "learning_rate": {
+                    "type": "number",
+                    "description": "Learning rate for gradient computation. Default: 0.01",
+                },
+            },
+            "required": ["weights", "loss_value"],
+        },
+    },
+    {
+        "name": "quantum_entangle",
+        "description": "Create quantum entanglement between data points for correlation analysis.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "data_a": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "First data array",
+                },
+                "data_b": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "Second data array",
+                },
+            },
+            "required": ["data_a", "data_b"],
+        },
+    },
+    {
+        "name": "quantum_teleport",
+        "description": "Teleport quantum state information using entangled Bell pairs.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "state": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "Quantum state amplitudes to teleport",
+                },
+                "n_pairs": {
+                    "type": "integer",
+                    "description": "Number of Bell pairs to use. Default: 10",
+                },
+            },
+            "required": ["state"],
+        },
+    },
+    {
+        "name": "qkd_exchange",
+        "description": "Perform BB84 quantum key distribution for unconditionally secure key exchange.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "key_length": {
+                    "type": "integer",
+                    "description": "Desired key length in bits. Default: 256",
+                },
+                "error_threshold": {
+                    "type": "number",
+                    "description": "Maximum acceptable error rate. Default: 0.11",
+                },
+            },
+            "required": [],
+        },
+    },
+    {
+        "name": "secure_message",
+        "description": "Send a message using quantum-secure encryption with QKD-generated keys.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "message": {
+                    "type": "string",
+                    "description": "The message to encrypt and send",
+                },
+                "key_length": {
+                    "type": "integer",
+                    "description": "Key length for encryption. Default: 256",
+                },
+            },
+            "required": ["message"],
+        },
+    },
+    {
+        "name": "quantum_memory_store",
+        "description": "Store data in quantum memory with O(log n) space complexity.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "key": {
+                    "type": "string",
+                    "description": "Storage key identifier",
+                },
+                "data": {
+                    "type": "array",
+                    "items": {"type": "number"},
+                    "description": "Data to store",
+                },
+            },
+            "required": ["key", "data"],
+        },
+    },
+    {
+        "name": "quantum_memory_retrieve",
+        "description": "Retrieve data from quantum memory.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "key": {
+                    "type": "string",
+                    "description": "Storage key identifier",
+                },
+            },
+            "required": ["key"],
+        },
+    },
+]
+
+
+# Singleton instances for stateful operations
+_compressor: Optional[QuantumCompressor] = None
+_backprop: Optional[QuantumBackprop] = None
+_teleporter: Optional[QuantumTeleporter] = None
+_qkd: Optional[QKDExchange] = None
+_messenger: Optional[SecureMessenger] = None
+_entangler: Optional[Entangler] = None
+_memory: Optional[QuantumMemory] = None
+
+
+def _get_compressor(backend: str = "simulator") -> QuantumCompressor:
+    global _compressor
+    if _compressor is None:
+        _compressor = QuantumCompressor(backend=backend)
+    return _compressor
+
+
+def _get_backprop() -> QuantumBackprop:
+    global _backprop
+    if _backprop is None:
+        _backprop = QuantumBackprop()
+    return _backprop
+
+
+def _get_teleporter() -> QuantumTeleporter:
+    global _teleporter
+    if _teleporter is None:
+        _teleporter = QuantumTeleporter()
+    return _teleporter
+
+
+def _get_qkd() -> QKDExchange:
+    global _qkd
+    if _qkd is None:
+        _qkd = QKDExchange()
+    return _qkd
+
+
+def _get_messenger() -> SecureMessenger:
+    global _messenger
+    if _messenger is None:
+        _messenger = SecureMessenger()
+    return _messenger
+
+
+def _get_entangler() -> Entangler:
+    global _entangler
+    if _entangler is None:
+        _entangler = Entangler()
+    return _entangler
+
+
+def _get_memory() -> QuantumMemory:
+    global _memory
+    if _memory is None:
+        _memory = QuantumMemory()
+    return _memory
+
+
+def get_quantum_functions() -> List[Dict[str, Any]]:
+    """
+    Get the list of quantum function definitions for OpenAI's function calling API.
+
+    Returns:
+        List of function definitions compatible with OpenAI's API.
+
+    Example:
+        from openai import OpenAI
+        from quantumflow.integrations.openai_functions import get_quantum_functions
+
+        client = OpenAI()
+        response = client.chat.completions.create(
+            model="gpt-4",
+            messages=[...],
+            functions=get_quantum_functions(),
+            function_call="auto",
+        )
+    """
+    return QUANTUM_FUNCTIONS.copy()
+
+
+def get_quantum_tools() -> List[Dict[str, Any]]:
+    """
+    Get the list of quantum tools for OpenAI's tools API (newer format).
+
+    Returns:
+        List of tool definitions compatible with OpenAI's tools API.
+
+    Example:
+        from openai import OpenAI
+        from quantumflow.integrations.openai_functions import get_quantum_tools
+
+        client = OpenAI()
+        response = client.chat.completions.create(
+            model="gpt-4-turbo",
+            messages=[...],
+            tools=get_quantum_tools(),
+            tool_choice="auto",
+        )
+    """
+    return [
+        {"type": "function", "function": func}
+        for func in QUANTUM_FUNCTIONS
+    ]
+
+
+def execute_quantum_function(name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Execute a quantum function by name with the given arguments.
+
+    Args:
+        name: The function name to execute.
+        arguments: Dictionary of function arguments.
+
+    Returns:
+        Dictionary containing the function result.
+
+    Raises:
+        ValueError: If the function name is not recognized.
+
+    Example:
+        result = execute_quantum_function(
+            "quantum_compress",
+            {"tokens": [100, 200, 150, 175]}
+        )
+    """
+    handlers: Dict[str, Callable] = {
+        "quantum_compress": _handle_compress,
+        "quantum_decompress": _handle_decompress,
+        "quantum_gradient": _handle_gradient,
+        "quantum_entangle": _handle_entangle,
+        "quantum_teleport": _handle_teleport,
+        "qkd_exchange": _handle_qkd,
+        "secure_message": _handle_secure_message,
+        "quantum_memory_store": _handle_memory_store,
+        "quantum_memory_retrieve": _handle_memory_retrieve,
+    }
+
+    if name not in handlers:
+        raise ValueError(f"Unknown quantum function: {name}")
+
+    return handlers[name](arguments)
+
+
+def _handle_compress(args: Dict[str, Any]) -> Dict[str, Any]:
+    tokens = args["tokens"]
+    backend = args.get("backend", "simulator")
+    compressor = _get_compressor(backend)
+    result = compressor.compress(tokens)
+    return {
+        "success": True,
+        "compressed_state": result.amplitudes.tolist() if hasattr(result.amplitudes, 'tolist') else list(result.amplitudes),
+        "n_qubits": result.n_qubits,
+        "original_length": len(tokens),
+        "compression_percentage": result.compression_percentage,
+        "fidelity": result.fidelity,
+    }
+
+
+def _handle_decompress(args: Dict[str, Any]) -> Dict[str, Any]:
+    compressor = _get_compressor()
+    result = compressor.decompress(
+        compressed_state=args["compressed_state"],
+        n_qubits=args["n_qubits"],
+        original_length=args["original_length"],
+    )
+    return {
+        "success": True,
+        "tokens": result.tolist() if hasattr(result, 'tolist') else list(result),
+    }
+
+
+def _handle_gradient(args: Dict[str, Any]) -> Dict[str, Any]:
+    backprop = _get_backprop()
+    weights = args["weights"]
+    loss = args["loss_value"]
+    lr = args.get("learning_rate", 0.01)
+    result = backprop.compute_gradients(weights, loss, learning_rate=lr)
+    return {
+        "success": True,
+        "gradients": result.gradients.tolist() if hasattr(result.gradients, 'tolist') else list(result.gradients),
+        "similarity": result.similarity,
+        "teleportation_fidelity": result.fidelity,
+    }
+
+
+def _handle_entangle(args: Dict[str, Any]) -> Dict[str, Any]:
+    entangler = _get_entangler()
+    result = entangler.entangle(args["data_a"], args["data_b"])
+    return {
+        "success": True,
+        "correlation": result.correlation,
+        "bell_state": result.bell_state,
+        "fidelity": result.fidelity,
+    }
+
+
+def _handle_teleport(args: Dict[str, Any]) -> Dict[str, Any]:
+    teleporter = _get_teleporter()
+    n_pairs = args.get("n_pairs", 10)
+    result = teleporter.teleport(args["state"], n_pairs=n_pairs)
+    return {
+        "success": True,
+        "teleported_state": result.state.tolist() if hasattr(result.state, 'tolist') else list(result.state),
+        "fidelity": result.fidelity,
+        "bell_pairs_used": n_pairs,
+    }
+
+
+def _handle_qkd(args: Dict[str, Any]) -> Dict[str, Any]:
+    qkd = _get_qkd()
+    key_length = args.get("key_length", 256)
+    error_threshold = args.get("error_threshold", 0.11)
+    result = qkd.exchange(key_length=key_length, error_threshold=error_threshold)
+    return {
+        "success": True,
+        "key": result.key,
+        "key_length": len(result.key),
+        "error_rate": result.error_rate,
+        "secure": result.secure,
+    }
+
+
+def _handle_secure_message(args: Dict[str, Any]) -> Dict[str, Any]:
+    messenger = _get_messenger()
+    message = args["message"]
+    key_length = args.get("key_length", 256)
+    result = messenger.send_message(message, key_length=key_length)
+    return {
+        "success": True,
+        "encrypted": result.encrypted,
+        "message_hash": result.message_hash,
+        "key_id": result.key_id,
+    }
+
+
+def _handle_memory_store(args: Dict[str, Any]) -> Dict[str, Any]:
+    memory = _get_memory()
+    key = args["key"]
+    data = args["data"]
+    memory.store(key, data)
+    return {
+        "success": True,
+        "key": key,
+        "stored_length": len(data),
+        "quantum_bits_used": memory.get_usage(key),
+    }
+
+
+def _handle_memory_retrieve(args: Dict[str, Any]) -> Dict[str, Any]:
+    memory = _get_memory()
+    key = args["key"]
+    data = memory.retrieve(key)
+    if data is None:
+        return {
+            "success": False,
+            "error": f"Key '{key}' not found in quantum memory",
+        }
+    return {
+        "success": True,
+        "key": key,
+        "data": data.tolist() if hasattr(data, 'tolist') else list(data),
+    }
+
+
+@dataclass
+class QuantumAssistant:
+    """
+    A helper class for integrating QuantumFlow with OpenAI's chat completions.
+
+    Example:
+        from openai import OpenAI
+        from quantumflow.integrations.openai_functions import QuantumAssistant
+
+        client = OpenAI()
+        assistant = QuantumAssistant(client)
+
+        # Chat with quantum capabilities
+        response = assistant.chat("Compress these tokens: 100, 200, 150, 175")
+        print(response)
+    """
+
+    client: Any  # OpenAI client
+    model: str = "gpt-4"
+    messages: List[Dict[str, str]] = None
+
+    def __post_init__(self):
+        if self.messages is None:
+            self.messages = [
+                {
+                    "role": "system",
+                    "content": (
+                        "You are a quantum computing assistant with access to QuantumFlow tools. "
+                        "You can compress tokens, compute quantum gradients, perform quantum key distribution, "
+                        "send secure messages, and manage quantum memory. Use these tools to help users "
+                        "leverage quantum computing capabilities."
+                    ),
+                }
+            ]
+
+    def chat(self, user_message: str) -> str:
+        """
+        Send a message and get a response, automatically handling function calls.
+
+        Args:
+            user_message: The user's message.
+
+        Returns:
+            The assistant's response text.
+        """
+        self.messages.append({"role": "user", "content": user_message})
+
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=self.messages,
+            tools=get_quantum_tools(),
+            tool_choice="auto",
+        )
+
+        message = response.choices[0].message
+
+        # Handle tool calls
+        while message.tool_calls:
+            self.messages.append(message)
+
+            for tool_call in message.tool_calls:
+                function_name = tool_call.function.name
+                arguments = json.loads(tool_call.function.arguments)
+
+                try:
+                    result = execute_quantum_function(function_name, arguments)
+                    tool_response = json.dumps(result)
+                except Exception as e:
+                    tool_response = json.dumps({"error": str(e)})
+
+                self.messages.append({
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": tool_response,
+                })
+
+            # Get next response
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=self.messages,
+                tools=get_quantum_tools(),
+                tool_choice="auto",
+            )
+            message = response.choices[0].message
+
+        # Final response
+        self.messages.append({"role": "assistant", "content": message.content})
+        return message.content
+
+    def reset(self):
+        """Reset the conversation history."""
+        self.__post_init__()
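For callers that do not want the QuantumAssistant helper, a single manual tool-call round with the newer tools API can look like the sketch below. It assumes openai>=1.x with an API key configured; the model name and prompt are illustrative only.

    import json
    from openai import OpenAI
    from quantumflow.integrations.openai_functions import (
        get_quantum_tools,
        execute_quantum_function,
    )

    client = OpenAI()
    messages = [{"role": "user", "content": "Compress these tokens: 100, 200, 150, 175"}]

    response = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=messages,
        tools=get_quantum_tools(),
        tool_choice="auto",
    )
    msg = response.choices[0].message

    if msg.tool_calls:
        messages.append(msg)
        for call in msg.tool_calls:
            # Dispatch to the local quantum handler and return the result as a tool message.
            result = execute_quantum_function(call.function.name,
                                              json.loads(call.function.arguments))
            messages.append({"role": "tool", "tool_call_id": call.id,
                             "content": json.dumps(result)})
        final = client.chat.completions.create(model="gpt-4-turbo", messages=messages)
        print(final.choices[0].message.content)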
{quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/RECORD
CHANGED
@@ -9,7 +9,7 @@ db/__init__.py,sha256=CLyI_3_AP7wQATuxLV2yHPvYDNikJYmH5BMQ3Oez1xw,280
 db/crud.py,sha256=q_HpP0RzsI7Lz1EMhEWzaMxdxzGAdVTBD2LALqbGLGQ,8175
 db/database.py,sha256=Md_e3z1t6NzKCCVdS4gRo0Pf6LNZvRClIX4QU8F8InI,2078
 db/models.py,sha256=W-NLqT6kGtXTN3r4DqVPSPglt-_iFDBlqB6e3nzflrg,5975
-quantumflow/__init__.py,sha256=
+quantumflow/__init__.py,sha256=lUdk3uElyzBJe_VRjAjbehIULFDcbmvFePieaayWIwk,1731
 quantumflow/algorithms/__init__.py,sha256=waXASb2jnbAcjV-xS1wx-bGmPJ5lCj111dJ14eB8KCo,916
 quantumflow/algorithms/compression/__init__.py,sha256=rejDCdZJEy1tamQdDaZodGbo8wA1rszlXEtwvYgZO7A,361
 quantumflow/algorithms/compression/amplitude_amplification.py,sha256=pebJiATIhwEdnAZY4CwInYPEKbtZufJ0Ka3OCdtvZRk,5440
@@ -30,8 +30,9 @@ quantumflow/algorithms/utility/__init__.py,sha256=O-pY9GQ3i5DspBg-pLXFKz2QpLobL-
 quantumflow/algorithms/utility/circuit_optimizer.py,sha256=PsvSZA6teiO4soktMW2hiVKrTEieqw5ccKYjtV4T63I,5839
 quantumflow/algorithms/utility/error_correction.py,sha256=1L4ikcgasFwKYFSCxkx9Bk-Jz9En1KqZI-uGUScjsFw,9398
 quantumflow/api/__init__.py,sha256=WxcaYzyvrvOdWFlJn2AchfEm9Jy8ytFD9f0POaAF-f8,25
-quantumflow/api/routes/__init__.py,sha256=
+quantumflow/api/routes/__init__.py,sha256=qth1cwVYo7-9a1s44XjuusdHUoWHimgXjDkFW1sjCx8,226
 quantumflow/api/routes/billing_routes.py,sha256=MDvwkbVaXZhFl4_j5pgJ72_049a14u3fgAXxh314OdQ,16955
+quantumflow/api/routes/workflow_routes.py,sha256=4aiaOXPRq4MTQdbp2nHKEgcsoB6cugxKIuCKV1nDmog,6875
 quantumflow/backends/__init__.py,sha256=Q2Oi3VMp-AMXYN3QffsYd9NFz-qOaxgK_iSVCpVIivI,829
 quantumflow/backends/base_backend.py,sha256=cvvFcInPtzmzp1-uF796ciW3GpXgzaj4NJI2u_zsbL4,5408
 quantumflow/backends/braket_backend.py,sha256=vpWH0w6sQa_56SEBk_kvomKsuOJlb1ILANS-jUGcDv4,11527
@@ -46,15 +47,17 @@ quantumflow/core/memory.py,sha256=0rB2Dks3XM1NEiL-v4G0P6jeL2BvcdJNljezRrVccdI,44
 quantumflow/core/quantum_backprop.py,sha256=XozlPtwinegI5NosxlW4TrpP5Zh58_yI8WnxznwBHlU,12370
 quantumflow/core/quantum_compressor.py,sha256=okzfF4-1uL22FcK9Xhng1DLzmIbz3ADN-R1xzFTv-tI,10094
 quantumflow/core/teleportation.py,sha256=_T5rRItjFramqWdqBNLwu_fY7Yr9pmdUda4L7STcwcA,12420
-quantumflow/
+quantumflow/core/workflow.py,sha256=HCazmlPns68TvIJytvcVoy0LSHb9sO-pUcDN5wPR2Zw,10116
+quantumflow/integrations/__init__.py,sha256=ksQr0HHYMwTBY0_MgyT5MUBX9fdRn_QErDzHs8rQ7F8,2979
 quantumflow/integrations/autogen_tools.py,sha256=sTTX7mGFjUKbmVatlcY9N9F2NeR6vkIj-VYduE0SNus,14672
 quantumflow/integrations/crewai_agents.py,sha256=t62hukL0xg2FV54yczTAakNYQA-AOZ9AWWgzDnH0LGM,12604
 quantumflow/integrations/crewai_tools.py,sha256=bY5uJyKmCegt6Kb9hvErhvaKcCDlk2_injx50-krN7E,13744
 quantumflow/integrations/langchain_memory.py,sha256=wgYTdovncZNWpFwcNZjhNUqNRi661ys9GXaHYmbXP-Q,12608
 quantumflow/integrations/langchain_tools.py,sha256=bDrKZDYSRQJJGSNc9iay1Q4NoIR8CHmtZLcybS5ub_w,12401
 quantumflow/integrations/mcp_server.py,sha256=KJTAxJOyCVl7-whTD1iss9VZmyi0K1f4gNJCH8Cvl_0,21117
-
-quantumflow_sdk-0.1.
-quantumflow_sdk-0.1.
-quantumflow_sdk-0.1.
-quantumflow_sdk-0.1.
+quantumflow/integrations/openai_functions.py,sha256=8jQH4XkBxK9AbwC47BEYVIrbRAEWGdsMyw0xbZrGNB4,18412
+quantumflow_sdk-0.2.1.dist-info/METADATA,sha256=QcTBk2aAfAE7goV311z5qrdubgYNhormcxUNdYqGsSI,5461
+quantumflow_sdk-0.2.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+quantumflow_sdk-0.2.1.dist-info/entry_points.txt,sha256=ebX2acoOLgym42XZEqym3OfKCYiPz-mFuPSSGsHFz4c,53
+quantumflow_sdk-0.2.1.dist-info/top_level.txt,sha256=hEr_GRvoZ3-83naVIhNuJvoAND1aCvhBag_ynxQguIo,19
+quantumflow_sdk-0.2.1.dist-info/RECORD,,
{quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/WHEEL
File without changes
{quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/entry_points.txt
File without changes
{quantumflow_sdk-0.1.0.dist-info → quantumflow_sdk-0.2.1.dist-info}/top_level.txt
File without changes