quantumflow-sdk 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
- api/main.py +28 -1
- api/models.py +41 -0
- api/routes/algorithm_routes.py +134 -0
- api/routes/chat_routes.py +565 -0
- api/routes/pipeline_routes.py +578 -0
- db/models.py +357 -0
- quantumflow/algorithms/machine_learning/__init__.py +14 -2
- quantumflow/algorithms/machine_learning/vqe.py +355 -3
- quantumflow/core/__init__.py +10 -1
- quantumflow/core/quantum_compressor.py +379 -1
- quantumflow/integrations/domain_agents.py +617 -0
- quantumflow/pipeline/__init__.py +29 -0
- quantumflow/pipeline/anomaly_detector.py +521 -0
- quantumflow/pipeline/base_pipeline.py +602 -0
- quantumflow/pipeline/checkpoint_manager.py +587 -0
- quantumflow/pipeline/finance/__init__.py +5 -0
- quantumflow/pipeline/finance/portfolio_optimization.py +595 -0
- quantumflow/pipeline/healthcare/__init__.py +5 -0
- quantumflow/pipeline/healthcare/protein_folding.py +994 -0
- quantumflow/pipeline/temporal_memory.py +577 -0
- {quantumflow_sdk-0.3.0.dist-info → quantumflow_sdk-0.4.0.dist-info}/METADATA +3 -3
- {quantumflow_sdk-0.3.0.dist-info → quantumflow_sdk-0.4.0.dist-info}/RECORD +25 -13
- {quantumflow_sdk-0.3.0.dist-info → quantumflow_sdk-0.4.0.dist-info}/WHEEL +0 -0
- {quantumflow_sdk-0.3.0.dist-info → quantumflow_sdk-0.4.0.dist-info}/entry_points.txt +0 -0
- {quantumflow_sdk-0.3.0.dist-info → quantumflow_sdk-0.4.0.dist-info}/top_level.txt +0 -0
quantumflow/pipeline/finance/portfolio_optimization.py
@@ -0,0 +1,595 @@
"""
Portfolio Optimization Pipeline with QAOA.

Uses the Quantum Approximate Optimization Algorithm (QAOA) for portfolio
optimization with risk metrics, regime detection, and auto-rollback.

Example:
    pipeline = PortfolioOptimizationPipeline(
        name="Tech Portfolio",
        assets=["AAPL", "GOOGL", "MSFT", "AMZN"],
        expected_returns=[0.12, 0.10, 0.11, 0.09],
        covariance_matrix=cov_matrix,
        initial_capital=100000,
    )

    result = pipeline.run(total_steps=50)
    print(f"Final Sharpe: {result.final_state.metrics['sharpe_ratio']}")
"""

import logging
import math
import random
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional

from quantumflow.pipeline.base_pipeline import (
    BasePipeline,
    PipelineConfig,
    PipelineState,
)
from quantumflow.pipeline.anomaly_detector import (
    AnomalyDetector,
    create_var_detector,
    create_drawdown_detector,
)

logger = logging.getLogger(__name__)


class MarketRegime(str, Enum):
    """Market regime classification."""

    BULL = "bull"
    BEAR = "bear"
    SIDEWAYS = "sideways"
    CRISIS = "crisis"


@dataclass
class PortfolioConfig(PipelineConfig):
    """Configuration for portfolio optimization pipeline."""

    # Assets
    assets: List[str] = field(default_factory=list)
    expected_returns: List[float] = field(default_factory=list)
    covariance_matrix: List[List[float]] = field(default_factory=list)

    # Capital
    initial_capital: float = 100000.0

    # QAOA settings
    n_qubits: int = 8
    qaoa_depth: int = 2
    optimizer: str = "COBYLA"

    # Risk constraints
    max_var: float = 0.05  # 5% VaR limit
    max_drawdown: float = 0.20  # 20% max drawdown
    target_return: float = 0.10  # 10% target annual return
    risk_free_rate: float = 0.02  # 2% risk-free rate

    # Rebalancing
    rebalance_threshold: float = 0.05  # 5% drift triggers rebalance
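
# Illustrative sketch (not part of the shipped file): constructing a config
# with tighter risk limits. All fields below are defined in PortfolioConfig
# above; the specific values are made up, and this assumes any fields
# inherited from PipelineConfig have defaults.
#
# conservative = PortfolioConfig(
#     assets=["AAPL", "GOOGL"],
#     expected_returns=[0.12, 0.10],
#     initial_capital=50000.0,
#     max_var=0.02,        # flag a 2% one-step VaR breach
#     max_drawdown=0.10,   # flag a 10% peak-to-trough loss
#     qaoa_depth=3,
# )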


@dataclass
class PortfolioState(PipelineState):
    """State for portfolio optimization pipeline."""

    # Portfolio weights
    weights: List[float] = field(default_factory=list)
    weights_history: List[List[float]] = field(default_factory=list)

    # Value tracking
    portfolio_value: float = 0.0
    value_history: List[float] = field(default_factory=list)
    peak_value: float = 0.0

    # Risk metrics
    var: float = 0.0  # Value at Risk
    cvar: float = 0.0  # Conditional VaR
    sharpe_ratio: float = 0.0
    sortino_ratio: float = 0.0
    max_drawdown: float = 0.0
    current_drawdown: float = 0.0

    # Regime
    market_regime: str = "sideways"
    regime_history: List[str] = field(default_factory=list)

    # QAOA state
    qaoa_parameters: List[float] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to dictionary."""
        base = super().to_dict()
        base.update({
            "weights": self.weights,
            "weights_history": self.weights_history,
            "portfolio_value": self.portfolio_value,
            "value_history": self.value_history,
            "peak_value": self.peak_value,
            "var": self.var,
            "cvar": self.cvar,
            "sharpe_ratio": self.sharpe_ratio,
            "sortino_ratio": self.sortino_ratio,
            "max_drawdown": self.max_drawdown,
            "current_drawdown": self.current_drawdown,
            "market_regime": self.market_regime,
            "regime_history": self.regime_history,
            "qaoa_parameters": self.qaoa_parameters,
        })
        return base

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "PortfolioState":
        """Deserialize from dictionary."""
        state = cls()
        state.step = data.get("step", 0)
        state.data = data.get("data", {})
        state.metrics = data.get("metrics", {})
        state.gradient_history = data.get("gradient_history", [])
        state.weights = data.get("weights", [])
        state.weights_history = data.get("weights_history", [])
        state.portfolio_value = data.get("portfolio_value", 0.0)
        state.value_history = data.get("value_history", [])
        state.peak_value = data.get("peak_value", 0.0)
        state.var = data.get("var", 0.0)
        state.cvar = data.get("cvar", 0.0)
        state.sharpe_ratio = data.get("sharpe_ratio", 0.0)
        state.sortino_ratio = data.get("sortino_ratio", 0.0)
        state.max_drawdown = data.get("max_drawdown", 0.0)
        state.current_drawdown = data.get("current_drawdown", 0.0)
        state.market_regime = data.get("market_regime", "sideways")
        state.regime_history = data.get("regime_history", [])
        state.qaoa_parameters = data.get("qaoa_parameters", [])
        return state
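
# Illustrative round-trip sketch (not part of the shipped file): to_dict and
# from_dict above are symmetric, so a state survives serialization intact.
# This assumes PipelineState provides the step/data/metrics/gradient_history
# attributes that from_dict restores.
#
# state = PortfolioState()
# state.weights = [0.25, 0.25, 0.25, 0.25]
# state.portfolio_value = 100000.0
# restored = PortfolioState.from_dict(state.to_dict())
# assert restored.weights == state.weights
# assert restored.portfolio_value == state.portfolio_value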


class PortfolioOptimizationPipeline(BasePipeline):
    """
    Pipeline for portfolio optimization using QAOA.

    Features:
    - QAOA-based portfolio optimization
    - Risk metrics: VaR, CVaR, Sharpe, Sortino
    - Market regime detection
    - Auto-rollback on max drawdown breach
    """

    def __init__(
        self,
        name: str,
        assets: List[str],
        expected_returns: List[float],
        covariance_matrix: Optional[List[List[float]]] = None,
        initial_capital: float = 100000.0,
        config: Optional[PortfolioConfig] = None,
        **kwargs,
    ):
        """
        Initialize portfolio optimization pipeline.

        Args:
            name: Pipeline name
            assets: List of asset symbols
            expected_returns: Expected annual returns
            covariance_matrix: Return covariance matrix
            initial_capital: Starting capital
            config: Pipeline configuration
        """
        if config is None:
            config = PortfolioConfig(
                assets=assets,
                expected_returns=expected_returns,
                covariance_matrix=covariance_matrix or [],
                initial_capital=initial_capital,
            )

        super().__init__(name=name, config=config, **kwargs)

        self._assets = assets
        self._returns = expected_returns
        self._cov = covariance_matrix or self._generate_covariance(len(assets))
        self._initial_capital = initial_capital

        self._qaoa = None

        # Setup anomaly detectors
        self._setup_anomaly_detectors()

    @property
    def pipeline_type(self) -> str:
        return "portfolio_optimization"

    def _setup_anomaly_detectors(self):
        """Configure domain-specific anomaly detectors."""
        detector = AnomalyDetector()

        config = self.config
        if isinstance(config, PortfolioConfig):
            # VaR breach detector
            detector.register_detector(
                "var_breach",
                create_var_detector(max_var_breach=config.max_var),
            )

            # Drawdown detector
            detector.register_detector(
                "drawdown_breach",
                create_drawdown_detector(max_drawdown=config.max_drawdown),
            )

        self.set_anomaly_detector(detector)

    def _generate_covariance(self, n: int) -> List[List[float]]:
        """Generate a random symmetric covariance-like matrix.

        Note: this construction is symmetric but not guaranteed to be
        positive semi-definite.
        """
        cov = [[0.0] * n for _ in range(n)]

        for i in range(n):
            for j in range(n):
                if i == j:
                    cov[i][j] = random.uniform(0.01, 0.04)  # Variance
                elif i < j:
                    cov[i][j] = random.uniform(-0.01, 0.02)  # Covariance
                    cov[j][i] = cov[i][j]

        return cov
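
    # Illustrative alternative (not part of the shipped file): a matrix that
    # is positive semi-definite by construction, since any product B @ B.T
    # is PSD. This avoids the caveat noted in the docstring above; the
    # Gaussian scale 0.1 is an arbitrary choice.
    #
    # def _generate_psd_covariance(self, n: int) -> List[List[float]]:
    #     b = [[random.gauss(0.0, 0.1) for _ in range(n)] for _ in range(n)]
    #     return [
    #         [sum(b[i][k] * b[j][k] for k in range(n)) for j in range(n)]
    #         for i in range(n)
    #     ]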

    def _get_qaoa(self):
        """Get or create QAOA instance."""
        if self._qaoa is None:
            try:
                from quantumflow.algorithms.optimization.qaoa import QuantumQAOA

                config = self.config
                n_qubits = config.n_qubits if isinstance(config, PortfolioConfig) else 8

                self._qaoa = QuantumQAOA(
                    n_qubits=n_qubits,
                    backend=self.config.backend,
                )
            except ImportError:
                logger.warning("QAOA not available, using classical optimization")

        return self._qaoa

    def initialize(self) -> PortfolioState:
        """Initialize portfolio state."""
        state = PortfolioState()

        n_assets = len(self._assets)

        # Equal weight initialization
        state.weights = [1.0 / n_assets] * n_assets
        state.weights_history.append(state.weights.copy())

        # Initial values
        state.portfolio_value = self._initial_capital
        state.value_history.append(state.portfolio_value)
        state.peak_value = state.portfolio_value

        # Initialize QAOA parameters
        config = self.config
        depth = config.qaoa_depth if isinstance(config, PortfolioConfig) else 2
        state.qaoa_parameters = [
            random.uniform(0, 2 * math.pi) for _ in range(2 * depth)
        ]

        # Compute initial risk metrics
        state = self._compute_risk_metrics(state)

        # Detect initial regime
        state.market_regime = self._detect_regime(state)
        state.regime_history.append(state.market_regime)

        # Update metrics
        state.update_metrics(
            portfolio_value=state.portfolio_value,
            var=state.var,
            sharpe_ratio=state.sharpe_ratio,
            max_drawdown=state.max_drawdown,
            regime=state.market_regime,
        )

        return state

    def execute_step(self, step: int, state: PortfolioState) -> PortfolioState:
        """Execute one optimization step."""
        config = self.config
        if not isinstance(config, PortfolioConfig):
            config = PortfolioConfig()

        # Get QAOA optimizer
        qaoa = self._get_qaoa()

        if qaoa:
            try:
                # Formulate portfolio optimization as QUBO
                qubo = self._create_portfolio_qubo(state, config)

                # Run QAOA
                qaoa_result = qaoa.optimize(
                    problem=qubo,
                    p=config.qaoa_depth,
                    initial_params=state.qaoa_parameters,
                )

                # Extract optimal weights
                if "solution" in qaoa_result:
                    state.weights = self._decode_weights(
                        qaoa_result["solution"], len(self._assets)
                    )

                state.qaoa_parameters = qaoa_result.get(
                    "optimal_params", state.qaoa_parameters
                )

                # Track gradient
                if "gradient" in qaoa_result:
                    grad_norm = math.sqrt(sum(g * g for g in qaoa_result["gradient"]))
                    state.add_gradient(grad_norm)

            except Exception as e:
                logger.warning(f"QAOA optimization failed: {e}")
                state = self._classical_optimization_step(state, config)
        else:
            state = self._classical_optimization_step(state, config)

        # Normalize weights
        state.weights = self._normalize_weights(state.weights)
        state.weights_history.append(state.weights.copy())

        # Simulate portfolio return for this step
        step_return = self._simulate_step_return(state.weights)
        state.portfolio_value *= (1 + step_return)
        state.value_history.append(state.portfolio_value)

        # Update peak value
        if state.portfolio_value > state.peak_value:
            state.peak_value = state.portfolio_value

        # Compute drawdown
        if state.peak_value > 0:
            state.current_drawdown = (state.peak_value - state.portfolio_value) / state.peak_value
            state.max_drawdown = max(state.max_drawdown, state.current_drawdown)

        # Compute risk metrics
        state = self._compute_risk_metrics(state)

        # Detect market regime
        state.market_regime = self._detect_regime(state)
        state.regime_history.append(state.market_regime)

        # Update metrics
        state.update_metrics(
            portfolio_value=state.portfolio_value,
            var=state.var,
            cvar=state.cvar,
            sharpe_ratio=state.sharpe_ratio,
            sortino_ratio=state.sortino_ratio,
            max_drawdown=state.max_drawdown,
            current_drawdown=state.current_drawdown,
            regime=state.market_regime,
        )

        return state
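
    # Worked example (not part of the shipped file) of the drawdown update
    # above: with peak_value = 120_000 and portfolio_value = 102_000,
    # current_drawdown = (120_000 - 102_000) / 120_000 = 0.15, i.e. 15%,
    # and max_drawdown keeps the largest such value seen so far.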

    def _classical_optimization_step(
        self, state: PortfolioState, config: PortfolioConfig
    ) -> PortfolioState:
        """Perform classical mean-variance optimization step."""
        n = len(self._assets)

        # Simple gradient-based update
        gradients = []

        for i in range(n):
            # Expected return contribution
            ret_grad = self._returns[i] if i < len(self._returns) else 0.1

            # Risk contribution (variance + covariance)
            risk_grad = 0.0
            for j in range(n):
                if i < len(self._cov) and j < len(self._cov[i]):
                    risk_grad += 2 * state.weights[j] * self._cov[i][j]

            # Mean-variance gradient (return minus penalized risk)
            grad = ret_grad - 0.5 * risk_grad
            gradients.append(grad)

        # Update weights
        learning_rate = 0.1
        for i in range(n):
            state.weights[i] += learning_rate * gradients[i]

        # Track gradient
        grad_norm = math.sqrt(sum(g * g for g in gradients))
        state.add_gradient(grad_norm)

        return state
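
    # The update above ascends the mean-variance objective
    # f(w) = mu . w - 0.5 * w^T Sigma w, whose gradient is mu - Sigma w:
    # ret_grad is mu_i and 0.5 * risk_grad is (Sigma w)_i. Worked example
    # (values made up, not part of the shipped file): with mu_1 = 0.12,
    # Sigma row 1 = [0.04, 0.01], and w = [0.5, 0.5], (Sigma w)_1 = 0.025,
    # so grad_1 = 0.095 and weight 1 rises by 0.1 * 0.095 = 0.0095 this step.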

    def _normalize_weights(self, weights: List[float]) -> List[float]:
        """Normalize weights to sum to 1, enforcing non-negativity."""
        # Make non-negative
        weights = [max(0, w) for w in weights]

        total = sum(weights)
        if total == 0:
            # Equal weights if all zero
            n = len(weights)
            return [1.0 / n] * n

        return [w / total for w in weights]
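
    # Worked example (not part of the shipped file): [-0.2, 0.3, 0.5] clips
    # to [0.0, 0.3, 0.5] (sum 0.8), which normalizes to [0.0, 0.375, 0.625].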

    def _create_portfolio_qubo(
        self, state: PortfolioState, config: PortfolioConfig
    ) -> Dict[str, Any]:
        """Create QUBO formulation for portfolio optimization."""
        n = len(self._assets)

        # Simplified QUBO: maximize return - lambda * risk
        qubo = {}
        risk_aversion = 2.0

        for i in range(n):
            # Linear term (expected return)
            ret = self._returns[i] if i < len(self._returns) else 0.1
            qubo[(i, i)] = -ret  # Negative because QAOA minimizes

            # Quadratic term (covariance = risk)
            for j in range(i, n):
                if i < len(self._cov) and j < len(self._cov[i]):
                    cov_val = self._cov[i][j]
                    qubo[(i, j)] = qubo.get((i, j), 0) + risk_aversion * cov_val

        return {
            "type": "portfolio",
            "qubo": qubo,
            "n_assets": n,
            "target_return": config.target_return,
        }
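
    # Illustrative sketch (not part of the shipped file): the QUBO built
    # above encodes, over binary asset-selection variables x_i in {0, 1},
    #     minimize  sum_i (-mu_i) x_i + lambda * sum_{i<=j} Sigma_ij x_i x_j
    # with lambda = risk_aversion. A candidate bitstring's energy could be
    # evaluated as follows (note x_i * x_i = x_i covers the linear terms):
    #
    # def qubo_energy(qubo, bits):
    #     return sum(coeff * bits[i] * bits[j] for (i, j), coeff in qubo.items())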

    def _decode_weights(self, solution: Any, n_assets: int) -> List[float]:
        """Decode QAOA solution to portfolio weights."""
        if isinstance(solution, list):
            if len(solution) >= n_assets:
                return solution[:n_assets]
            else:
                # Pad with zeros
                return solution + [0.0] * (n_assets - len(solution))

        if isinstance(solution, dict):
            weights = [0.0] * n_assets
            for key, value in solution.items():
                if isinstance(key, int) and key < n_assets:
                    weights[key] = value
            return weights

        # Fallback to equal weights
        return [1.0 / n_assets] * n_assets

    def _simulate_step_return(self, weights: List[float]) -> float:
        """Simulate portfolio return for one step."""
        # Generate random returns based on expected returns and covariance.
        # Simple model: weighted average of expected returns + noise
        expected = sum(
            w * (self._returns[i] if i < len(self._returns) else 0.1) / 252  # Daily return
            for i, w in enumerate(weights)
        )

        # Add volatility-based noise
        vol = self._compute_portfolio_volatility(weights)
        noise = random.gauss(0, vol / math.sqrt(252))  # Daily volatility

        return expected + noise

    def _compute_portfolio_volatility(self, weights: List[float]) -> float:
        """Compute portfolio volatility."""
        n = len(weights)
        variance = 0.0

        for i in range(n):
            for j in range(n):
                if i < len(self._cov) and j < len(self._cov[i]):
                    variance += weights[i] * weights[j] * self._cov[i][j]

        return math.sqrt(max(0, variance))
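
    # Worked example (not part of the shipped file) of the quadratic form
    # w^T * Sigma * w computed above: with w = [0.6, 0.4] and
    # Sigma = [[0.04, 0.01], [0.01, 0.02]],
    # variance = 0.36*0.04 + 2*0.24*0.01 + 0.16*0.02 = 0.0224,
    # so the volatility is sqrt(0.0224), roughly 0.15.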

    def _compute_risk_metrics(self, state: PortfolioState) -> PortfolioState:
        """Compute risk metrics."""
        if len(state.value_history) < 2:
            return state

        # Calculate returns
        returns = []
        for i in range(1, len(state.value_history)):
            ret = (state.value_history[i] - state.value_history[i - 1]) / state.value_history[i - 1]
            returns.append(ret)

        if not returns:
            return state

        # Mean and std of returns
        mean_return = sum(returns) / len(returns)
        variance = sum((r - mean_return) ** 2 for r in returns) / len(returns)
        std_return = math.sqrt(variance) if variance > 0 else 0.0001

        # VaR (95% confidence)
        sorted_returns = sorted(returns)
        var_index = int(0.05 * len(sorted_returns))
        state.var = -sorted_returns[var_index] if var_index < len(sorted_returns) else 0

        # CVaR (Expected Shortfall)
        if var_index > 0:
            state.cvar = -sum(sorted_returns[:var_index]) / var_index
        else:
            state.cvar = state.var

        # Sharpe Ratio
        config = self.config
        rf = config.risk_free_rate / 252 if isinstance(config, PortfolioConfig) else 0.02 / 252
        state.sharpe_ratio = (mean_return - rf) / std_return if std_return > 0 else 0

        # Sortino Ratio (downside deviation)
        downside_returns = [r for r in returns if r < rf]
        if downside_returns:
            downside_var = sum((r - rf) ** 2 for r in downside_returns) / len(downside_returns)
            downside_std = math.sqrt(downside_var)
            state.sortino_ratio = (mean_return - rf) / downside_std if downside_std > 0 else 0
        else:
            state.sortino_ratio = state.sharpe_ratio

        return state
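
    # Worked example (not part of the shipped file) of the historical VaR and
    # CVaR above: with 100 step returns, var_index = int(0.05 * 100) = 5, so
    # VaR is the negated 6th-worst return (index 5 of the ascending sort) and
    # CVaR is the negated mean of the 5 worst returns; CVaR >= VaR holds by
    # construction because it averages the tail beyond the VaR cutoff.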

    def _detect_regime(self, state: PortfolioState) -> str:
        """Detect market regime based on returns."""
        if len(state.value_history) < 20:
            return MarketRegime.SIDEWAYS.value

        # Look at recent returns
        recent_values = state.value_history[-20:]
        returns = [
            (recent_values[i] - recent_values[i - 1]) / recent_values[i - 1]
            for i in range(1, len(recent_values))
        ]

        mean_return = sum(returns) / len(returns)
        volatility = math.sqrt(sum((r - mean_return) ** 2 for r in returns) / len(returns))

        # Classify regime: calm trending markets are bull/bear, high
        # volatility is crisis, everything else is sideways
        if mean_return > 0.001 and volatility < 0.02:
            return MarketRegime.BULL.value
        elif mean_return < -0.001 and volatility < 0.02:
            return MarketRegime.BEAR.value
        elif volatility > 0.03:
            return MarketRegime.CRISIS.value
        else:
            return MarketRegime.SIDEWAYS.value

    def get_state_for_checkpoint(self, state: PipelineState) -> Dict[str, Any]:
        """Get state for checkpoint."""
        return state.to_dict()

    def restore_state_from_checkpoint(self, checkpoint_data: Dict[str, Any]) -> PortfolioState:
        """Restore state from checkpoint."""
        return PortfolioState.from_dict(checkpoint_data)

    def should_stop(self, state: PipelineState) -> bool:
        """Check stopping criteria."""
        if not isinstance(state, PortfolioState):
            return False

        # Stop once the Sharpe ratio exceeds the 2.0 target
        if state.sharpe_ratio > 2.0:
            logger.info(f"Target Sharpe ratio achieved: {state.sharpe_ratio:.2f}")
            return True

        return False