ivolatility-backtesting 1.4.0__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ivolatility-backtesting might be problematic.

@@ -1,59 +1,48 @@
1
- """
2
- ivolatility_backtesting.py - UNIVERSAL BACKTEST FRAMEWORK
3
- Version 4.1 - Dual RAM Display (Process + Container)
4
-
5
- Key Features:
6
- - ResourceMonitor: CPU/RAM tracking (cgroups v2 + psutil fallback)
7
- - Enhanced progress bar with ETA, CPU%, RAM
8
- - Shows BOTH Python process RAM AND container total RAM
9
- - api_call(): Auto-normalization for dict/DataFrame responses
10
- - 30+ metrics, charts, exports
11
- - One-command: run_backtest()
12
-
13
- Usage:
14
- from ivolatility_backtesting import *
15
-
16
- init_api(os.getenv("API_KEY"))
17
-
18
- data = api_call('/equities/eod/stock-prices',
19
- symbol='AAPL',
20
- from_='2024-01-01',
21
- to='2024-12-31',
22
- debug=True)
23
-
24
- analyzer = run_backtest(my_strategy, CONFIG)
25
-
26
- Resource Monitoring:
27
- - CPU: Process CPU % (smoothed over 5 readings)
28
- - RAM: Shows BOTH metrics when in container:
29
- * Green: Python process memory (your strategy)
30
- * Blue: Total container memory (includes Jupyter, cache, etc.)
31
-
32
- Progress Display Example:
33
- Processing 2024-07-30 (144/252)
34
- ETA: 5m 23s | CPU: 46.8% | RAM: 856MB (42%) Python | 1280MB (64%) Container
35
- Container: 1.0 cores, 2.0GB limit
36
- """
1
+ # ============================================================
2
+ # ivolatility_backtesting.py - ENHANCED VERSION
3
+ #
4
+ # NEW FEATURES:
5
+ # 1. Combined stop-loss (requires BOTH conditions)
6
+ # 2. Parameter optimization framework
7
+ # 3. Optimization results visualization
8
+ # ============================================================
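For orientation, a minimal sketch of how the new combined stop can be switched on through the strategy config; the key names follow the config lookups in PositionManager further down in this file, and all values are illustrative, not defaults shipped by the package.

CONFIG = {
    'strategy_name': 'Example strategy',
    'start_date': '2024-01-01',
    'end_date': '2024-12-31',
    'initial_capital': 100_000,
    'stop_loss_enabled': True,
    'stop_loss_config': {
        'type': 'combined',                               # new stop type in this release
        'value': {'pl_loss': 0.05, 'directional': 0.03},  # 5% P&L loss AND 3% adverse underlying move
    },
}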
37
9
 
38
10
  import pandas as pd
39
11
  import numpy as np
40
12
  import matplotlib.pyplot as plt
41
13
  import seaborn as sns
42
- from datetime import datetime, timedelta
14
+ from datetime import datetime
43
15
  import ivolatility as ivol
44
16
  import os
45
17
  import time
46
18
  import psutil
19
+ import warnings
20
+ from itertools import product
21
+ warnings.filterwarnings('ignore', category=pd.errors.SettingWithCopyWarning)
22
+ warnings.filterwarnings('ignore', message='.*SettingWithCopyWarning.*')
23
+ warnings.filterwarnings('ignore', category=FutureWarning)
24
+ warnings.filterwarnings('ignore', category=DeprecationWarning)
47
25
 
48
26
  sns.set_style('darkgrid')
49
27
  plt.rcParams['figure.figsize'] = (15, 8)
50
28
 
29
+ def create_optimization_folder(base_dir='optimization_results'):
30
+ """
31
+ Create timestamped folder for optimization run
32
+ Returns: folder path (e.g., 'optimization_results/20250122_143025')
33
+ """
34
+ from pathlib import Path
35
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
36
+ folder_path = Path(base_dir) / timestamp
37
+ folder_path.mkdir(parents=True, exist_ok=True)
38
+ print(f"\n📁 Created optimization folder: {folder_path}")
39
+ return str(folder_path)
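A small usage sketch: the helper only creates and returns the timestamped folder, so callers place their own artifacts inside it (the chart file name below is illustrative).

folder = create_optimization_folder()                      # e.g. 'optimization_results/20250122_143025'
chart_path = os.path.join(folder, 'backtest_results.png')  # per-run outputs go under the folder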
51
40
 
52
41
  # ============================================================
53
42
  # RESOURCE MONITOR
54
43
  # ============================================================
55
44
  class ResourceMonitor:
56
- """Monitor CPU and RAM - shows PROCESS resources (Python), not full container"""
45
+ """Monitor CPU and RAM with container support"""
57
46
 
58
47
  def __init__(self, show_container_total=False):
59
48
  self.process = psutil.Process()
@@ -61,38 +50,33 @@ class ResourceMonitor:
61
50
  self.last_cpu_time = None
62
51
  self.last_check_time = None
63
52
  self.use_cgroups = self._check_cgroups_v2()
64
- self.show_container_total = show_container_total # False = process RAM, True = container RAM
65
-
66
- # CPU smoothing for more stable readings
53
+ self.show_container_total = show_container_total
67
54
  self.cpu_history = []
68
- self.cpu_history_max = 5 # Average over last 5 readings
55
+ self.cpu_history_max = 5
69
56
 
70
- # Determine actual CPU quota for containers
71
57
  if self.use_cgroups:
72
58
  quota = self._read_cpu_quota()
73
59
  if quota and quota > 0:
74
- self.cpu_count = quota # Override with container quota
60
+ self.cpu_count = quota
75
61
 
76
62
  self.context = "Container" if self.use_cgroups else "Host"
77
63
 
78
64
  def _read_cpu_quota(self):
79
- """Read CPU quota from cgroups v2 (returns cores, e.g., 1.5)"""
80
65
  try:
81
66
  with open('/sys/fs/cgroup/cpu.max', 'r') as f:
82
67
  line = f.read().strip()
83
68
  if line == 'max':
84
- return None # No limit
69
+ return None
85
70
  parts = line.split()
86
71
  if len(parts) == 2:
87
- quota = int(parts[0]) # microseconds
88
- period = int(parts[1]) # microseconds
89
- return quota / period # cores (e.g., 100000/100000 = 1.0)
72
+ quota = int(parts[0])
73
+ period = int(parts[1])
74
+ return quota / period
90
75
  except:
91
76
  pass
92
77
  return None
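To make the quota arithmetic concrete, a standalone sketch of the same cgroups v2 parsing; the helper name is hypothetical, and the file format is "quota period" in microseconds, or the literal "max" when there is no CPU limit.

def cores_from_cpu_max(line: str):
    # Mirrors _read_cpu_quota: "150000 100000" -> 1.5 cores, "max" -> no limit.
    if line.strip() == 'max':
        return None
    quota_us, period_us = (int(p) for p in line.split())
    return quota_us / period_us

assert cores_from_cpu_max('100000 100000') == 1.0
assert cores_from_cpu_max('150000 100000') == 1.5
assert cores_from_cpu_max('max') is None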
93
78
 
94
79
  def get_context_info(self):
95
- """Returns monitoring context and resource limits"""
96
80
  if self.use_cgroups:
97
81
  current, max_mem = self._read_cgroup_memory()
98
82
  ram_info = ""
@@ -139,7 +123,6 @@ class ResourceMonitor:
139
123
  return None, None
140
124
 
141
125
  def get_cpu_percent(self):
142
- """Get CPU% with smoothing - shows container limits if in container, host if not"""
143
126
  if self.use_cgroups:
144
127
  current_time = time.time()
145
128
  current_cpu = self._read_cgroup_cpu()
@@ -149,13 +132,9 @@ class ResourceMonitor:
149
132
  cpu_delta = current_cpu - self.last_cpu_time
150
133
 
151
134
  if time_delta > 0:
152
- # Calculate based on container CPU quota
153
135
  cpu_percent = (cpu_delta / (time_delta * 1_000_000)) * 100
154
-
155
- # Clamp to container limits
156
136
  cpu_percent = min(cpu_percent, 100 * self.cpu_count)
157
137
 
158
- # Add to history for smoothing
159
138
  self.cpu_history.append(cpu_percent)
160
139
  if len(self.cpu_history) > self.cpu_history_max:
161
140
  self.cpu_history.pop(0)
@@ -163,13 +142,11 @@ class ResourceMonitor:
163
142
  self.last_cpu_time = current_cpu
164
143
  self.last_check_time = current_time
165
144
 
166
- # Return smoothed average
167
145
  return round(sum(self.cpu_history) / len(self.cpu_history), 1)
168
146
 
169
147
  self.last_cpu_time = current_cpu
170
148
  self.last_check_time = current_time
171
149
 
172
- # Fallback: host resources with smoothing
173
150
  try:
174
151
  cpu = self.process.cpu_percent(interval=0.1)
175
152
  if cpu == 0:
@@ -184,20 +161,11 @@ class ResourceMonitor:
184
161
  return 0.0
185
162
 
186
163
  def get_memory_info(self):
187
- """
188
- Get memory usage - returns BOTH process and container/host
189
-
190
- Returns:
191
- tuple: (process_mb, process_pct, container_mb, container_pct)
192
- If no container, container values = process values
193
- """
194
164
  try:
195
- # Get process memory (Python only)
196
165
  mem = self.process.memory_info()
197
166
  process_mb = mem.rss / (1024 * 1024)
198
167
 
199
168
  if self.use_cgroups:
200
- # Get container total and limit
201
169
  current, max_mem = self._read_cgroup_memory()
202
170
  if max_mem:
203
171
  process_percent = (mem.rss / max_mem) * 100
@@ -212,7 +180,6 @@ class ResourceMonitor:
212
180
  round(container_percent, 1)
213
181
  )
214
182
 
215
- # No container data, return process only
216
183
  return (
217
184
  round(process_mb, 1),
218
185
  round(process_percent, 1),
@@ -220,11 +187,9 @@ class ResourceMonitor:
220
187
  round(process_percent, 1)
221
188
  )
222
189
 
223
- # Host: calculate % of total RAM
224
190
  total = psutil.virtual_memory().total
225
191
  percent = (mem.rss / total) * 100
226
192
 
227
- # On host, process = "container" (no container isolation)
228
193
  return (
229
194
  round(process_mb, 1),
230
195
  round(percent, 1),
@@ -236,35 +201,47 @@ class ResourceMonitor:
236
201
  return 0.0, 0.0, 0.0, 0.0
237
202
 
238
203
 
239
- def create_progress_bar():
240
- """Create enhanced progress bar with ETA, CPU%, RAM"""
241
- from IPython.display import display
242
- import ipywidgets as widgets
243
-
244
- progress_bar = widgets.FloatProgress(
245
- value=0, min=0, max=100,
246
- description='Progress:',
247
- bar_style='info',
248
- style={'bar_color': '#00ff00'},
249
- layout=widgets.Layout(width='100%', height='30px')
250
- )
251
-
252
- status_label = widgets.HTML(
253
- value="<b style='color:#0066cc'>Starting...</b>"
254
- )
255
-
256
- display(widgets.VBox([progress_bar, status_label]))
204
+ def create_progress_bar(reuse_existing=None):
205
+ """Create or reuse enhanced progress bar"""
206
+ if reuse_existing is not None:
207
+ progress_bar, status_label, monitor, start_time = reuse_existing
208
+ progress_bar.value = 0
209
+ progress_bar.bar_style = 'info'
210
+ status_label.value = "<b style='color:#0066cc'>Starting...</b>"
211
+ return progress_bar, status_label, monitor, time.time()
257
212
 
258
- monitor = ResourceMonitor()
259
- start_time = time.time()
260
-
261
- return progress_bar, status_label, monitor, start_time
213
+ try:
214
+ from IPython.display import display
215
+ import ipywidgets as widgets
216
+
217
+ progress_bar = widgets.FloatProgress(
218
+ value=0, min=0, max=100,
219
+ description='Progress:',
220
+ bar_style='info',
221
+ style={'bar_color': '#00ff00'},
222
+ layout=widgets.Layout(width='100%', height='30px')
223
+ )
224
+
225
+ status_label = widgets.HTML(
226
+ value="<b style='color:#0066cc'>Starting...</b>"
227
+ )
228
+
229
+ display(widgets.VBox([progress_bar, status_label]))
230
+
231
+ monitor = ResourceMonitor()
232
+ start_time = time.time()
233
+
234
+ return progress_bar, status_label, monitor, start_time
235
+ except ImportError:
236
+ print("Warning: ipywidgets not available. Progress bar disabled.")
237
+ return None, None, ResourceMonitor(), time.time()
262
238
 
263
239
 
264
240
  def update_progress(progress_bar, status_label, monitor, current, total, start_time, message="Processing"):
265
- """
266
- Update progress bar with ETA, CPU%, RAM (shows BOTH process and container)
267
- """
241
+ """Update progress bar with ETA, CPU%, RAM"""
242
+ if progress_bar is None or status_label is None:
243
+ return
244
+
268
245
  progress = (current / total) * 100
269
246
  progress_bar.value = progress
270
247
 
@@ -278,28 +255,23 @@ def update_progress(progress_bar, status_label, monitor, current, total, start_t
278
255
  cpu = monitor.get_cpu_percent()
279
256
  process_mb, process_pct, container_mb, container_pct = monitor.get_memory_info()
280
257
 
281
- # Build RAM display - show both if different, otherwise just one
282
- if abs(container_mb - process_mb) > 10: # Significant difference (>10MB)
258
+ if abs(container_mb - process_mb) > 10:
283
259
  ram_display = (
284
260
  f"RAM: <span style='color:#4CAF50'>{process_mb}MB ({process_pct}%)</span> Python | "
285
261
  f"<span style='color:#2196F3'>{container_mb}MB ({container_pct}%)</span> Container"
286
262
  )
287
263
  else:
288
- # Same values (on host or small difference)
289
264
  ram_display = f"RAM: {process_mb}MB ({process_pct}%)"
290
265
 
291
- # Context info
292
- if monitor.use_cgroups:
293
- context_info = f"Container: {monitor.cpu_count:.1f} cores"
294
- current, max_mem = monitor._read_cgroup_memory()
295
- if max_mem:
296
- context_info += f", {max_mem / (1024**3):.1f}GB limit"
297
- else:
298
- context_info = f"Host: {monitor.cpu_count} cores"
266
+ context_info = monitor.get_context_info()
267
+
268
+ elapsed_str = format_time(elapsed)
269
+ start_time_str = datetime.fromtimestamp(start_time).strftime('%H:%M:%S')
299
270
 
300
271
  status_label.value = (
301
272
  f"<b style='color:#0066cc'>{message} ({current}/{total})</b><br>"
302
- f"<span style='color:#666'>ETA: {eta_str} | CPU: {cpu}% | {ram_display}</span><br>"
273
+ f"<span style='color:#666'>⏱️ Elapsed: {elapsed_str} | ETA: {eta_str} | Started: {start_time_str}</span><br>"
274
+ f"<span style='color:#666'>CPU: {cpu}% | {ram_display}</span><br>"
303
275
  f"<span style='color:#999;font-size:10px'>{context_info}</span>"
304
276
  )
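Taken together, the two helpers are meant to be called like this; a sketch assuming a Jupyter environment with ipywidgets available, with an illustrative date list.

progress_bar, status_label, monitor, start_time = create_progress_bar()
trading_days = ['2024-07-29', '2024-07-30', '2024-07-31']   # illustrative
for i, day in enumerate(trading_days, start=1):
    # ... run the strategy for one day ...
    update_progress(progress_bar, status_label, monitor, i, len(trading_days),
                    start_time, message=f"Processing {day}")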
305
277
 
@@ -317,10 +289,10 @@ def format_time(seconds):
317
289
 
318
290
 
319
291
  # ============================================================
320
- # API HELPER - AUTOMATIC NORMALIZATION
292
+ # API HELPER
321
293
  # ============================================================
322
294
  class APIHelper:
323
- """Normalizes API responses to consistent format"""
295
+ """Normalizes API responses"""
324
296
 
325
297
  @staticmethod
326
298
  def normalize_response(response, debug=False):
@@ -464,7 +436,572 @@ class BacktestResults:
464
436
 
465
437
 
466
438
  # ============================================================
467
- # BACKTEST ANALYZER (30+ METRICS)
439
+ # STOP-LOSS MANAGER (ENHANCED VERSION WITH COMBINED STOP)
440
+ # ============================================================
441
+ class StopLossManager:
442
+ """
443
+ Enhanced stop-loss manager with COMBINED STOP support
444
+
445
+ NEW STOP TYPE:
446
+ - combined: Requires BOTH pl_loss AND directional conditions (from code 2)
447
+ """
448
+
449
+ def __init__(self):
450
+ self.positions = {}
451
+
452
+ def add_position(self, position_id, entry_price, entry_date, stop_type='fixed_pct',
453
+ stop_value=0.05, atr=None, trailing_distance=None, use_pnl_pct=False,
454
+ is_short_bias=False, **kwargs):
455
+ """
456
+ Add position with stop-loss
457
+
458
+ NEW for combined stop:
459
+ stop_type='combined'
460
+ stop_value={'pl_loss': 0.05, 'directional': 0.03}
461
+ """
462
+ self.positions[position_id] = {
463
+ 'entry_price': entry_price,
464
+ 'entry_date': entry_date,
465
+ 'stop_type': stop_type,
466
+ 'stop_value': stop_value,
467
+ 'atr': atr,
468
+ 'trailing_distance': trailing_distance,
469
+ 'highest_price': entry_price if not use_pnl_pct else 0,
470
+ 'lowest_price': entry_price if not use_pnl_pct else 0,
471
+ 'max_profit': 0,
472
+ 'use_pnl_pct': use_pnl_pct,
473
+ 'is_short_bias': is_short_bias,
474
+ **kwargs # Store additional parameters for combined stop
475
+ }
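A sketch of registering an options position with the new combined stop, following the docstring above; values are illustrative, entry_price=0 selects P&L% mode, and underlying_entry_price is kept via **kwargs for the directional check.

sl = StopLossManager()
sl.add_position(
    position_id='SPY_2024-07-30',
    entry_price=0,                          # P&L% mode
    entry_date=datetime(2024, 7, 30),
    stop_type='combined',
    stop_value={'pl_loss': 0.05, 'directional': 0.03},
    use_pnl_pct=True,
    is_short_bias=True,
    underlying_entry_price=550.0,           # stored via **kwargs
)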
476
+
477
+ def check_stop(self, position_id, current_price, current_date, position_type='LONG', **kwargs):
478
+ """
479
+ Check if stop-loss triggered
480
+
481
+ NEW: Supports 'combined' stop type
482
+ """
483
+ if position_id not in self.positions:
484
+ return False, None, None
485
+
486
+ pos = self.positions[position_id]
487
+ stop_type = pos['stop_type']
488
+ use_pnl_pct = pos.get('use_pnl_pct', False)
489
+
490
+ # Update tracking
491
+ if use_pnl_pct:
492
+ pnl_pct = current_price
493
+ pos['highest_price'] = max(pos['highest_price'], pnl_pct)
494
+ pos['lowest_price'] = min(pos['lowest_price'], pnl_pct)
495
+ pos['max_profit'] = max(pos['max_profit'], pnl_pct)
496
+ else:
497
+ if position_type == 'LONG':
498
+ pos['highest_price'] = max(pos['highest_price'], current_price)
499
+ current_profit = current_price - pos['entry_price']
500
+ else:
501
+ pos['lowest_price'] = min(pos['lowest_price'], current_price)
502
+ current_profit = pos['entry_price'] - current_price
503
+
504
+ pos['max_profit'] = max(pos['max_profit'], current_profit)
505
+
506
+ # Route to appropriate check method
507
+ if stop_type == 'fixed_pct':
508
+ if use_pnl_pct:
509
+ return self._check_fixed_pct_stop_pnl(pos, current_price)
510
+ else:
511
+ return self._check_fixed_pct_stop(pos, current_price, position_type)
512
+
513
+ elif stop_type == 'trailing':
514
+ if use_pnl_pct:
515
+ return self._check_trailing_stop_pnl(pos, current_price)
516
+ else:
517
+ return self._check_trailing_stop(pos, current_price, position_type)
518
+
519
+ elif stop_type == 'time_based':
520
+ return self._check_time_stop(pos, current_date)
521
+
522
+ elif stop_type == 'volatility':
523
+ return self._check_volatility_stop(pos, current_price, position_type)
524
+
525
+ elif stop_type == 'pl_loss':
526
+ return self._check_pl_loss_stop(pos, kwargs)
527
+
528
+ elif stop_type == 'directional':
529
+ return self._check_directional_stop(pos, kwargs)
530
+
531
+ # NEW: COMBINED STOP (requires BOTH conditions)
532
+ elif stop_type == 'combined':
533
+ return self._check_combined_stop(pos, kwargs)
534
+
535
+ else:
536
+ return False, None, None
537
+
538
+ # ========================================================
539
+ # EXISTING STOP METHODS (unchanged)
540
+ # ========================================================
541
+
542
+ def _check_fixed_pct_stop(self, pos, current_price, position_type):
543
+ """Fixed percentage stop-loss (price-based)"""
544
+ entry = pos['entry_price']
545
+ stop_pct = pos['stop_value']
546
+
547
+ if position_type == 'LONG':
548
+ stop_level = entry * (1 - stop_pct)
549
+ triggered = current_price <= stop_level
550
+ else:
551
+ stop_level = entry * (1 + stop_pct)
552
+ triggered = current_price >= stop_level
553
+
554
+ return triggered, stop_level, 'fixed_pct'
555
+
556
+ def _check_fixed_pct_stop_pnl(self, pos, pnl_pct):
557
+ """Fixed percentage stop-loss (P&L%-based for options)"""
558
+ stop_pct = pos['stop_value']
559
+ stop_level = -stop_pct * 100
560
+
561
+ triggered = pnl_pct <= stop_level
562
+
563
+ return triggered, stop_level, 'fixed_pct'
564
+
565
+ def _check_trailing_stop(self, pos, current_price, position_type):
566
+ """Trailing stop-loss (price-based)"""
567
+ if pos['trailing_distance'] is None:
568
+ pos['trailing_distance'] = pos['stop_value']
569
+
570
+ distance = pos['trailing_distance']
571
+
572
+ if position_type == 'LONG':
573
+ stop_level = pos['highest_price'] * (1 - distance)
574
+ triggered = current_price <= stop_level
575
+ else:
576
+ stop_level = pos['lowest_price'] * (1 + distance)
577
+ triggered = current_price >= stop_level
578
+
579
+ return triggered, stop_level, 'trailing'
580
+
581
+ def _check_trailing_stop_pnl(self, pos, pnl_pct):
582
+ """Trailing stop-loss (P&L%-based for options)"""
583
+ if pos['trailing_distance'] is None:
584
+ pos['trailing_distance'] = pos['stop_value']
585
+
586
+ distance = pos['trailing_distance'] * 100
587
+
588
+ stop_level = pos['highest_price'] - distance
589
+
590
+ triggered = pnl_pct <= stop_level
591
+
592
+ return triggered, stop_level, 'trailing'
593
+
594
+ def _check_time_stop(self, pos, current_date):
595
+ """Time-based stop"""
596
+ days_held = (current_date - pos['entry_date']).days
597
+ max_days = pos['stop_value']
598
+
599
+ triggered = days_held >= max_days
600
+ return triggered, None, 'time_based'
601
+
602
+ def _check_volatility_stop(self, pos, current_price, position_type):
603
+ """ATR-based stop"""
604
+ if pos['atr'] is None:
605
+ return False, None, None
606
+
607
+ entry = pos['entry_price']
608
+ atr_multiplier = pos['stop_value']
609
+ stop_distance = pos['atr'] * atr_multiplier
610
+
611
+ if position_type == 'LONG':
612
+ stop_level = entry - stop_distance
613
+ triggered = current_price <= stop_level
614
+ else:
615
+ stop_level = entry + stop_distance
616
+ triggered = current_price >= stop_level
617
+
618
+ return triggered, stop_level, 'volatility'
619
+
620
+ def _check_pl_loss_stop(self, pos, kwargs):
621
+ """Stop-loss based on actual P&L"""
622
+ pnl_pct = kwargs.get('pnl_pct')
623
+
624
+ if pnl_pct is None:
625
+ current_pnl = kwargs.get('current_pnl', 0)
626
+ total_cost = kwargs.get('total_cost', pos.get('total_cost', 1))
627
+
628
+ if total_cost > 0:
629
+ pnl_pct = (current_pnl / total_cost) * 100
630
+ else:
631
+ pnl_pct = 0
632
+
633
+ stop_threshold = -pos['stop_value'] * 100
634
+ triggered = pnl_pct <= stop_threshold
635
+
636
+ return triggered, stop_threshold, 'pl_loss'
637
+
638
+ def _check_directional_stop(self, pos, kwargs):
639
+ """Stop-loss based on underlying price movement"""
640
+ underlying_change_pct = kwargs.get('underlying_change_pct')
641
+
642
+ if underlying_change_pct is None:
643
+ current = kwargs.get('underlying_price')
644
+ entry = kwargs.get('underlying_entry_price', pos.get('underlying_entry_price'))
645
+
646
+ if current is not None and entry is not None and entry != 0:
647
+ underlying_change_pct = ((current - entry) / entry) * 100
648
+ else:
649
+ underlying_change_pct = 0
650
+
651
+ threshold = pos['stop_value'] * 100
652
+ is_short_bias = pos.get('is_short_bias', False)
653
+
654
+ if is_short_bias:
655
+ triggered = underlying_change_pct >= threshold
656
+ else:
657
+ triggered = underlying_change_pct <= -threshold
658
+
659
+ return triggered, threshold, 'directional'
660
+
661
+ # ========================================================
662
+ # NEW: COMBINED STOP (REQUIRES BOTH CONDITIONS)
663
+ # ========================================================
664
+
665
+ def _check_combined_stop(self, pos, kwargs):
666
+ """
667
+ Combined stop: Requires BOTH pl_loss AND directional conditions
668
+
669
+ This is the key feature from code 2:
670
+ - Must have P&L loss > threshold
671
+ - AND underlying must move adversely > threshold
672
+
673
+ Args:
674
+ pos: Position dict with stop_value = {'pl_loss': 0.05, 'directional': 0.03}
675
+ kwargs: Must contain pnl_pct and underlying_change_pct
676
+
677
+ Returns:
678
+ tuple: (triggered, thresholds_dict, 'combined')
679
+ """
680
+ stop_config = pos['stop_value']
681
+
682
+ if not isinstance(stop_config, dict):
683
+ # stop_value is not a thresholds dict: cannot evaluate, never trigger
684
+ return False, None, 'combined'
685
+
686
+ pl_threshold = stop_config.get('pl_loss', 0.05)
687
+ dir_threshold = stop_config.get('directional', 0.03)
688
+
689
+ # Check P&L condition
690
+ pnl_pct = kwargs.get('pnl_pct', 0)
691
+ is_losing = pnl_pct <= (-pl_threshold * 100)
692
+
693
+ # Check directional condition
694
+ underlying_change_pct = kwargs.get('underlying_change_pct')
695
+
696
+ if underlying_change_pct is None:
697
+ current = kwargs.get('underlying_price')
698
+ entry = kwargs.get('underlying_entry_price', pos.get('underlying_entry_price'))
699
+
700
+ if current is not None and entry is not None and entry != 0:
701
+ underlying_change_pct = ((current - entry) / entry) * 100
702
+ else:
703
+ underlying_change_pct = 0
704
+
705
+ is_short_bias = pos.get('is_short_bias', False)
706
+
707
+ if is_short_bias:
708
+ # Bearish position: adverse move is UP
709
+ adverse_move = underlying_change_pct >= (dir_threshold * 100)
710
+ else:
711
+ # Bullish position: adverse move is DOWN
712
+ adverse_move = underlying_change_pct <= (-dir_threshold * 100)
713
+
714
+ # CRITICAL: Both conditions must be true
715
+ triggered = is_losing and adverse_move
716
+
717
+ # Return detailed thresholds for reporting
718
+ thresholds = {
719
+ 'pl_threshold': -pl_threshold * 100,
720
+ 'dir_threshold': dir_threshold * 100,
721
+ 'actual_pnl_pct': pnl_pct,
722
+ 'actual_underlying_change': underlying_change_pct,
723
+ 'pl_condition': is_losing,
724
+ 'dir_condition': adverse_move
725
+ }
726
+
727
+ return triggered, thresholds, 'combined'
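Continuing the sketch above, a daily check of that position; the stop only fires when the P&L loss and the adverse underlying move are both past their thresholds.

triggered, thresholds, kind = sl.check_stop(
    'SPY_2024-07-30',
    current_price=-7.5,                     # P&L% mode: position is down 7.5%
    current_date=datetime(2024, 8, 2),
    pnl_pct=-7.5,
    underlying_change_pct=3.4,              # underlying up 3.4% against a short-bias position
)
# triggered is True: -7.5 <= -5.0 (pl_loss) AND +3.4 >= +3.0 (directional)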
728
+
729
+ # ========================================================
730
+ # UTILITY METHODS
731
+ # ========================================================
732
+
733
+ def remove_position(self, position_id):
734
+ """Remove position from tracking"""
735
+ if position_id in self.positions:
736
+ del self.positions[position_id]
737
+
738
+ def get_position_info(self, position_id):
739
+ """Get position stop-loss info"""
740
+ if position_id not in self.positions:
741
+ return None
742
+
743
+ pos = self.positions[position_id]
744
+ return {
745
+ 'stop_type': pos['stop_type'],
746
+ 'stop_value': pos['stop_value'],
747
+ 'max_profit_before_stop': pos['max_profit']
748
+ }
749
+
750
+
751
+ # ============================================================
752
+ # POSITION MANAGER (unchanged but compatible with combined stop)
753
+ # ============================================================
754
+ class PositionManager:
755
+ """Universal Position Manager with automatic mode detection"""
756
+
757
+ def __init__(self, config, debug=False):
758
+ self.positions = {}
759
+ self.closed_trades = []
760
+ self.config = config
761
+ self.debug = debug
762
+
763
+ self.sl_enabled = config.get('stop_loss_enabled', False)
764
+ if self.sl_enabled:
765
+ self.sl_config = config.get('stop_loss_config', {})
766
+ self.sl_manager = StopLossManager()
767
+ else:
768
+ self.sl_config = None
769
+ self.sl_manager = None
770
+
771
+ def open_position(self, position_id, symbol, entry_date, entry_price,
772
+ quantity, position_type='LONG', **kwargs):
773
+ """Open position with automatic stop-loss"""
774
+
775
+ if entry_price == 0 and self.sl_enabled:
776
+ if 'total_cost' not in kwargs or kwargs['total_cost'] == 0:
777
+ raise ValueError(
778
+ f"\n{'='*70}\n"
779
+ f"ERROR: P&L% mode requires 'total_cost' parameter\n"
780
+ f"{'='*70}\n"
781
+ )
782
+
783
+ position = {
784
+ 'id': position_id,
785
+ 'symbol': symbol,
786
+ 'entry_date': entry_date,
787
+ 'entry_price': entry_price,
788
+ 'quantity': quantity,
789
+ 'type': position_type,
790
+ 'highest_price': entry_price,
791
+ 'lowest_price': entry_price,
792
+ **kwargs
793
+ }
794
+
795
+ self.positions[position_id] = position
796
+
797
+ if self.sl_enabled and self.sl_manager:
798
+ sl_type = self.sl_config.get('type', 'fixed_pct')
799
+ sl_value = self.sl_config.get('value', 0.05)
800
+
801
+ use_pnl_pct = (entry_price == 0)
802
+ is_short_bias = kwargs.get('is_short_bias', False)
803
+
804
+ # Pass underlying_entry_price for combined stop
805
+ self.sl_manager.add_position(
806
+ position_id=position_id,
807
+ entry_price=entry_price,
808
+ entry_date=entry_date,
809
+ stop_type=sl_type,
810
+ stop_value=sl_value,
811
+ atr=kwargs.get('atr', None),
812
+ trailing_distance=self.sl_config.get('trailing_distance', None),
813
+ use_pnl_pct=use_pnl_pct,
814
+ is_short_bias=is_short_bias,
815
+ underlying_entry_price=kwargs.get('entry_stock_price') # For combined stop
816
+ )
817
+
818
+ if self.debug:
819
+ mode = "P&L%" if entry_price == 0 else "Price"
820
+ bias = " (SHORT BIAS)" if kwargs.get('is_short_bias') else ""
821
+ print(f"[PositionManager] OPEN {position_id}: {symbol} @ {entry_price} (Mode: {mode}{bias})")
822
+
823
+ return position
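The same combined stop driven through PositionManager, using the config sketched near the top of the file; entry_price=0 selects P&L% mode, so total_cost is mandatory, and entry_stock_price becomes the stop's underlying_entry_price (all figures illustrative).

pm = PositionManager(CONFIG, debug=True)
pm.open_position(
    position_id='SPY_2024-07-30',
    symbol='SPY',
    entry_date=datetime(2024, 7, 30),
    entry_price=0,
    quantity=10,
    position_type='SHORT',
    total_cost=5000.0,                      # required in P&L% mode
    is_short_bias=True,
    entry_stock_price=550.0,
)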
824
+
825
+ def check_positions(self, current_date, price_data):
826
+ """Check all positions for stop-loss triggers"""
827
+ if not self.sl_enabled:
828
+ return []
829
+
830
+ to_close = []
831
+
832
+ for position_id, position in self.positions.items():
833
+ if position_id not in price_data:
834
+ continue
835
+
836
+ if isinstance(price_data[position_id], dict):
837
+ data = price_data[position_id]
838
+ current_price = data.get('price', position['entry_price'])
839
+ current_pnl = data.get('pnl', 0)
840
+ current_pnl_pct = data.get('pnl_pct', 0)
841
+
842
+ # NEW: Pass underlying data for combined stop
843
+ underlying_price = data.get('underlying_price')
844
+ underlying_entry_price = data.get('underlying_entry_price')
845
+ underlying_change_pct = data.get('underlying_change_pct')
846
+ else:
847
+ current_price = price_data[position_id]
848
+ current_pnl = (current_price - position['entry_price']) * position['quantity']
849
+ current_pnl_pct = (current_price - position['entry_price']) / position['entry_price'] if position['entry_price'] != 0 else 0
850
+ underlying_price = None
851
+ underlying_entry_price = None
852
+ underlying_change_pct = None
853
+
854
+ position['highest_price'] = max(position['highest_price'], current_price)
855
+ position['lowest_price'] = min(position['lowest_price'], current_price)
856
+
857
+ if position['entry_price'] == 0:
858
+ check_value = current_pnl_pct
859
+ else:
860
+ check_value = current_price
861
+
862
+ # Pass all data to stop manager
863
+ stop_kwargs = {
864
+ 'pnl_pct': current_pnl_pct,
865
+ 'current_pnl': current_pnl,
866
+ 'total_cost': position.get('total_cost', 1),
867
+ 'underlying_price': underlying_price,
868
+ 'underlying_entry_price': underlying_entry_price or position.get('entry_stock_price'),
869
+ 'underlying_change_pct': underlying_change_pct
870
+ }
871
+
872
+ triggered, stop_level, stop_type = self.sl_manager.check_stop(
873
+ position_id=position_id,
874
+ current_price=check_value,
875
+ current_date=current_date,
876
+ position_type=position['type'],
877
+ **stop_kwargs
878
+ )
879
+
880
+ if triggered:
881
+ to_close.append({
882
+ 'position_id': position_id,
883
+ 'symbol': position['symbol'],
884
+ 'stop_type': stop_type,
885
+ 'stop_level': stop_level,
886
+ 'current_price': current_price,
887
+ 'pnl': current_pnl,
888
+ 'pnl_pct': current_pnl_pct
889
+ })
890
+
891
+ if self.debug:
892
+ mode = "P&L%" if position['entry_price'] == 0 else "Price"
893
+ print(f"[PositionManager] STOP-LOSS: {position_id} ({stop_type}, {mode}) @ {check_value:.2f}")
894
+
895
+ return to_close
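A sketch of the per-position entry that check_positions() expects when the combined stop is active; the field names follow the data.get() calls above, and the numbers continue the earlier example (a move from 550.0 to 568.7 is +3.4%).

price_data = {
    'SPY_2024-07-30': {
        'price': 0,                         # ignored in P&L% mode
        'pnl': -375.0,
        'pnl_pct': -7.5,
        'underlying_price': 568.7,
        'underlying_entry_price': 550.0,
        'underlying_change_pct': 3.4,
    }
}
to_close = pm.check_positions(datetime(2024, 8, 2), price_data)
# to_close -> [{'position_id': 'SPY_2024-07-30', 'stop_type': 'combined', ...}]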
896
+
897
+ def close_position(self, position_id, exit_date, exit_price,
898
+ close_reason='manual', pnl=None, **kwargs):
899
+ """Close position"""
900
+ if position_id not in self.positions:
901
+ if self.debug:
902
+ print(f"[PositionManager] WARNING: Position {position_id} not found")
903
+ return None
904
+
905
+ position = self.positions.pop(position_id)
906
+
907
+ if pnl is None:
908
+ pnl = (exit_price - position['entry_price']) * position['quantity']
909
+
910
+ if position['entry_price'] != 0:
911
+ pnl_pct = (exit_price - position['entry_price']) / position['entry_price'] * 100
912
+ else:
913
+ if 'total_cost' in position and position['total_cost'] != 0:
914
+ pnl_pct = (pnl / position['total_cost']) * 100
915
+ elif 'total_cost' in kwargs and kwargs['total_cost'] != 0:
916
+ pnl_pct = (pnl / kwargs['total_cost']) * 100
917
+ else:
918
+ pnl_pct = 0.0
919
+
920
+ trade = {
921
+ 'entry_date': position['entry_date'],
922
+ 'exit_date': exit_date,
923
+ 'symbol': position['symbol'],
924
+ 'signal': position['type'],
925
+ 'entry_price': position['entry_price'],
926
+ 'exit_price': exit_price,
927
+ 'quantity': position['quantity'],
928
+ 'pnl': pnl,
929
+ 'return_pct': pnl_pct,
930
+ 'exit_reason': close_reason,
931
+ 'stop_type': self.sl_config.get('type', 'none') if self.sl_enabled else 'none',
932
+ **kwargs
933
+ }
934
+
935
+ for key in ['call_strike', 'put_strike', 'expiration', 'contracts',
936
+ 'short_strike', 'long_strike', 'opt_type', 'spread_type',
937
+ 'entry_z_score', 'is_short_bias', 'entry_lean', 'exit_lean',
938
+ 'call_iv_entry', 'put_iv_entry', 'iv_lean_entry']:
939
+ if key in position:
940
+ trade[key] = position[key]
941
+
942
+ for key in ['short_entry_bid', 'short_entry_ask', 'short_entry_mid',
943
+ 'long_entry_bid', 'long_entry_ask', 'long_entry_mid',
944
+ 'underlying_entry_price']:
945
+ if key in position:
946
+ trade[key] = position[key]
947
+
948
+ for key in ['short_exit_bid', 'short_exit_ask',
949
+ 'long_exit_bid', 'long_exit_ask',
950
+ 'underlying_exit_price', 'underlying_change_pct',
951
+ 'stop_threshold', 'actual_value',
952
+ 'call_iv_exit', 'put_iv_exit', 'iv_lean_exit',
953
+ 'spy_intraday_high', 'spy_intraday_low', 'spy_intraday_close',
954
+ 'spy_stop_trigger_time', 'spy_stop_trigger_price',
955
+ 'spy_stop_trigger_bid', 'spy_stop_trigger_ask', 'spy_stop_trigger_last',
956
+ 'intraday_data_points', 'intraday_data_available', 'stop_triggered_by']:
957
+ if key in kwargs:
958
+ trade[key] = kwargs[key]
959
+
960
+ self.closed_trades.append(trade)
961
+
962
+ if self.sl_enabled and self.sl_manager:
963
+ self.sl_manager.remove_position(position_id)
964
+
965
+ if self.debug:
966
+ print(f"[PositionManager] CLOSE {position_id}: P&L=${pnl:.2f} ({pnl_pct:.2f}%) - {close_reason}")
967
+
968
+ return trade
969
+
970
+ def get_open_positions(self):
971
+ return list(self.positions.values())
972
+
973
+ def get_closed_trades(self):
974
+ return self.closed_trades
975
+
976
+ def close_all_positions(self, final_date, price_data, reason='end_of_backtest'):
977
+ """Close all open positions at end of backtest"""
978
+ for position_id in list(self.positions.keys()):
979
+ if position_id in price_data:
980
+ position = self.positions[position_id]
981
+
982
+ if isinstance(price_data[position_id], dict):
983
+ data = price_data[position_id]
984
+ exit_price = data.get('price', position['entry_price'])
985
+ pnl = data.get('pnl', None)
986
+ else:
987
+ exit_price = price_data[position_id]
988
+ pnl = None
989
+
990
+ if pnl is None and position['entry_price'] == 0:
991
+ if isinstance(price_data[position_id], dict) and 'pnl' in price_data[position_id]:
992
+ pnl = price_data[position_id]['pnl']
993
+
994
+ self.close_position(
995
+ position_id=position_id,
996
+ exit_date=final_date,
997
+ exit_price=exit_price,
998
+ close_reason=reason,
999
+ pnl=pnl
1000
+ )
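At the end of a run, any positions still open can be flattened with the same price_data mapping before the trade list is handed to the analyzer; a short sketch continuing the example above.

pm.close_all_positions(final_date=datetime(2024, 12, 31), price_data=price_data,
                       reason='end_of_backtest')
trades = pm.get_closed_trades()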
1001
+
1002
+
1003
+ # ============================================================
1004
+ # BACKTEST ANALYZER (unchanged)
468
1005
  # ============================================================
469
1006
  class BacktestAnalyzer:
470
1007
  """Calculate all metrics from BacktestResults"""
@@ -476,11 +1013,12 @@ class BacktestAnalyzer:
476
1013
  def calculate_all_metrics(self):
477
1014
  r = self.results
478
1015
 
479
- # Profitability
1016
+ self.metrics['initial_capital'] = r.initial_capital
1017
+ self.metrics['final_equity'] = r.final_capital
1018
+
480
1019
  self.metrics['total_pnl'] = r.final_capital - r.initial_capital
481
1020
  self.metrics['total_return'] = (self.metrics['total_pnl'] / r.initial_capital) * 100
482
1021
 
483
- # CAGR
484
1022
  if len(r.equity_dates) > 0:
485
1023
  start_date = min(r.equity_dates)
486
1024
  end_date = max(r.equity_dates)
@@ -501,7 +1039,6 @@ class BacktestAnalyzer:
501
1039
  self.metrics['cagr'] = 0
502
1040
  self.metrics['show_cagr'] = False
503
1041
 
504
- # Risk metrics
505
1042
  self.metrics['sharpe'] = self._sharpe_ratio(r.daily_returns)
506
1043
  self.metrics['sortino'] = self._sortino_ratio(r.daily_returns)
507
1044
  self.metrics['max_drawdown'] = r.max_drawdown
@@ -510,7 +1047,6 @@ class BacktestAnalyzer:
510
1047
  self.metrics['omega'] = self._omega_ratio(r.daily_returns)
511
1048
  self.metrics['ulcer'] = self._ulcer_index(r.equity_curve)
512
1049
 
513
- # VaR
514
1050
  self.metrics['var_95'], self.metrics['var_95_pct'] = self._calculate_var(r.daily_returns, 0.95)
515
1051
  self.metrics['var_99'], self.metrics['var_99_pct'] = self._calculate_var(r.daily_returns, 0.99)
516
1052
  self.metrics['cvar_95'], self.metrics['cvar_95_pct'] = self._calculate_cvar(r.daily_returns, 0.95)
@@ -520,25 +1056,20 @@ class BacktestAnalyzer:
520
1056
  self.metrics['var_99_dollar'] = self.metrics['var_99'] * avg_equity
521
1057
  self.metrics['cvar_95_dollar'] = self.metrics['cvar_95'] * avg_equity
522
1058
 
523
- # Distribution
524
1059
  self.metrics['tail_ratio'] = self._tail_ratio(r.daily_returns)
525
1060
  self.metrics['skewness'], self.metrics['kurtosis'] = self._skewness_kurtosis(r.daily_returns)
526
1061
 
527
- # Alpha/Beta
528
1062
  self.metrics['alpha'], self.metrics['beta'], self.metrics['r_squared'] = self._alpha_beta(r)
529
1063
 
530
- # Trading stats
531
1064
  if len(r.trades) > 0:
532
1065
  self._calculate_trading_stats(r.trades)
533
1066
  else:
534
1067
  self._set_empty_trading_stats()
535
1068
 
536
- # Efficiency
537
1069
  running_max = np.maximum.accumulate(r.equity_curve)
538
1070
  max_dd_dollars = np.min(np.array(r.equity_curve) - running_max)
539
1071
  self.metrics['recovery_factor'] = self.metrics['total_pnl'] / abs(max_dd_dollars) if max_dd_dollars != 0 else 0
540
1072
 
541
- # Exposure time
542
1073
  if len(r.trades) > 0 and 'start_date' in r.config and 'end_date' in r.config:
543
1074
  total_days = (pd.to_datetime(r.config['end_date']) - pd.to_datetime(r.config['start_date'])).days
544
1075
  self.metrics['exposure_time'] = self._exposure_time(r.trades, total_days)
@@ -724,15 +1255,116 @@ class BacktestAnalyzer:
724
1255
  days_with_positions = set()
725
1256
  for trade in trades:
726
1257
  entry = pd.to_datetime(trade['entry_date'])
727
- exit = pd.to_datetime(trade['exit_date'])
728
- date_range = pd.date_range(start=entry, end=exit, freq='D')
1258
+ exit_ = pd.to_datetime(trade['exit_date'])
1259
+ date_range = pd.date_range(start=entry, end=exit_, freq='D')
729
1260
  days_with_positions.update(date_range.date)
730
1261
  exposure_pct = (len(days_with_positions) / total_days) * 100
731
1262
  return min(exposure_pct, 100.0)
732
1263
 
733
1264
 
734
1265
  # ============================================================
735
- # RESULTS REPORTER
1266
+ # STOP-LOSS METRICS (unchanged)
1267
+ # ============================================================
1268
+ def calculate_stoploss_metrics(analyzer):
1269
+ """Calculate stop-loss specific metrics"""
1270
+ if len(analyzer.results.trades) == 0:
1271
+ _set_empty_stoploss_metrics(analyzer)
1272
+ return analyzer.metrics
1273
+
1274
+ trades_df = pd.DataFrame(analyzer.results.trades)
1275
+
1276
+ if 'exit_reason' not in trades_df.columns:
1277
+ _set_empty_stoploss_metrics(analyzer)
1278
+ return analyzer.metrics
1279
+
1280
+ sl_trades = trades_df[trades_df['exit_reason'].str.contains('stop_loss', na=False)]
1281
+ profit_target_trades = trades_df[trades_df['exit_reason'] == 'profit_target']
1282
+
1283
+ analyzer.metrics['stoploss_count'] = len(sl_trades)
1284
+ analyzer.metrics['stoploss_pct'] = (len(sl_trades) / len(trades_df)) * 100 if len(trades_df) > 0 else 0
1285
+ analyzer.metrics['profit_target_count'] = len(profit_target_trades)
1286
+ analyzer.metrics['profit_target_pct'] = (len(profit_target_trades) / len(trades_df)) * 100 if len(trades_df) > 0 else 0
1287
+
1288
+ if len(sl_trades) > 0:
1289
+ analyzer.metrics['avg_stoploss_pnl'] = sl_trades['pnl'].mean()
1290
+ analyzer.metrics['total_stoploss_loss'] = sl_trades['pnl'].sum()
1291
+ analyzer.metrics['worst_stoploss'] = sl_trades['pnl'].min()
1292
+
1293
+ if 'return_pct' in sl_trades.columns:
1294
+ analyzer.metrics['avg_stoploss_return_pct'] = sl_trades['return_pct'].mean()
1295
+ else:
1296
+ analyzer.metrics['avg_stoploss_return_pct'] = 0
1297
+
1298
+ if 'entry_date' in sl_trades.columns and 'exit_date' in sl_trades.columns:
1299
+ sl_trades_copy = sl_trades.copy()
1300
+ sl_trades_copy['entry_date'] = pd.to_datetime(sl_trades_copy['entry_date'])
1301
+ sl_trades_copy['exit_date'] = pd.to_datetime(sl_trades_copy['exit_date'])
1302
+ sl_trades_copy['days_held'] = (sl_trades_copy['exit_date'] - sl_trades_copy['entry_date']).dt.days
1303
+ analyzer.metrics['avg_days_to_stoploss'] = sl_trades_copy['days_held'].mean()
1304
+ analyzer.metrics['min_days_to_stoploss'] = sl_trades_copy['days_held'].min()
1305
+ analyzer.metrics['max_days_to_stoploss'] = sl_trades_copy['days_held'].max()
1306
+ else:
1307
+ analyzer.metrics['avg_days_to_stoploss'] = 0
1308
+ analyzer.metrics['min_days_to_stoploss'] = 0
1309
+ analyzer.metrics['max_days_to_stoploss'] = 0
1310
+
1311
+ if 'stop_type' in sl_trades.columns:
1312
+ stop_types = sl_trades['stop_type'].value_counts().to_dict()
1313
+ analyzer.metrics['stoploss_by_type'] = stop_types
1314
+ else:
1315
+ analyzer.metrics['stoploss_by_type'] = {}
1316
+ else:
1317
+ analyzer.metrics['avg_stoploss_pnl'] = 0
1318
+ analyzer.metrics['total_stoploss_loss'] = 0
1319
+ analyzer.metrics['worst_stoploss'] = 0
1320
+ analyzer.metrics['avg_stoploss_return_pct'] = 0
1321
+ analyzer.metrics['avg_days_to_stoploss'] = 0
1322
+ analyzer.metrics['min_days_to_stoploss'] = 0
1323
+ analyzer.metrics['max_days_to_stoploss'] = 0
1324
+ analyzer.metrics['stoploss_by_type'] = {}
1325
+
1326
+ if len(profit_target_trades) > 0 and len(sl_trades) > 0:
1327
+ avg_profit_target = profit_target_trades['pnl'].mean()
1328
+ avg_stoploss = abs(sl_trades['pnl'].mean())
1329
+ analyzer.metrics['profit_to_loss_ratio'] = avg_profit_target / avg_stoploss if avg_stoploss > 0 else 0
1330
+ else:
1331
+ analyzer.metrics['profit_to_loss_ratio'] = 0
1332
+
1333
+ if 'max_profit_before_stop' in sl_trades.columns:
1334
+ early_exits = sl_trades[sl_trades['max_profit_before_stop'] > 0]
1335
+ analyzer.metrics['early_exit_count'] = len(early_exits)
1336
+ analyzer.metrics['early_exit_pct'] = (len(early_exits) / len(sl_trades)) * 100 if len(sl_trades) > 0 else 0
1337
+ if len(early_exits) > 0:
1338
+ analyzer.metrics['avg_missed_profit'] = early_exits['max_profit_before_stop'].mean()
1339
+ else:
1340
+ analyzer.metrics['avg_missed_profit'] = 0
1341
+ else:
1342
+ analyzer.metrics['early_exit_count'] = 0
1343
+ analyzer.metrics['early_exit_pct'] = 0
1344
+ analyzer.metrics['avg_missed_profit'] = 0
1345
+
1346
+ exit_reasons = trades_df['exit_reason'].value_counts().to_dict()
1347
+ analyzer.metrics['exit_reasons'] = exit_reasons
1348
+
1349
+ return analyzer.metrics
1350
+
1351
+
1352
+ def _set_empty_stoploss_metrics(analyzer):
1353
+ analyzer.metrics.update({
1354
+ 'stoploss_count': 0, 'stoploss_pct': 0,
1355
+ 'profit_target_count': 0, 'profit_target_pct': 0,
1356
+ 'avg_stoploss_pnl': 0, 'total_stoploss_loss': 0,
1357
+ 'worst_stoploss': 0, 'avg_stoploss_return_pct': 0,
1358
+ 'avg_days_to_stoploss': 0, 'min_days_to_stoploss': 0,
1359
+ 'max_days_to_stoploss': 0, 'stoploss_by_type': {},
1360
+ 'profit_to_loss_ratio': 0, 'early_exit_count': 0,
1361
+ 'early_exit_pct': 0, 'avg_missed_profit': 0,
1362
+ 'exit_reasons': {}
1363
+ })
1364
+
1365
+
1366
+ # ============================================================
1367
+ # RESULTS REPORTER (unchanged)
736
1368
  # ============================================================
737
1369
  class ResultsReporter:
738
1370
  """Print comprehensive metrics report"""
@@ -747,15 +1379,6 @@ class ResultsReporter:
747
1379
  print("="*80)
748
1380
  print()
749
1381
 
750
- if hasattr(r, 'debug_info') and len(r.debug_info) > 0:
751
- print("DEBUG INFORMATION")
752
- print("-"*80)
753
- for debug_msg in r.debug_info[:10]:
754
- print(debug_msg)
755
- if len(r.debug_info) > 10:
756
- print(f"... and {len(r.debug_info) - 10} more messages")
757
- print()
758
-
759
1382
  print("PROFITABILITY METRICS")
760
1383
  print("-"*80)
761
1384
  print(f"Initial Capital: ${r.initial_capital:>15,.2f}")
@@ -775,7 +1398,7 @@ class ResultsReporter:
775
1398
  print(f"Sortino Ratio: {m['sortino']:>15.2f} (downside risk, >2 good)")
776
1399
  print(f"Calmar Ratio: {m['calmar']:>15.2f} (return/drawdown, >3 good)")
777
1400
  if m['omega'] != 0:
778
- omega_display = f"{m['omega']:.2f}" if m['omega'] < 999 else "inf"
1401
+ omega_display = f"{m['omega']:.2f}" if m['omega'] < 999 else "∞"
779
1402
  print(f"Omega Ratio: {omega_display:>15s} (gains/losses, >1 good)")
780
1403
  print(f"Maximum Drawdown: {m['max_drawdown']:>15.2f}% (peak to trough)")
781
1404
  if m['ulcer'] != 0:
@@ -797,16 +1420,16 @@ class ResultsReporter:
797
1420
  if m['beta'] != 0 or m['alpha'] != 0:
798
1421
  print(f"Alpha (vs {r.benchmark_symbol}): {m['alpha']:>15.2f}% (excess return)")
799
1422
  print(f"Beta (vs {r.benchmark_symbol}): {m['beta']:>15.2f} (<1 defensive, >1 aggressive)")
800
- print(f"R^2 (vs {r.benchmark_symbol}): {m['r_squared']:>15.2f} (market correlation 0-1)")
1423
+ print(f"R² (vs {r.benchmark_symbol}): {m['r_squared']:>15.2f} (market correlation 0-1)")
801
1424
 
802
1425
  if abs(m['total_return']) > 200 or m['volatility'] > 150:
803
1426
  print()
804
- print("⚠️ UNREALISTIC RESULTS DETECTED:")
1427
+ print("WARNING: UNREALISTIC RESULTS DETECTED")
805
1428
  if abs(m['total_return']) > 200:
806
- print(f" Total return {m['total_return']:.1f}% is extremely high")
1429
+ print(f" Total return {m['total_return']:.1f}% is extremely high")
807
1430
  if m['volatility'] > 150:
808
- print(f" Volatility {m['volatility']:.1f}% is higher than leveraged ETFs")
809
- print(" Review configuration before trusting results")
1431
+ print(f" Volatility {m['volatility']:.1f}% is higher than leveraged ETFs")
1432
+ print(" Review configuration before trusting results")
810
1433
 
811
1434
  print()
812
1435
 
@@ -838,14 +1461,60 @@ class ResultsReporter:
838
1461
  print("="*80)
839
1462
 
840
1463
 
1464
+ def print_stoploss_section(analyzer):
1465
+ """Print stop-loss analysis section"""
1466
+ m = analyzer.metrics
1467
+
1468
+ if m.get('stoploss_count', 0) == 0:
1469
+ return
1470
+
1471
+ print("STOP-LOSS ANALYSIS")
1472
+ print("-"*80)
1473
+
1474
+ print(f"Stop-Loss Trades: {m['stoploss_count']:>15} ({m['stoploss_pct']:.1f}% of total)")
1475
+ print(f"Profit Target Trades: {m['profit_target_count']:>15} ({m['profit_target_pct']:.1f}% of total)")
1476
+
1477
+ print(f"Avg Stop-Loss P&L: ${m['avg_stoploss_pnl']:>15,.2f}")
1478
+ print(f"Total Loss from SL: ${m['total_stoploss_loss']:>15,.2f}")
1479
+ print(f"Worst Stop-Loss: ${m['worst_stoploss']:>15,.2f}")
1480
+ print(f"Avg SL Return: {m['avg_stoploss_return_pct']:>15.2f}%")
1481
+
1482
+ if m['avg_days_to_stoploss'] > 0:
1483
+ print(f"Avg Days to SL: {m['avg_days_to_stoploss']:>15.1f}")
1484
+ print(f"Min/Max Days to SL: {m['min_days_to_stoploss']:>7} / {m['max_days_to_stoploss']:<7}")
1485
+
1486
+ if m['profit_to_loss_ratio'] > 0:
1487
+ print(f"Profit/Loss Ratio: {m['profit_to_loss_ratio']:>15.2f} (avg profit target / avg stop-loss)")
1488
+
1489
+ if m['early_exit_count'] > 0:
1490
+ print(f"Early Exits: {m['early_exit_count']:>15} ({m['early_exit_pct']:.1f}% of SL trades)")
1491
+ print(f"Avg Missed Profit: ${m['avg_missed_profit']:>15,.2f} (profit before stop triggered)")
1492
+
1493
+ if m['stoploss_by_type']:
1494
+ print(f"\nStop-Loss Types:")
1495
+ for stop_type, count in m['stoploss_by_type'].items():
1496
+ pct = (count / m['stoploss_count']) * 100
1497
+ print(f" {stop_type:20s} {count:>5} trades ({pct:.1f}%)")
1498
+
1499
+ if m.get('exit_reasons'):
1500
+ print(f"\nExit Reasons Distribution:")
1501
+ total_trades = sum(m['exit_reasons'].values())
1502
+ for reason, count in sorted(m['exit_reasons'].items(), key=lambda x: x[1], reverse=True):
1503
+ pct = (count / total_trades) * 100
1504
+ print(f" {reason:20s} {count:>5} trades ({pct:.1f}%)")
1505
+
1506
+ print()
1507
+ print("="*80)
1508
+
1509
+
841
1510
  # ============================================================
842
- # CHART GENERATOR
1511
+ # CHART GENERATOR (only core charts, optimization charts separate)
843
1512
  # ============================================================
844
1513
  class ChartGenerator:
845
1514
  """Generate 6 professional charts"""
846
1515
 
847
1516
  @staticmethod
848
- def create_all_charts(analyzer, filename='backtest_results.png'):
1517
+ def create_all_charts(analyzer, filename='backtest_results.png', show_plots=True):
849
1518
  r = analyzer.results
850
1519
 
851
1520
  if len(r.trades) == 0:
@@ -859,7 +1528,6 @@ class ChartGenerator:
859
1528
  dates = pd.to_datetime(r.equity_dates)
860
1529
  equity_array = np.array(r.equity_curve)
861
1530
 
862
- # 1. Equity Curve
863
1531
  ax1 = axes[0, 0]
864
1532
  ax1.plot(dates, equity_array, linewidth=2.5, color='#2196F3')
865
1533
  ax1.axhline(y=r.initial_capital, color='gray', linestyle='--', alpha=0.7)
@@ -873,7 +1541,6 @@ class ChartGenerator:
873
1541
  ax1.set_ylabel('Equity ($)')
874
1542
  ax1.grid(True, alpha=0.3)
875
1543
 
876
- # 2. Drawdown
877
1544
  ax2 = axes[0, 1]
878
1545
  running_max = np.maximum.accumulate(equity_array)
879
1546
  drawdown = (equity_array - running_max) / running_max * 100
@@ -883,7 +1550,6 @@ class ChartGenerator:
883
1550
  ax2.set_ylabel('Drawdown (%)')
884
1551
  ax2.grid(True, alpha=0.3)
885
1552
 
886
- # 3. P&L Distribution
887
1553
  ax3 = axes[1, 0]
888
1554
  pnl_values = trades_df['pnl'].values
889
1555
  ax3.hist(pnl_values, bins=40, color='#4CAF50', alpha=0.7, edgecolor='black')
@@ -892,37 +1558,34 @@ class ChartGenerator:
892
1558
  ax3.set_xlabel('P&L ($)')
893
1559
  ax3.grid(True, alpha=0.3, axis='y')
894
1560
 
895
- # 4. Signal Performance
896
1561
  ax4 = axes[1, 1]
897
1562
  if 'signal' in trades_df.columns:
898
1563
  signal_pnl = trades_df.groupby('signal')['pnl'].sum()
899
1564
  colors = ['#4CAF50' if x > 0 else '#f44336' for x in signal_pnl.values]
900
- ax4.bar(signal_pnl.index, signal_pnl.values, color=colors, alpha=0.7, edgecolor='black')
1565
+ ax4.bar(signal_pnl.index, signal_pnl.values, color=colors, alpha=0.7)
901
1566
  ax4.set_title('P&L by Signal', fontsize=12, fontweight='bold')
902
1567
  else:
903
1568
  ax4.text(0.5, 0.5, 'No signal data', ha='center', va='center', transform=ax4.transAxes)
904
1569
  ax4.axhline(y=0, color='black', linewidth=1)
905
1570
  ax4.grid(True, alpha=0.3, axis='y')
906
1571
 
907
- # 5. Monthly Returns
908
1572
  ax5 = axes[2, 0]
909
1573
  trades_df['exit_date'] = pd.to_datetime(trades_df['exit_date'])
910
1574
  trades_df['month'] = trades_df['exit_date'].dt.to_period('M')
911
1575
  monthly_pnl = trades_df.groupby('month')['pnl'].sum()
912
1576
  colors = ['#4CAF50' if x > 0 else '#f44336' for x in monthly_pnl.values]
913
- ax5.bar(range(len(monthly_pnl)), monthly_pnl.values, color=colors, alpha=0.7, edgecolor='black')
1577
+ ax5.bar(range(len(monthly_pnl)), monthly_pnl.values, color=colors, alpha=0.7)
914
1578
  ax5.set_title('Monthly P&L', fontsize=12, fontweight='bold')
915
1579
  ax5.set_xticks(range(len(monthly_pnl)))
916
1580
  ax5.set_xticklabels([str(m) for m in monthly_pnl.index], rotation=45, ha='right')
917
1581
  ax5.axhline(y=0, color='black', linewidth=1)
918
1582
  ax5.grid(True, alpha=0.3, axis='y')
919
1583
 
920
- # 6. Top Symbols
921
1584
  ax6 = axes[2, 1]
922
1585
  if 'symbol' in trades_df.columns:
923
1586
  symbol_pnl = trades_df.groupby('symbol')['pnl'].sum().sort_values(ascending=True).tail(10)
924
1587
  colors = ['#4CAF50' if x > 0 else '#f44336' for x in symbol_pnl.values]
925
- ax6.barh(range(len(symbol_pnl)), symbol_pnl.values, color=colors, alpha=0.7, edgecolor='black')
1588
+ ax6.barh(range(len(symbol_pnl)), symbol_pnl.values, color=colors, alpha=0.7)
926
1589
  ax6.set_yticks(range(len(symbol_pnl)))
927
1590
  ax6.set_yticklabels(symbol_pnl.index, fontsize=9)
928
1591
  ax6.set_title('Top Symbols', fontsize=12, fontweight='bold')
@@ -933,13 +1596,103 @@ class ChartGenerator:
933
1596
 
934
1597
  plt.tight_layout()
935
1598
  plt.savefig(filename, dpi=300, bbox_inches='tight')
936
- plt.show()
1599
+
1600
+ if show_plots:
1601
+ plt.show()
1602
+ else:
1603
+ plt.close() # Close the figure without displaying it
937
1604
 
938
1605
  print(f"Chart saved: {filename}")
939
1606
 
940
1607
 
1608
+ def create_stoploss_charts(analyzer, filename='stoploss_analysis.png', show_plots=True):
1609
+ """Create 4 stop-loss specific charts"""
1610
+ r = analyzer.results
1611
+ m = analyzer.metrics
1612
+
1613
+ if m.get('stoploss_count', 0) == 0:
1614
+ print("No stop-loss trades to visualize")
1615
+ return
1616
+
1617
+ trades_df = pd.DataFrame(r.trades)
1618
+
1619
+ if 'exit_reason' not in trades_df.columns:
1620
+ print("No exit_reason data available")
1621
+ return
1622
+
1623
+ fig, axes = plt.subplots(2, 2, figsize=(16, 12))
1624
+ fig.suptitle('Stop-Loss Analysis', fontsize=16, fontweight='bold', y=0.995)
1625
+
1626
+ ax1 = axes[0, 0]
1627
+ if m.get('exit_reasons'):
1628
+ reasons = pd.Series(m['exit_reasons']).sort_values(ascending=True)
1629
+ colors = ['#f44336' if 'stop_loss' in str(r) else '#4CAF50' if r == 'profit_target' else '#2196F3'
1630
+ for r in reasons.index]
1631
+ ax1.barh(range(len(reasons)), reasons.values, color=colors, alpha=0.7, edgecolor='black')
1632
+ ax1.set_yticks(range(len(reasons)))
1633
+ ax1.set_yticklabels([r.replace('_', ' ').title() for r in reasons.index])
1634
+ ax1.set_title('Exit Reasons Distribution', fontsize=12, fontweight='bold')
1635
+ ax1.set_xlabel('Number of Trades')
1636
+ ax1.grid(True, alpha=0.3, axis='x')
1637
+
1638
+ total = sum(reasons.values)
1639
+ for i, v in enumerate(reasons.values):
1640
+ ax1.text(v, i, f' {(v/total)*100:.1f}%', va='center', fontweight='bold')
1641
+
1642
+ ax2 = axes[0, 1]
1643
+ sl_trades = trades_df[trades_df['exit_reason'].str.contains('stop_loss', na=False)]
1644
+ if len(sl_trades) > 0:
1645
+ ax2.hist(sl_trades['pnl'], bins=30, color='#f44336', alpha=0.7, edgecolor='black')
1646
+ ax2.axvline(x=0, color='black', linestyle='--', linewidth=2)
1647
+ ax2.axvline(x=sl_trades['pnl'].mean(), color='yellow', linestyle='--', linewidth=2, label='Mean')
1648
+ ax2.set_title('Stop-Loss P&L Distribution', fontsize=12, fontweight='bold')
1649
+ ax2.set_xlabel('P&L ($)')
1650
+ ax2.set_ylabel('Frequency')
1651
+ ax2.legend()
1652
+ ax2.grid(True, alpha=0.3, axis='y')
1653
+
1654
+ ax3 = axes[1, 0]
1655
+ if len(sl_trades) > 0 and 'entry_date' in sl_trades.columns and 'exit_date' in sl_trades.columns:
1656
+ sl_trades_copy = sl_trades.copy()
1657
+ sl_trades_copy['entry_date'] = pd.to_datetime(sl_trades_copy['entry_date'])
1658
+ sl_trades_copy['exit_date'] = pd.to_datetime(sl_trades_copy['exit_date'])
1659
+ sl_trades_copy['days_held'] = (sl_trades_copy['exit_date'] - sl_trades_copy['entry_date']).dt.days
1660
+
1661
+ ax3.hist(sl_trades_copy['days_held'], bins=30, color='#FF9800', alpha=0.7, edgecolor='black')
1662
+ ax3.axvline(x=sl_trades_copy['days_held'].mean(), color='red', linestyle='--', linewidth=2, label='Mean')
1663
+ ax3.set_title('Days Until Stop-Loss Triggered', fontsize=12, fontweight='bold')
1664
+ ax3.set_xlabel('Days Held')
1665
+ ax3.set_ylabel('Frequency')
1666
+ ax3.legend()
1667
+ ax3.grid(True, alpha=0.3, axis='y')
1668
+
1669
+ ax4 = axes[1, 1]
1670
+ if 'stop_type' in sl_trades.columns:
1671
+ stop_types = sl_trades['stop_type'].value_counts()
1672
+ colors_types = plt.cm.Set3(range(len(stop_types)))
1673
+ wedges, texts, autotexts = ax4.pie(stop_types.values, labels=stop_types.index,
1674
+ autopct='%1.1f%%', colors=colors_types,
1675
+ startangle=90)
1676
+ for autotext in autotexts:
1677
+ autotext.set_color('black')
1678
+ autotext.set_fontweight('bold')
1679
+ ax4.set_title('Stop-Loss Types', fontsize=12, fontweight='bold')
1680
+ else:
1681
+ ax4.text(0.5, 0.5, 'No stop_type data', ha='center', va='center', transform=ax4.transAxes)
1682
+
1683
+ plt.tight_layout()
1684
+ plt.savefig(filename, dpi=300, bbox_inches='tight')
1685
+
1686
+ if show_plots:
1687
+ plt.show()
1688
+ else:
1689
+ plt.close()
1690
+
1691
+ print(f"Stop-loss charts saved: {filename}")
1692
+
1693
+
941
1694
  # ============================================================
942
- # RESULTS EXPORTER
1695
+ # RESULTS EXPORTER (unchanged)
943
1696
  # ============================================================
944
1697
  class ResultsExporter:
945
1698
  """Export results to CSV"""
@@ -954,15 +1707,81 @@ class ResultsExporter:
954
1707
  return
955
1708
 
956
1709
  trades_df = pd.DataFrame(r.trades)
1710
+
957
1711
  trades_df['entry_date'] = pd.to_datetime(trades_df['entry_date']).dt.strftime('%Y-%m-%d')
958
1712
  trades_df['exit_date'] = pd.to_datetime(trades_df['exit_date']).dt.strftime('%Y-%m-%d')
1713
+
1714
+ # Round numeric columns to 5 decimal places
1715
+ numeric_columns = trades_df.select_dtypes(include=[np.number]).columns
1716
+ for col in numeric_columns:
1717
+ trades_df[col] = trades_df[col].round(5)
1718
+
1719
+ core_columns = [
1720
+ 'entry_date', 'exit_date', 'symbol', 'signal',
1721
+ 'pnl', 'return_pct', 'exit_reason', 'stop_type'
1722
+ ]
1723
+
1724
+ options_columns = [
1725
+ 'short_strike', 'long_strike', 'expiration', 'opt_type',
1726
+ 'spread_type', 'contracts'
1727
+ ]
1728
+
1729
+ bidask_columns = [
1730
+ 'short_entry_bid', 'short_entry_ask', 'short_entry_mid',
1731
+ 'long_entry_bid', 'long_entry_ask', 'long_entry_mid',
1732
+ 'short_exit_bid', 'short_exit_ask',
1733
+ 'long_exit_bid', 'long_exit_ask'
1734
+ ]
1735
+
1736
+ underlying_columns = [
1737
+ 'underlying_entry_price', 'underlying_exit_price',
1738
+ 'underlying_change_pct'
1739
+ ]
1740
+
1741
+ stop_columns = [
1742
+ 'stop_threshold', 'actual_value'
1743
+ ]
1744
+
1745
+ strategy_columns = [
1746
+ 'entry_z_score', 'is_short_bias', 'entry_price',
1747
+ 'exit_price', 'quantity', 'entry_lean', 'exit_lean',
1748
+ # IV EOD fields
1749
+ 'call_iv_entry', 'put_iv_entry', 'call_iv_exit', 'put_iv_exit',
1750
+ 'iv_lean_entry', 'iv_lean_exit'
1751
+ ]
1752
+
1753
+ # NEW: Intraday stop-loss columns
1754
+ intraday_columns = [
1755
+ 'spy_intraday_high', 'spy_intraday_low', 'spy_intraday_close',
1756
+ 'spy_stop_trigger_time', 'spy_stop_trigger_price',
1757
+ 'spy_stop_trigger_bid', 'spy_stop_trigger_ask', 'spy_stop_trigger_last',
1758
+ 'intraday_data_points', 'intraday_data_available', 'stop_triggered_by'
1759
+ ]
1760
+
1761
+ ordered_columns = []
1762
+ for col in (core_columns + options_columns + bidask_columns +
1763
+ underlying_columns + stop_columns + strategy_columns + intraday_columns):
1764
+ if col in trades_df.columns:
1765
+ ordered_columns.append(col)
1766
+
1767
+ remaining = [col for col in trades_df.columns if col not in ordered_columns]
1768
+ ordered_columns.extend(remaining)
1769
+
1770
+ trades_df = trades_df[ordered_columns]
1771
+
1772
+ # Round numeric columns to 2 decimals
1773
+ numeric_columns = trades_df.select_dtypes(include=['float64', 'float32', 'float']).columns
1774
+ for col in numeric_columns:
1775
+ trades_df[col] = trades_df[col].round(5)
1776
+
959
1777
  trades_df.to_csv(f'{prefix}_trades.csv', index=False)
960
- print(f"Exported: {prefix}_trades.csv")
1778
+ print(f"Exported: {prefix}_trades.csv ({len(ordered_columns)} columns)")
961
1779
 
962
1780
  equity_df = pd.DataFrame({
963
1781
  'date': pd.to_datetime(r.equity_dates).strftime('%Y-%m-%d'),
964
1782
  'equity': r.equity_curve
965
1783
  })
1784
+ equity_df['equity'] = equity_df['equity'].round(5)
966
1785
  equity_df.to_csv(f'{prefix}_equity.csv', index=False)
967
1786
  print(f"Exported: {prefix}_equity.csv")
968
1787
 
@@ -979,28 +1798,54 @@ class ResultsExporter:
979
1798
  f.write(f"Trades: {m['total_trades']}\n")
980
1799
 
981
1800
  print(f"Exported: {prefix}_summary.txt")
1801
+
1802
+ # Export metrics as JSON with rounded values
1803
+ import json
1804
+ metrics_rounded = {}
1805
+ for key, value in m.items():
1806
+ if isinstance(value, (int, float)):
1807
+ metrics_rounded[key] = round(float(value), 5) if isinstance(value, float) else value
1808
+ else:
1809
+ metrics_rounded[key] = value
1810
+
1811
+ with open(f'{prefix}_metrics.json', 'w') as f:
1812
+ json.dump(metrics_rounded, f, indent=2)
1813
+
1814
+ print(f"Exported: {prefix}_metrics.json")
982
1815
 
983
1816
 
984
1817
  # ============================================================
985
- # RUN BACKTEST
1818
+ # RUN BACKTEST (unchanged)
986
1819
  # ============================================================
987
1820
  def run_backtest(strategy_function, config, print_report=True,
988
1821
  create_charts=True, export_results=True,
989
1822
  chart_filename='backtest_results.png',
990
- export_prefix='backtest'):
991
- """Run complete backtest with one command"""
1823
+ export_prefix='backtest',
1824
+ progress_context=None):
1825
+ """Run complete backtest"""
992
1826
 
993
- print("="*80)
994
- print(" "*25 + "STARTING BACKTEST")
995
- print("="*80)
996
- print(f"Strategy: {config.get('strategy_name', 'Unknown')}")
997
- print(f"Period: {config.get('start_date')} to {config.get('end_date')}")
998
- print(f"Capital: ${config.get('initial_capital', 0):,.0f}")
999
- print("="*80 + "\n")
1827
+ # Check if running inside optimization
1828
+ is_optimization = progress_context and progress_context.get('is_optimization', False)
1829
+
1830
+ if not progress_context and not is_optimization:
1831
+ print("="*80)
1832
+ print(" "*25 + "STARTING BACKTEST")
1833
+ print("="*80)
1834
+ print(f"Strategy: {config.get('strategy_name', 'Unknown')}")
1835
+ print(f"Period: {config.get('start_date')} to {config.get('end_date')}")
1836
+ print(f"Capital: ${config.get('initial_capital', 0):,.0f}")
1837
+ print("="*80 + "\n")
1838
+
1839
+ if progress_context:
1840
+ config['_progress_context'] = progress_context
1000
1841
 
1001
1842
  results = strategy_function(config)
1002
1843
 
1003
- print("\n[*] Calculating metrics...")
1844
+ if '_progress_context' in config:
1845
+ del config['_progress_context']
1846
+
1847
+ if not is_optimization:
1848
+ print("\n[*] Calculating metrics...")
1004
1849
  analyzer = BacktestAnalyzer(results)
1005
1850
  analyzer.calculate_all_metrics()
1006
1851
 
@@ -1008,26 +1853,1121 @@ def run_backtest(strategy_function, config, print_report=True,
1008
1853
  print("\n" + "="*80)
1009
1854
  ResultsReporter.print_full_report(analyzer)
1010
1855
 
1856
+ # Export charts during optimization if requested
1011
1857
  if create_charts and len(results.trades) > 0:
1012
- print(f"\n[*] Creating charts: {chart_filename}")
1858
+ if not is_optimization:
1859
+ print(f"\n[*] Creating charts: {chart_filename}")
1013
1860
  try:
1014
- ChartGenerator.create_all_charts(analyzer, chart_filename)
1861
+ # Don't show plots during optimization, just save them
1862
+ ChartGenerator.create_all_charts(analyzer, chart_filename, show_plots=not is_optimization)
1015
1863
  except Exception as e:
1016
- print(f"[ERROR] Charts failed: {e}")
1864
+ if not is_optimization:
1865
+ print(f"[ERROR] Charts failed: {e}")
1017
1866
 
1867
+ # Export results during optimization if requested
1018
1868
  if export_results and len(results.trades) > 0:
1019
- print(f"\n[*] Exporting: {export_prefix}_*")
1869
+ if not is_optimization:
1870
+ print(f"\n[*] Exporting: {export_prefix}_*")
1020
1871
  try:
1021
1872
  ResultsExporter.export_all(analyzer, export_prefix)
1022
1873
  except Exception as e:
1023
- print(f"[ERROR] Export failed: {e}")
1874
+ if not is_optimization:
1875
+ print(f"[ERROR] Export failed: {e}")
1876
+
1877
+ return analyzer
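# Usage sketch (illustrative; my_strategy and CONFIG are placeholder names for a strategy
# function and config dict). The progress_context shape mirrors the dict built by
# optimize_parameters() further below; the widget names are whatever the caller created.
analyzer = run_backtest(my_strategy, CONFIG)   # standalone run: banner, report, charts, exports

ctx = {'is_optimization': True,
       'progress_widgets': (progress_bar, status_label, monitor, start_time)}
analyzer = run_backtest(my_strategy, CONFIG, print_report=False,
                        progress_context=ctx)  # suppresses per-run output inside an optimization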
1878
+
1879
+
1880
+ def run_backtest_with_stoploss(strategy_function, config, print_report=True,
1881
+ create_charts=True, export_results=True,
1882
+ chart_filename='backtest_results.png',
1883
+ export_prefix='backtest',
1884
+ create_stoploss_report=True,
1885
+ create_stoploss_charts=True,
1886
+ progress_context=None):
1887
+ """Enhanced run_backtest with stop-loss analysis"""
1888
+
1889
+ analyzer = run_backtest(
1890
+ strategy_function, config,
1891
+ print_report=False,
1892
+ create_charts=create_charts,
1893
+ export_results=export_results,
1894
+ chart_filename=chart_filename,
1895
+ export_prefix=export_prefix,
1896
+ progress_context=progress_context
1897
+ )
1898
+
1899
+ calculate_stoploss_metrics(analyzer)
1900
+
1901
+ if print_report:
1902
+ print("\n" + "="*80)
1903
+ ResultsReporter.print_full_report(analyzer)
1904
+
1905
+ if create_stoploss_report and analyzer.metrics.get('stoploss_count', 0) > 0:
1906
+ print_stoploss_section(analyzer)
1907
+
1908
+ if create_stoploss_charts and analyzer.metrics.get('stoploss_count', 0) > 0:
1909
+ print(f"\n[*] Creating stop-loss analysis charts...")
1910
+ try:
1911
+ stoploss_chart_name = chart_filename.replace('.png', '_stoploss.png') if chart_filename else 'stoploss_analysis.png'
1912
+ # The boolean parameter shadows the module-level create_stoploss_charts()
+ # function, so resolve the function via globals() before calling it
+ globals()['create_stoploss_charts'](analyzer, stoploss_chart_name)
1913
+ except Exception as e:
1914
+ print(f"[ERROR] Stop-loss charts failed: {e}")
1024
1915
 
1025
1916
  return analyzer
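# Usage sketch (illustrative): wiring a stop-loss preset into the wrapper above;
# my_strategy and CONFIG are placeholders, and StopLossConfig is defined just below.
sl = StopLossConfig.combined(5, 3)            # P&L -5% AND 3% adverse underlying move
cfg = StopLossConfig.apply(CONFIG, sl)        # sets stop_loss_enabled / stop_loss_config
analyzer = run_backtest_with_stoploss(my_strategy, cfg,
                                      chart_filename='results.png',
                                      export_prefix='results')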
1026
1917
 
1027
1918
 
1919
+ # ============================================================
1920
+ # STOP-LOSS CONFIG (ENHANCED WITH COMBINED)
1921
+ # ============================================================
1922
+ class StopLossConfig:
1923
+ """
1924
+ Universal stop-loss configuration builder (ENHANCED)
1925
+
1926
+ NEW METHOD:
1927
+ - combined(): Requires BOTH pl_loss AND directional conditions
1928
+ """
1929
+
1930
+ @staticmethod
1931
+ def _normalize_pct(value):
1932
+ """Convert any number to decimal (0.30)"""
1933
+ if value >= 1:
1934
+ return value / 100
1935
+ return value
1936
+
1937
+ @staticmethod
1938
+ def _format_pct(value):
1939
+ """Format percentage for display"""
1940
+ if value >= 1:
1941
+ return f"{value:.0f}%"
1942
+ return f"{value*100:.0f}%"
1943
+
1944
+ @staticmethod
1945
+ def none():
1946
+ """No stop-loss"""
1947
+ return {
1948
+ 'enabled': False,
1949
+ 'type': 'none',
1950
+ 'value': 0,
1951
+ 'name': 'No Stop-Loss',
1952
+ 'description': 'No stop-loss protection'
1953
+ }
1954
+
1955
+ @staticmethod
1956
+ def fixed(pct):
1957
+ """Fixed percentage stop-loss"""
1958
+ decimal = StopLossConfig._normalize_pct(pct)
1959
+ display = StopLossConfig._format_pct(pct)
1960
+
1961
+ return {
1962
+ 'enabled': True,
1963
+ 'type': 'fixed_pct',
1964
+ 'value': decimal,
1965
+ 'name': f'Fixed {display}',
1966
+ 'description': f'Fixed stop at {display} loss'
1967
+ }
1968
+
1969
+ @staticmethod
1970
+ def trailing(pct, trailing_distance=None):
1971
+ """Trailing stop-loss"""
1972
+ decimal = StopLossConfig._normalize_pct(pct)
1973
+ display = StopLossConfig._format_pct(pct)
1974
+
1975
+ config = {
1976
+ 'enabled': True,
1977
+ 'type': 'trailing',
1978
+ 'value': decimal,
1979
+ 'name': f'Trailing {display}',
1980
+ 'description': f'Trailing stop at {display} from peak'
1981
+ }
1982
+
1983
+ if trailing_distance is not None:
1984
+ config['trailing_distance'] = StopLossConfig._normalize_pct(trailing_distance)
1985
+
1986
+ return config
1987
+
1988
+ @staticmethod
1989
+ def time_based(days):
1990
+ """Time-based stop"""
1991
+ return {
1992
+ 'enabled': True,
1993
+ 'type': 'time_based',
1994
+ 'value': days,
1995
+ 'name': f'Time {days}d',
1996
+ 'description': f'Exit after {days} days'
1997
+ }
1998
+
1999
+ @staticmethod
2000
+ def volatility(atr_multiplier):
2001
+ """ATR-based stop"""
2002
+ return {
2003
+ 'enabled': True,
2004
+ 'type': 'volatility',
2005
+ 'value': atr_multiplier,
2006
+ 'name': f'ATR {atr_multiplier:.1f}x',
2007
+ 'description': f'Stop at {atr_multiplier:.1f}× ATR',
2008
+ 'requires_atr': True
2009
+ }
2010
+
2011
+ @staticmethod
2012
+ def pl_loss(pct):
2013
+ """P&L-based stop using real bid/ask prices"""
2014
+ decimal = StopLossConfig._normalize_pct(pct)
2015
+ display = StopLossConfig._format_pct(pct)
2016
+
2017
+ return {
2018
+ 'enabled': True,
2019
+ 'type': 'pl_loss',
2020
+ 'value': decimal,
2021
+ 'name': f'P&L Loss {display}',
2022
+ 'description': f'Stop when P&L drops to -{display}'
2023
+ }
2024
+
2025
+ @staticmethod
2026
+ def directional(pct):
2027
+ """Directional stop based on underlying movement"""
2028
+ decimal = StopLossConfig._normalize_pct(pct)
2029
+ display = StopLossConfig._format_pct(pct)
2030
+
2031
+ return {
2032
+ 'enabled': True,
2033
+ 'type': 'directional',
2034
+ 'value': decimal,
2035
+ 'name': f'Directional {display}',
2036
+ 'description': f'Stop when underlying moves {display}'
2037
+ }
2038
+
2039
+ # ========================================================
2040
+ # NEW: COMBINED STOP (REQUIRES BOTH CONDITIONS)
2041
+ # ========================================================
2042
+
2043
+ @staticmethod
2044
+ def combined(pl_loss_pct, directional_pct):
2045
+ """
2046
+ Combined stop: requires BOTH conditions to trigger
2047
+
2048
+ Args:
2049
+ pl_loss_pct: P&L loss threshold (e.g., 5 or 0.05 = -5%)
2050
+ directional_pct: Underlying move threshold (e.g., 3 or 0.03 = 3%)
2051
+
2052
+ Example:
2053
+ StopLossConfig.combined(5, 3)
2054
+ # Triggers only when BOTH:
2055
+ # 1. P&L drops to -5%
2056
+ # 2. Underlying moves 3% adversely
2057
+ """
2058
+ pl_decimal = StopLossConfig._normalize_pct(pl_loss_pct)
2059
+ dir_decimal = StopLossConfig._normalize_pct(directional_pct)
2060
+
2061
+ pl_display = StopLossConfig._format_pct(pl_loss_pct)
2062
+ dir_display = StopLossConfig._format_pct(directional_pct)
2063
+
2064
+ return {
2065
+ 'enabled': True,
2066
+ 'type': 'combined',
2067
+ 'value': {
2068
+ 'pl_loss': pl_decimal,
2069
+ 'directional': dir_decimal
2070
+ },
2071
+ 'name': f'Combined (P&L {pl_display} + Dir {dir_display})',
2072
+ 'description': f'Stop when P&L drops below -{pl_display} AND underlying moves {dir_display} adversely'
2073
+ }
2074
+
2075
+ # ========================================================
2076
+ # BACKWARD COMPATIBILITY
2077
+ # ========================================================
2078
+
2079
+ @staticmethod
2080
+ def time(days):
2081
+ """Alias for time_based()"""
2082
+ return StopLossConfig.time_based(days)
2083
+
2084
+ @staticmethod
2085
+ def atr(multiplier):
2086
+ """Alias for volatility()"""
2087
+ return StopLossConfig.volatility(multiplier)
2088
+
2089
+ # ========================================================
2090
+ # PRESETS (WITH COMBINED STOPS)
2091
+ # ========================================================
2092
+
2093
+ @staticmethod
2094
+ def presets():
2095
+ """Generate all standard stop-loss presets (UPDATED WITH COMBINED)"""
2096
+ return {
2097
+ 'none': StopLossConfig.none(),
2098
+
2099
+ 'fixed_20': StopLossConfig.fixed(20),
2100
+ 'fixed_30': StopLossConfig.fixed(30),
2101
+ 'fixed_40': StopLossConfig.fixed(40),
2102
+ 'fixed_50': StopLossConfig.fixed(50),
2103
+ 'fixed_70': StopLossConfig.fixed(70),
2104
+
2105
+ 'trailing_20': StopLossConfig.trailing(20),
2106
+ 'trailing_30': StopLossConfig.trailing(30),
2107
+ 'trailing_50': StopLossConfig.trailing(50),
2108
+
2109
+ 'time_5d': StopLossConfig.time(5),
2110
+ 'time_10d': StopLossConfig.time(10),
2111
+ 'time_20d': StopLossConfig.time(20),
2112
+
2113
+ 'atr_2x': StopLossConfig.atr(2.0),
2114
+ 'atr_3x': StopLossConfig.atr(3.0),
2115
+
2116
+ 'pl_loss_5': StopLossConfig.pl_loss(5),
2117
+ 'pl_loss_10': StopLossConfig.pl_loss(10),
2118
+ 'pl_loss_15': StopLossConfig.pl_loss(15),
2119
+
2120
+ 'directional_3': StopLossConfig.directional(3),
2121
+ 'directional_5': StopLossConfig.directional(5),
2122
+ 'directional_7': StopLossConfig.directional(7),
2123
+
2124
+ # NEW: COMBINED STOPS
2125
+ 'combined_5_3': StopLossConfig.combined(5, 3),
2126
+ 'combined_7_5': StopLossConfig.combined(7, 5),
2127
+ 'combined_10_3': StopLossConfig.combined(10, 3),
2128
+ }
2129
+
2130
+ @staticmethod
2131
+ def apply(base_config, stop_config):
2132
+ """Apply stop-loss configuration to base config"""
2133
+ merged = base_config.copy()
2134
+
2135
+ merged['stop_loss_enabled'] = stop_config.get('enabled', False)
2136
+
2137
+ if merged['stop_loss_enabled']:
2138
+ sl_config = {
2139
+ 'type': stop_config['type'],
2140
+ 'value': stop_config['value']
2141
+ }
2142
+
2143
+ if 'trailing_distance' in stop_config:
2144
+ sl_config['trailing_distance'] = stop_config['trailing_distance']
2145
+
2146
+ merged['stop_loss_config'] = sl_config
2147
+
2148
+ return merged
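# Quick reference (illustrative): percentage normalization and preset usage; CONFIG is a placeholder.
StopLossConfig.fixed(30)['value']     # 0.30 -- whole numbers are divided by 100
StopLossConfig.fixed(0.30)['value']   # 0.30 -- decimals pass through unchanged
presets = StopLossConfig.presets()
cfg = StopLossConfig.apply(CONFIG, presets['combined_5_3'])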
2149
+
2150
+
2151
+ def create_stoploss_comparison_chart(results, filename='stoploss_comparison.png', show_plots=True):
2152
+ """Create comparison chart"""
2153
+ try:
2154
+ fig, axes = plt.subplots(2, 2, figsize=(16, 12))
2155
+ fig.suptitle('Stop-Loss Configuration Comparison', fontsize=16, fontweight='bold')
2156
+
2157
+ names = [r['config']['name'] for r in results.values()]
2158
+ returns = [r['total_return'] for r in results.values()]
2159
+ sharpes = [r['sharpe'] for r in results.values()]
2160
+ drawdowns = [r['max_drawdown'] for r in results.values()]
2161
+ stop_counts = [r['stoploss_count'] for r in results.values()]
2162
+
2163
+ ax1 = axes[0, 0]
2164
+ colors = ['#4CAF50' if r > 0 else '#f44336' for r in returns]
2165
+ ax1.barh(range(len(names)), returns, color=colors, alpha=0.7, edgecolor='black')
2166
+ ax1.set_yticks(range(len(names)))
2167
+ ax1.set_yticklabels(names, fontsize=9)
2168
+ ax1.set_xlabel('Total Return (%)')
2169
+ ax1.set_title('Total Return by Stop-Loss Type', fontsize=12, fontweight='bold')
2170
+ ax1.axvline(x=0, color='black', linestyle='-', linewidth=1)
2171
+ ax1.grid(True, alpha=0.3, axis='x')
2172
+
2173
+ ax2 = axes[0, 1]
2174
+ colors_sharpe = ['#4CAF50' if s > 1 else '#FF9800' if s > 0 else '#f44336' for s in sharpes]
2175
+ ax2.barh(range(len(names)), sharpes, color=colors_sharpe, alpha=0.7, edgecolor='black')
2176
+ ax2.set_yticks(range(len(names)))
2177
+ ax2.set_yticklabels(names, fontsize=9)
2178
+ ax2.set_xlabel('Sharpe Ratio')
2179
+ ax2.set_title('Sharpe Ratio by Stop-Loss Type', fontsize=12, fontweight='bold')
2180
+ ax2.axvline(x=1, color='green', linestyle='--', linewidth=1, label='Good (>1)')
2181
+ ax2.axvline(x=0, color='black', linestyle='-', linewidth=1)
2182
+ ax2.legend()
2183
+ ax2.grid(True, alpha=0.3, axis='x')
2184
+
2185
+ ax3 = axes[1, 0]
2186
+ ax3.barh(range(len(names)), drawdowns, color='#f44336', alpha=0.7, edgecolor='black')
2187
+ ax3.set_yticks(range(len(names)))
2188
+ ax3.set_yticklabels(names, fontsize=9)
2189
+ ax3.set_xlabel('Maximum Drawdown (%)')
2190
+ ax3.set_title('Maximum Drawdown (Lower is Better)', fontsize=12, fontweight='bold')
2191
+ ax3.grid(True, alpha=0.3, axis='x')
2192
+
2193
+ ax4 = axes[1, 1]
2194
+ ax4.barh(range(len(names)), stop_counts, color='#2196F3', alpha=0.7, edgecolor='black')
2195
+ ax4.set_yticks(range(len(names)))
2196
+ ax4.set_yticklabels(names, fontsize=9)
2197
+ ax4.set_xlabel('Number of Stop-Loss Exits')
2198
+ ax4.set_title('Stop-Loss Frequency', fontsize=12, fontweight='bold')
2199
+ ax4.grid(True, alpha=0.3, axis='x')
2200
+
2201
+ plt.tight_layout()
2202
+ plt.savefig(filename, dpi=300, bbox_inches='tight')
2203
+
2204
+ if show_plots:
2205
+ plt.show()
2206
+ else:
2207
+ plt.close()
2208
+
2209
+ print(f"Comparison chart saved: {filename}")
2210
+
2211
+ except Exception as e:
2212
+ print(f"Failed to create comparison chart: {e}")
2213
+
2214
+
2215
+
2216
+ # ============================================================
2217
+ # DATA PRELOADING FUNCTION (FOR OPTIMIZATION)
2218
+ # ============================================================
2219
+ def preload_options_data(config, progress_widgets=None):
2220
+ """
2221
+ Preload options data for optimization.
2222
+ Loads the data ONCE and returns a cache.
2223
+
2224
+ Returns:
2225
+ tuple: (lean_df, options_cache)
2226
+ - lean_df: DataFrame with the IV lean history
2227
+ - options_cache: dict {date: DataFrame} with options data
2228
+ """
2229
+ if progress_widgets:
2230
+ progress_bar, status_label, monitor, start_time = progress_widgets
2231
+ status_label.value = "<b style='color:#0066cc'>🔄 Preloading options data (ONCE)...</b>"
2232
+ progress_bar.value = 5
2233
+
2234
+ # Extract config
2235
+ from datetime import datetime, timedelta
2236
+ import pandas as pd
2237
+ import numpy as np
2238
+ import gc
2239
+
2240
+ start_date = datetime.strptime(config['start_date'], '%Y-%m-%d').date()
2241
+ end_date = datetime.strptime(config['end_date'], '%Y-%m-%d').date()
2242
+ symbol = config['symbol']
2243
+ dte_target = config.get('dte_target', 30)
2244
+ lookback_period = config.get('lookback_period', 60)
2245
+ chunk_months = config.get('chunk_months', 3)
2246
+
2247
+ # Calculate date chunks
2248
+ data_start = start_date - timedelta(days=lookback_period + 60)
2249
+
2250
+ date_chunks = []
2251
+ current_chunk_start = data_start
2252
+ while current_chunk_start <= end_date:
2253
+ chunk_end = min(
2254
+ current_chunk_start + timedelta(days=chunk_months * 31),
2255
+ end_date
2256
+ )
2257
+ date_chunks.append((current_chunk_start, chunk_end))
2258
+ current_chunk_start = chunk_end + timedelta(days=1)
2259
+
2260
+ # Store lean calculations
2261
+ lean_history = []
2262
+ options_cache = {} # {date: DataFrame with bid/ask data}
2263
+
2264
+ # Track time for ETA
2265
+ preload_start_time = time.time()
2266
+
2267
+ try:
2268
+ import ivolatility as ivol
2269
+ getOptionsData = ivol.setMethod('/equities/eod/options-rawiv')
2270
+
2271
+ # Process each chunk
2272
+ for chunk_idx, (chunk_start, chunk_end) in enumerate(date_chunks):
2273
+ if progress_widgets:
2274
+ # Use update_progress for full display with ETA, CPU, RAM
2275
+ update_progress(
2276
+ progress_bar, status_label, monitor,
2277
+ current=chunk_idx + 1,
2278
+ total=len(date_chunks),
2279
+ start_time=preload_start_time,
2280
+ message=f"🔄 Loading chunk {chunk_idx+1}/{len(date_chunks)}"
2281
+ )
2282
+
2283
+ raw_data = getOptionsData(
2284
+ symbol=symbol,
2285
+ from_=chunk_start.strftime('%Y-%m-%d'),
2286
+ to=chunk_end.strftime('%Y-%m-%d')
2287
+ )
2288
+
2289
+ if raw_data is None:
2290
+ continue
2291
+
2292
+ df = pd.DataFrame(raw_data)
2293
+
2294
+ if df.empty:
2295
+ continue
2296
+
2297
+ # Essential columns
2298
+ essential_cols = ['date', 'expiration', 'strike', 'Call/Put', 'iv', 'Adjusted close']
2299
+ if 'bid' in df.columns:
2300
+ essential_cols.append('bid')
2301
+ if 'ask' in df.columns:
2302
+ essential_cols.append('ask')
2303
+
2304
+ df = df[essential_cols].copy()
2305
+
2306
+ # Process bid/ask
2307
+ if 'bid' in df.columns:
2308
+ df['bid'] = pd.to_numeric(df['bid'], errors='coerce').astype('float32')
2309
+ else:
2310
+ df['bid'] = np.nan
2311
+
2312
+ if 'ask' in df.columns:
2313
+ df['ask'] = pd.to_numeric(df['ask'], errors='coerce').astype('float32')
2314
+ else:
2315
+ df['ask'] = np.nan
2316
+
2317
+ # Calculate mid price
2318
+ df['mid'] = (df['bid'] + df['ask']) / 2
2319
+ df['mid'] = df['mid'].fillna(df['iv'])
2320
+
2321
+ df['date'] = pd.to_datetime(df['date']).dt.date
2322
+ df['expiration'] = pd.to_datetime(df['expiration']).dt.date
2323
+ df['strike'] = pd.to_numeric(df['strike'], errors='coerce').astype('float32')
2324
+ df['iv'] = pd.to_numeric(df['iv'], errors='coerce').astype('float32')
2325
+ df['Adjusted close'] = pd.to_numeric(df['Adjusted close'], errors='coerce').astype('float32')
2326
+
2327
+ df['dte'] = (pd.to_datetime(df['expiration']) - pd.to_datetime(df['date'])).dt.days
2328
+ df['dte'] = df['dte'].astype('int16')
2329
+
2330
+ df = df.dropna(subset=['strike', 'iv', 'Adjusted close'])
2331
+
2332
+ if df.empty:
2333
+ del df
2334
+ gc.collect()
2335
+ continue
2336
+
2337
+ # Cache options data for position tracking
2338
+ for date_val in df['date'].unique():
2339
+ if date_val not in options_cache:
2340
+ options_cache[date_val] = df[df['date'] == date_val].copy()
2341
+
2342
+ # Calculate lean for this chunk
2343
+ trading_dates = sorted(df['date'].unique())
2344
+
2345
+ for current_date in trading_dates:
2346
+ day_data = df[df['date'] == current_date]
2347
+
2348
+ if day_data.empty:
2349
+ continue
2350
+
2351
+ stock_price = float(day_data['Adjusted close'].iloc[0])
2352
+
2353
+ dte_filtered = day_data[
2354
+ (day_data['dte'] >= dte_target - 7) &
2355
+ (day_data['dte'] <= dte_target + 7)
2356
+ ]
2357
+
2358
+ if dte_filtered.empty:
2359
+ continue
2360
+
2361
+ dte_filtered = dte_filtered.copy()
2362
+ dte_filtered['strike_diff'] = abs(dte_filtered['strike'] - stock_price)
2363
+ atm_idx = dte_filtered['strike_diff'].idxmin()
2364
+ atm_strike = float(dte_filtered.loc[atm_idx, 'strike'])
2365
+
2366
+ atm_options = dte_filtered[dte_filtered['strike'] == atm_strike]
2367
+ atm_call = atm_options[atm_options['Call/Put'] == 'C']
2368
+ atm_put = atm_options[atm_options['Call/Put'] == 'P']
2369
+
2370
+ if not atm_call.empty and not atm_put.empty:
2371
+ call_iv = float(atm_call['iv'].iloc[0])
2372
+ put_iv = float(atm_put['iv'].iloc[0])
2373
+
2374
+ if pd.notna(call_iv) and pd.notna(put_iv) and call_iv > 0 and put_iv > 0:
2375
+ iv_lean = call_iv - put_iv
2376
+
2377
+ lean_history.append({
2378
+ 'date': current_date,
2379
+ 'stock_price': stock_price,
2380
+ 'iv_lean': iv_lean
2381
+ })
2382
+
2383
+ del df, raw_data
2384
+ gc.collect()
2385
+
2386
+ lean_df = pd.DataFrame(lean_history)
2387
+ lean_df['stock_price'] = lean_df['stock_price'].astype('float32')
2388
+ lean_df['iv_lean'] = lean_df['iv_lean'].astype('float32')
2389
+
2390
+ del lean_history
2391
+ gc.collect()
2392
+
2393
+ if progress_widgets:
2394
+ status_label.value = f"<b style='color:#00cc00'>✓ Data preloaded: {len(lean_df)} days, {len(options_cache)} cached dates</b>"
2395
+ progress_bar.value = 35
2396
+
2397
+ print(f"✓ Data preloaded: {len(lean_df)} days, {len(options_cache)} cached dates")
2398
+
2399
+ return lean_df, options_cache
2400
+
2401
+ except Exception as e:
2402
+ print(f"Error preloading data: {e}")
2403
+ return pd.DataFrame(), {}
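# Usage sketch (illustrative): preload once, then attach the cache to a config copy via the
# same '_preloaded_*' keys that optimize_parameters() uses below. CONFIG is a placeholder
# dict with at least 'symbol', 'start_date' and 'end_date'; my_strategy is a placeholder too.
lean_df, options_cache = preload_options_data(CONFIG)
cfg = CONFIG.copy()
cfg['_preloaded_lean_df'] = lean_df
cfg['_preloaded_options_cache'] = options_cache
analyzer = run_backtest(my_strategy, cfg, print_report=False)   # strategy must check for the cache keys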
2404
+
2405
+
2406
+ # ============================================================
2407
+ # NEW: OPTIMIZATION FRAMEWORK
2408
+ # ============================================================
2409
+ def optimize_parameters(base_config, param_grid, strategy_function,
2410
+ optimization_metric='sharpe', min_trades=5,
2411
+ max_drawdown_limit=None, parallel=False,
2412
+ export_each_combo=True # ← NEW PARAMETER
2413
+ ):
2414
+ """
2415
+ Optimize strategy parameters across multiple combinations
2416
+
2417
+ Args:
2418
+ base_config: Base configuration dict
2419
+ param_grid: Dict of parameters to optimize
2420
+ Example: {'z_score_entry': [1.0, 1.5, 2.0], 'z_score_exit': [0.1, 0.3, 0.5]}
2421
+ strategy_function: Strategy function to run
2422
+ optimization_metric: Metric to optimize ('sharpe', 'total_return', 'profit_factor', 'calmar')
2423
+ min_trades: Minimum number of trades required
2424
+ max_drawdown_limit: Maximum acceptable drawdown (e.g., 0.10 for 10%)
2425
+ parallel: Use parallel processing (not implemented yet)
2426
+ export_each_combo: If True, exports files for each combination
2427
+
2428
+ Returns:
2429
+ tuple: (results_df, best_params, results_folder)
2430
+ """
2431
+
2432
+ # ═══ ADDED AT THE START OF THE FUNCTION ═══
2433
+ # Create results folder
2434
+ results_folder = create_optimization_folder()
2435
+ print(f"📊 Results will be saved to: {results_folder}\n")
2436
+
2437
+ # Record start time
2438
+ optimization_start_time = datetime.now()
2439
+ start_time_str = optimization_start_time.strftime('%Y-%m-%d %H:%M:%S')
2440
+
2441
+ print("\n" + "="*80)
2442
+ print(" "*20 + "PARAMETER OPTIMIZATION")
2443
+ print("="*80)
2444
+ print(f"Strategy: {base_config.get('strategy_name', 'Unknown')}")
2445
+ print(f"Period: {base_config.get('start_date')} to {base_config.get('end_date')}")
2446
+ print(f"Optimization Metric: {optimization_metric}")
2447
+ print(f"Min Trades: {min_trades}")
2448
+ print(f"🕐 Started: {start_time_str}")
2449
+ if max_drawdown_limit:
2450
+ print(f"Max Drawdown Limit: {max_drawdown_limit*100:.0f}%")
2451
+ print("="*80 + "\n")
2452
+
2453
+ # Generate all combinations
2454
+ param_names = list(param_grid.keys())
2455
+ param_values = list(param_grid.values())
2456
+ all_combinations = list(product(*param_values))
2457
+
2458
+ total_combinations = len(all_combinations)
2459
+ print(f"Testing {total_combinations} parameter combinations...")
2460
+ print(f"Parameters: {param_names}")
2461
+ print(f"Grid: {param_grid}\n")
2462
+
2463
+ # Create SHARED progress context for all backtests
2464
+ try:
2465
+ from IPython.display import display
2466
+ import ipywidgets as widgets
2467
+
2468
+ progress_bar = widgets.FloatProgress(
2469
+ value=0, min=0, max=100,
2470
+ description='Optimizing:',
2471
+ bar_style='info',
2472
+ layout=widgets.Layout(width='100%', height='30px')
2473
+ )
2474
+
2475
+ status_label = widgets.HTML(value="<b>Starting optimization...</b>")
2476
+ display(widgets.VBox([progress_bar, status_label]))
2477
+
2478
+ monitor = ResourceMonitor()
2479
+ opt_start_time = time.time()
2480
+
2481
+ # Create shared progress context (will suppress individual backtest progress)
2482
+ shared_progress = {
2483
+ 'progress_widgets': (progress_bar, status_label, monitor, opt_start_time),
2484
+ 'is_optimization': True
2485
+ }
2486
+ has_widgets = True
2487
+ except Exception:  # IPython / ipywidgets not available
2488
+ shared_progress = None
2489
+ has_widgets = False
2490
+ print("Running optimization (no progress bar)...")
2491
+
2492
+ # ═══════════════════════════════════════════════════════════════════════════
2493
+ # PRELOAD DATA ONCE (FOR ALL OPTIMIZATION ITERATIONS)
2494
+ # ═══════════════════════════════════════════════════════════════════════════
2495
+ print("\n" + "="*80)
2496
+ print("📥 PRELOADING OPTIONS DATA (loads ONCE, reused for all combinations)")
2497
+ print("="*80)
2498
+
2499
+ preloaded_lean_df, preloaded_options_cache = preload_options_data(
2500
+ base_config,
2501
+ progress_widgets=shared_progress['progress_widgets'] if shared_progress else None
2502
+ )
2503
+
2504
+ if preloaded_lean_df.empty:
2505
+ print("\n❌ ERROR: Failed to preload data. Cannot proceed with optimization.")
2506
+ return pd.DataFrame(), None, results_folder  # keep the documented 3-tuple return
2507
+
2508
+ print(f"✓ Preloading complete! Data will be reused for all {total_combinations} combinations")
2509
+ print("="*80 + "\n")
2510
+
2511
+ # ═══════════════════════════════════════════════════════════════════════════
2512
+ # RESET PROGRESS BAR FOR OPTIMIZATION LOOP
2513
+ # ═══════════════════════════════════════════════════════════════════════════
2514
+ if has_widgets:
2515
+ progress_bar.value = 0
2516
+ progress_bar.bar_style = 'info'
2517
+ status_label.value = "<b style='color:#0066cc'>Starting optimization loop...</b>"
2518
+
2519
+ # Run backtests
2520
+ results = []
2521
+ start_time = time.time()
2522
+
2523
+ for idx, param_combo in enumerate(all_combinations, 1):
2524
+ # Create test config
2525
+ test_config = base_config.copy()
2526
+
2527
+ # Update parameters
2528
+ for param_name, param_value in zip(param_names, param_combo):
2529
+ test_config[param_name] = param_value
2530
+
2531
+ # Update name
2532
+ param_str = "_".join([f"{k}={v}" for k, v in zip(param_names, param_combo)])
2533
+ test_config['strategy_name'] = f"{base_config.get('strategy_name', 'Strategy')} [{param_str}]"
2534
+
2535
+ # ═══ ADD PRELOADED DATA TO CONFIG ═══
2536
+ test_config['_preloaded_lean_df'] = preloaded_lean_df
2537
+ test_config['_preloaded_options_cache'] = preloaded_options_cache
2538
+
2539
+ # Update progress
2540
+ if has_widgets:
2541
+ # Use update_progress for full display with ETA, CPU, RAM
2542
+ update_progress(
2543
+ progress_bar, status_label, monitor,
2544
+ current=idx,
2545
+ total=total_combinations,
2546
+ start_time=start_time,
2547
+ message=f"Testing: {param_str}"
2548
+ )
2549
+ else:
2550
+ if idx % max(1, total_combinations // 10) == 0:
2551
+ print(f"[{idx}/{total_combinations}] {param_str}")
2552
+
2553
+ # ═══ MODIFIED run_backtest CALL (lines ~2240-2248) ═══
2554
+ try:
2555
+ # Create compact parameter string (e.g., Z1.0_E0.1_PT20)
2556
+ param_parts = []
2557
+ for name, value in zip(param_names, param_combo):
2558
+ if 'z_score_entry' in name:
2559
+ param_parts.append(f"Z{value}")
2560
+ elif 'z_score_exit' in name:
2561
+ param_parts.append(f"E{value}")
2562
+ elif 'profit_target' in name:
2563
+ if value is None:
2564
+ param_parts.append("PTNo")
2565
+ else:
2566
+ param_parts.append(f"PT{int(value*100)}")
2567
+ elif 'min_days' in name:
2568
+ param_parts.append(f"D{value}")
2569
+ else:
2570
+ # Generic short name for other params
2571
+ short_name = ''.join([c for c in name if c.isupper() or c.isdigit()])[:3]
2572
+ param_parts.append(f"{short_name}{value}")
2573
+
2574
+ compact_params = "_".join(param_parts)
2575
+
2576
+ # Create combo folder: c01_Z1.0_E0.1_PT20
2577
+ combo_folder = os.path.join(results_folder, f'c{idx:02d}_{compact_params}')
2578
+ os.makedirs(combo_folder, exist_ok=True)
2579
+
2580
+ # File prefix: c01_Z1.0_E0.1_PT20
2581
+ combo_prefix = f"c{idx:02d}_{compact_params}"
2582
+
2583
+ # Run backtest WITH EXPORT AND CHARTS (saved but not displayed)
2584
+ analyzer = run_backtest(
2585
+ strategy_function,
2586
+ test_config,
2587
+ print_report=False,
2588
+ create_charts=export_each_combo, # ← CREATE CHARTS (saved but not displayed)
2589
+ export_results=export_each_combo, # ← CHANGED
2590
+ progress_context=shared_progress,
2591
+ chart_filename=os.path.join(combo_folder, 'equity_curve.png') if export_each_combo else None, # ← CHARTS ARE SAVED
2592
+ export_prefix=os.path.join(combo_folder, combo_prefix) if export_each_combo else None # ← ADDED
2593
+ )
2594
+
2595
+ # Check validity
2596
+ is_valid = True
2597
+ invalid_reason = ""
2598
+
2599
+ if analyzer.metrics['total_trades'] < min_trades:
2600
+ is_valid = False
2601
+ invalid_reason = f"Too few trades ({analyzer.metrics['total_trades']})"
2602
+
2603
+ if max_drawdown_limit and analyzer.metrics['max_drawdown'] > (max_drawdown_limit * 100):
2604
+ is_valid = False
2605
+ invalid_reason = f"Excessive drawdown ({analyzer.metrics['max_drawdown']:.1f}%)"
2606
+
2607
+ # Print compact statistics for this combination
2608
+ status_symbol = "✓" if is_valid else "✗"
2609
+ status_color = "#00cc00" if is_valid else "#ff6666"
2610
+
2611
+ print(f"\n[{idx}/{total_combinations}] {param_str}")
2612
+ print("-" * 80)
2613
+ if is_valid:
2614
+ print(f" {status_symbol} Return: {analyzer.metrics['total_return']:>7.2f}% | "
2615
+ f"Sharpe: {analyzer.metrics['sharpe']:>6.2f} | "
2616
+ f"Max DD: {analyzer.metrics['max_drawdown']:>6.2f}% | "
2617
+ f"Trades: {analyzer.metrics['total_trades']:>3} | "
2618
+ f"Win Rate: {analyzer.metrics['win_rate']:>5.1f}% | "
2619
+ f"PF: {analyzer.metrics['profit_factor']:>5.2f}")
2620
+ else:
2621
+ print(f" {status_symbol} INVALID: {invalid_reason}")
2622
+
2623
+ # Update widget status with last result
2624
+ if has_widgets:
2625
+ result_text = f"Return: {analyzer.metrics['total_return']:.1f}% | Sharpe: {analyzer.metrics['sharpe']:.2f}" if is_valid else invalid_reason
2626
+
2627
+ # Get resource usage
2628
+ cpu_pct = monitor.get_cpu_percent()
2629
+ mem_info = monitor.get_memory_info()
2630
+ ram_mb = mem_info[0] # process_mb
2631
+ resource_text = f"CPU: {cpu_pct:.0f}% | RAM: {ram_mb:.0f}MB"
2632
+
2633
+ status_label.value = (
2634
+ f"<b style='color:{status_color}'>[{idx}/{total_combinations}] {param_str}</b><br>"
2635
+ f"<span style='color:#666'>{result_text}</span><br>"
2636
+ f"<span style='color:#999;font-size:10px'>{resource_text}</span>"
2637
+ )
2638
+
2639
+ # Store results
2640
+ result = {
2641
+ 'combination_id': idx,
2642
+ 'is_valid': is_valid,
2643
+ 'invalid_reason': invalid_reason,
2644
+ **{name: value for name, value in zip(param_names, param_combo)},
2645
+ 'total_return': analyzer.metrics['total_return'],
2646
+ 'sharpe': analyzer.metrics['sharpe'],
2647
+ 'sortino': analyzer.metrics['sortino'],
2648
+ 'calmar': analyzer.metrics['calmar'],
2649
+ 'max_drawdown': analyzer.metrics['max_drawdown'],
2650
+ 'win_rate': analyzer.metrics['win_rate'],
2651
+ 'profit_factor': analyzer.metrics['profit_factor'],
2652
+ 'total_trades': analyzer.metrics['total_trades'],
2653
+ 'avg_win': analyzer.metrics['avg_win'],
2654
+ 'avg_loss': analyzer.metrics['avg_loss'],
2655
+ 'volatility': analyzer.metrics['volatility'],
2656
+ }
2657
+
2658
+ results.append(result)
2659
+
2660
+ # Show intermediate summary every 10 combinations (or at end)
2661
+ if idx % 10 == 0 or idx == total_combinations:
2662
+ valid_so_far = [r for r in results if r['is_valid']]
2663
+ if valid_so_far:
2664
+ print("\n" + "="*80)
2665
+ print(f"INTERMEDIATE SUMMARY ({idx}/{total_combinations} tested)")
2666
+ print("="*80)
2667
+
2668
+ # Sort by optimization metric
2669
+ if optimization_metric == 'sharpe':
2670
+ valid_so_far.sort(key=lambda x: x['sharpe'], reverse=True)
2671
+ elif optimization_metric == 'total_return':
2672
+ valid_so_far.sort(key=lambda x: x['total_return'], reverse=True)
2673
+ elif optimization_metric == 'profit_factor':
2674
+ valid_so_far.sort(key=lambda x: x['profit_factor'], reverse=True)
2675
+ elif optimization_metric == 'calmar':
2676
+ valid_so_far.sort(key=lambda x: x['calmar'], reverse=True)
2677
+
2678
+ # Show top 3
2679
+ print(f"\n🏆 TOP 3 BY {optimization_metric.upper()}:")
2680
+ print("-"*80)
2681
+ for rank, res in enumerate(valid_so_far[:3], 1):
2682
+ params_display = ", ".join([f"{name}={res[name]}" for name in param_names])
2683
+ print(f" {rank}. [{params_display}]")
2684
+ print(f" Return: {res['total_return']:>7.2f}% | "
2685
+ f"Sharpe: {res['sharpe']:>6.2f} | "
2686
+ f"Max DD: {res['max_drawdown']:>6.2f}% | "
2687
+ f"Trades: {res['total_trades']:>3}")
2688
+
2689
+ print(f"\nValid: {len(valid_so_far)}/{idx} | "
2690
+ f"Invalid: {idx - len(valid_so_far)}/{idx}")
2691
+ print("="*80 + "\n")
2692
+
2693
+ except Exception as e:
2694
+ print(f"\n[{idx}/{total_combinations}] {param_str}")
2695
+ print("-" * 80)
2696
+ print(f" ✗ ERROR: {str(e)[:100]}")
2697
+
2698
+ result = {
2699
+ 'combination_id': idx,
2700
+ 'is_valid': False,
2701
+ 'invalid_reason': f"Error: {str(e)[:50]}",
2702
+ **{name: value for name, value in zip(param_names, param_combo)},
2703
+ 'total_return': 0, 'sharpe': 0, 'sortino': 0, 'calmar': 0,
2704
+ 'max_drawdown': 0, 'win_rate': 0, 'profit_factor': 0,
2705
+ 'total_trades': 0, 'avg_win': 0, 'avg_loss': 0, 'volatility': 0
2706
+ }
2707
+ results.append(result)
2708
+
2709
+ elapsed = time.time() - start_time
2710
+
2711
+ if has_widgets:
2712
+ progress_bar.value = 100
2713
+ progress_bar.bar_style = 'success'
2714
+ status_label.value = f"<b style='color:#00cc00'>✓ Optimization complete in {int(elapsed)}s</b>"
2715
+
2716
+ # Create results DataFrame
2717
+ results_df = pd.DataFrame(results)
2718
+
2719
+ # Round numeric columns to 5 decimal places
2720
+ numeric_columns = results_df.select_dtypes(include=['float64', 'float32', 'float']).columns
2721
+ for col in numeric_columns:
2722
+ results_df[col] = results_df[col].round(5)
2723
+
2724
+ # ═══ SAVE THE SUMMARY TO THE RESULTS FOLDER ═══
2725
+ summary_path = os.path.join(results_folder, 'optimization_summary.csv')
2726
+ results_df.to_csv(summary_path, index=False)
2727
+ print(f"\n✓ Summary saved: {summary_path}")
2728
+
2729
+ # Find best parameters
2730
+ valid_results = results_df[results_df['is_valid'] == True].copy()
2731
+
2732
+ if len(valid_results) == 0:
2733
+ print("\n" + "="*80)
2734
+ print("WARNING: No valid combinations found!")
2735
+ print("Try relaxing constraints or checking parameter ranges")
2736
+ print("="*80)
2737
+ return results_df, None, results_folder
2738
+
2739
+ # Select best based on metric
2740
+ if optimization_metric == 'sharpe':
2741
+ best_idx = valid_results['sharpe'].idxmax()
2742
+ elif optimization_metric == 'total_return':
2743
+ best_idx = valid_results['total_return'].idxmax()
2744
+ elif optimization_metric == 'profit_factor':
2745
+ best_idx = valid_results['profit_factor'].idxmax()
2746
+ elif optimization_metric == 'calmar':
2747
+ best_idx = valid_results['calmar'].idxmax()
2748
+ else:
2749
+ best_idx = valid_results['sharpe'].idxmax()
2750
+
2751
+ best_result = valid_results.loc[best_idx]
2752
+
2753
+ # Extract best parameters
2754
+ best_params = {name: best_result[name] for name in param_names}
2755
+
2756
+ # Calculate total time
2757
+ optimization_end_time = datetime.now()
2758
+ total_duration = optimization_end_time - optimization_start_time
2759
+ end_time_str = optimization_end_time.strftime('%Y-%m-%d %H:%M:%S')
2760
+ duration_str = format_time(total_duration.total_seconds())
2761
+
2762
+ # Print summary
2763
+ print("="*80)
2764
+ print(" "*20 + "OPTIMIZATION COMPLETE")
2765
+ print("="*80)
2766
+ print(f"\nTotal Combinations Tested: {total_combinations}")
2767
+ print(f"Valid Combinations: {len(valid_results)}")
2768
+ print(f"Invalid Combinations: {len(results_df) - len(valid_results)}")
2769
+ print(f"🕐 Started: {start_time_str}")
2770
+ print(f"🕐 Finished: {end_time_str}")
2771
+ print(f"⏱️ Total Duration: {duration_str}")
2772
+
2773
+ print(f"\n{'='*80}")
2774
+ print(" "*20 + "BEST PARAMETERS")
2775
+ print("="*80)
2776
+ for param_name, param_value in best_params.items():
2777
+ print(f"{param_name:25s}: {param_value}")
2778
+
2779
+ print(f"\n{'='*80}")
2780
+ print(" "*20 + "BEST PERFORMANCE")
2781
+ print("="*80)
2782
+ print(f"Total Return: {best_result['total_return']:>10.2f}%")
2783
+ print(f"Sharpe Ratio: {best_result['sharpe']:>10.2f}")
2784
+ print(f"Sortino Ratio: {best_result['sortino']:>10.2f}")
2785
+ print(f"Calmar Ratio: {best_result['calmar']:>10.2f}")
2786
+ print(f"Max Drawdown: {best_result['max_drawdown']:>10.2f}%")
2787
+ print(f"Win Rate: {best_result['win_rate']:>10.1f}%")
2788
+ print(f"Profit Factor: {best_result['profit_factor']:>10.2f}")
2789
+ print(f"Total Trades: {best_result['total_trades']:>10.0f}")
2790
+ print(f"Avg Win: ${best_result['avg_win']:>10.2f}")
2791
+ print(f"Avg Loss: ${best_result['avg_loss']:>10.2f}")
2792
+ print("="*80)
2793
+
2794
+ # ═══════════════════════════════════════════════════════════════════════════
2795
+ # NEW! FULL BACKTEST OF THE BEST COMBINATION WITH ALL CHARTS
2796
+ # ═══════════════════════════════════════════════════════════════════════════
2797
+ print("\n" + "="*80)
2798
+ print(" "*15 + "RUNNING FULL BACKTEST FOR BEST COMBINATION")
2799
+ print("="*80)
2800
+ print("\n📊 Creating detailed report for best combination...")
2801
+ print(f"Parameters: {', '.join([f'{k}={v}' for k, v in best_params.items()])}\n")
2802
+
2803
+ # Create config for best combination
2804
+ best_config = base_config.copy()
2805
+ best_config.update(best_params)
2806
+ best_config['_preloaded_lean_df'] = preloaded_lean_df
2807
+ best_config['_preloaded_options_cache'] = preloaded_options_cache
2808
+
2809
+ # Create folder for best combination
2810
+ best_combo_folder = os.path.join(results_folder, 'best_combination')
2811
+ os.makedirs(best_combo_folder, exist_ok=True)
2812
+
2813
+ # Run FULL backtest with ALL charts and exports
2814
+ # Note: progress_context=None, so plt.show() will be called; in a non-interactive run it may not render
2815
+ # The saved chart files can be displayed afterwards via IPython.display.Image
2816
+ best_analyzer = run_backtest(
2817
+ strategy_function,
2818
+ best_config,
2819
+ print_report=True, # ← SHOW THE FULL REPORT
2820
+ create_charts=True, # ← CREATE ALL CHARTS
2821
+ export_results=True, # ← EXPORT ALL FILES
2822
+ progress_context=None, # ← Normal (non-optimization) mode
2823
+ chart_filename=os.path.join(best_combo_folder, 'equity_curve.png'),
2824
+ export_prefix=os.path.join(best_combo_folder, 'best')
2825
+ )
2826
+
2827
+ # Save detailed metrics to optimization_metrics.csv
2828
+ metrics_data = {
2829
+ 'metric': list(best_analyzer.metrics.keys()),
2830
+ 'value': list(best_analyzer.metrics.values())
2831
+ }
2832
+ metrics_df = pd.DataFrame(metrics_data)
2833
+ metrics_path = os.path.join(results_folder, 'optimization_metrics.csv')
2834
+ metrics_df.to_csv(metrics_path, index=False)
2835
+
2836
+ print(f"\n✓ Detailed metrics saved: {metrics_path}")
2837
+ print(f"✓ Best combination results saved to: {best_combo_folder}/")
2838
+
2839
+ # ═══════════════════════════════════════════════════════════════════════════
2840
+ # DISPLAY THE BEST COMBINATION'S CHARTS IN THE NOTEBOOK
2841
+ # ═══════════════════════════════════════════════════════════════════════════
2842
+ try:
2843
+ # Charts are displayed in the notebook, not here
2844
+ chart_file = os.path.join(best_combo_folder, 'equity_curve.png')
2845
+ if os.path.exists(chart_file):
2846
+ print(f"\n📈 Best combination charts saved to: {chart_file}")
2847
+ except Exception as e:
2848
+ print(f"\n⚠ Could not display charts (saved to {best_combo_folder}/): {e}")
2849
+
2850
+ print("="*80 + "\n")
2851
+
2852
+ return results_df, best_params, results_folder
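# End-to-end usage sketch (illustrative): the grid keys must match config keys the strategy
# actually reads; my_strategy and CONFIG are placeholders.
import os

param_grid = {
    'z_score_entry': [1.0, 1.5, 2.0],
    'z_score_exit':  [0.1, 0.3],
}
results_df, best_params, folder = optimize_parameters(
    CONFIG, param_grid, my_strategy,
    optimization_metric='sharpe', min_trades=5, max_drawdown_limit=0.10)
if best_params is not None:
    plot_optimization_results(results_df, list(param_grid.keys()),
                              filename=os.path.join(folder, 'optimization_results.png'))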
2853
+
2854
+
2855
+ def plot_optimization_results(results_df, param_names, filename='optimization_results.png'):
2856
+ """
2857
+ Create visualization of optimization results
2858
+
2859
+ Args:
2860
+ results_df: Results DataFrame from optimize_parameters()
2861
+ param_names: List of parameter names
2862
+ filename: Output filename
2863
+ """
2864
+ import matplotlib.pyplot as plt
2865
+ import seaborn as sns
2866
+
2867
+ valid_results = results_df[results_df['is_valid'] == True].copy()
2868
+
2869
+ if valid_results.empty:
2870
+ print("No valid results to plot")
2871
+ return
2872
+
2873
+ sns.set_style("whitegrid")
2874
+
2875
+ fig = plt.figure(figsize=(18, 12))
2876
+
2877
+ # 1. Sharpe vs Total Return scatter
2878
+ ax1 = plt.subplot(2, 3, 1)
2879
+ scatter = ax1.scatter(
2880
+ valid_results['total_return'],
2881
+ valid_results['sharpe'],
2882
+ c=valid_results['max_drawdown'],
2883
+ s=valid_results['total_trades']*10,
2884
+ alpha=0.6,
2885
+ cmap='RdYlGn_r'
2886
+ )
2887
+ ax1.set_xlabel('Total Return (%)', fontsize=10)
2888
+ ax1.set_ylabel('Sharpe Ratio', fontsize=10)
2889
+ ax1.set_title('Sharpe vs Return (size=trades, color=drawdown)', fontsize=11, fontweight='bold')
2890
+ plt.colorbar(scatter, ax=ax1, label='Max Drawdown (%)')
2891
+ ax1.grid(True, alpha=0.3)
2892
+
2893
+ # 2. Parameter heatmap (if 2 parameters)
2894
+ if len(param_names) == 2:
2895
+ ax2 = plt.subplot(2, 3, 2)
2896
+ pivot_data = valid_results.pivot_table(
2897
+ values='sharpe',
2898
+ index=param_names[0],
2899
+ columns=param_names[1],
2900
+ aggfunc='mean'
2901
+ )
2902
+ sns.heatmap(pivot_data, annot=True, fmt='.2f', cmap='RdYlGn', ax=ax2)
2903
+ ax2.set_title(f'Sharpe Ratio Heatmap', fontsize=11, fontweight='bold')
2904
+ else:
2905
+ ax2 = plt.subplot(2, 3, 2)
2906
+ ax2.text(0.5, 0.5, 'Heatmap requires\nexactly 2 parameters',
2907
+ ha='center', va='center', fontsize=12)
2908
+ ax2.axis('off')
2909
+
2910
+ # 3. Win Rate vs Profit Factor
2911
+ ax3 = plt.subplot(2, 3, 3)
2912
+ scatter3 = ax3.scatter(
2913
+ valid_results['win_rate'],
2914
+ valid_results['profit_factor'],
2915
+ c=valid_results['sharpe'],
2916
+ s=100,
2917
+ alpha=0.6,
2918
+ cmap='viridis'
2919
+ )
2920
+ ax3.set_xlabel('Win Rate (%)', fontsize=10)
2921
+ ax3.set_ylabel('Profit Factor', fontsize=10)
2922
+ ax3.set_title('Win Rate vs Profit Factor (color=Sharpe)', fontsize=11, fontweight='bold')
2923
+ plt.colorbar(scatter3, ax=ax3, label='Sharpe Ratio')
2924
+ ax3.grid(True, alpha=0.3)
2925
+
2926
+ # 4. Distribution of Sharpe Ratios
2927
+ ax4 = plt.subplot(2, 3, 4)
2928
+ ax4.hist(valid_results['sharpe'], bins=20, color='steelblue', alpha=0.7, edgecolor='black')
2929
+ ax4.axvline(valid_results['sharpe'].mean(), color='red', linestyle='--', linewidth=2, label='Mean')
2930
+ ax4.axvline(valid_results['sharpe'].median(), color='green', linestyle='--', linewidth=2, label='Median')
2931
+ ax4.set_xlabel('Sharpe Ratio', fontsize=10)
2932
+ ax4.set_ylabel('Frequency', fontsize=10)
2933
+ ax4.set_title('Distribution of Sharpe Ratios', fontsize=11, fontweight='bold')
2934
+ ax4.legend()
2935
+ ax4.grid(True, alpha=0.3, axis='y')
2936
+
2937
+ # 5. Total Trades distribution
2938
+ ax5 = plt.subplot(2, 3, 5)
2939
+ ax5.hist(valid_results['total_trades'], bins=15, color='coral', alpha=0.7, edgecolor='black')
2940
+ ax5.set_xlabel('Total Trades', fontsize=10)
2941
+ ax5.set_ylabel('Frequency', fontsize=10)
2942
+ ax5.set_title('Distribution of Trade Counts', fontsize=11, fontweight='bold')
2943
+ ax5.grid(True, alpha=0.3, axis='y')
2944
+
2945
+ # 6. Top 10 combinations
2946
+ ax6 = plt.subplot(2, 3, 6)
2947
+ top_10 = valid_results.nlargest(10, 'sharpe')[['combination_id', 'sharpe']].sort_values('sharpe')
2948
+ ax6.barh(range(len(top_10)), top_10['sharpe'], color='green', alpha=0.7)
2949
+ ax6.set_yticks(range(len(top_10)))
2950
+ ax6.set_yticklabels([f"#{int(x)}" for x in top_10['combination_id']])
2951
+ ax6.set_xlabel('Sharpe Ratio', fontsize=10)
2952
+ ax6.set_title('Top 10 Combinations by Sharpe', fontsize=11, fontweight='bold')
2953
+ ax6.grid(True, alpha=0.3, axis='x')
2954
+
2955
+ plt.tight_layout()
2956
+ plt.savefig(filename, dpi=150, bbox_inches='tight')
2957
+ print(f"\nVisualization saved: {filename}")
2958
+ plt.close()  # Close without showing; the chart is displayed via display(Image) in the notebook
2959
+
2960
+
2961
+ # Export all
1028
2962
  __all__ = [
1029
2963
  'BacktestResults', 'BacktestAnalyzer', 'ResultsReporter',
1030
- 'ChartGenerator', 'ResultsExporter', 'run_backtest',
2964
+ 'ChartGenerator', 'ResultsExporter', 'run_backtest', 'run_backtest_with_stoploss',
1031
2965
  'init_api', 'api_call', 'APIHelper', 'APIManager',
1032
- 'ResourceMonitor', 'create_progress_bar', 'update_progress', 'format_time'
2966
+ 'ResourceMonitor', 'create_progress_bar', 'update_progress', 'format_time',
2967
+ 'StopLossManager', 'PositionManager', 'StopLossConfig',
2968
+ 'calculate_stoploss_metrics', 'print_stoploss_section', 'create_stoploss_charts',
2969
+ 'create_stoploss_comparison_chart',
2970
+ 'optimize_parameters', 'plot_optimization_results',
2971
+ 'create_optimization_folder',
2972
+ 'preload_options_data' # ← ДОБАВЛЕНО
1033
2973
  ]