ivolatility-backtesting 1.2.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ivolatility-backtesting might be problematic. See the registry's advisory page for this release for more details.

@@ -1,25 +1,38 @@
1
1
  """
2
2
  ivolatility_backtesting.py - UNIVERSAL BACKTEST FRAMEWORK
3
- Version 2.0 - With DataFrame/Dict Auto-Normalization
3
+ Version 4.1 - Dual RAM Display (Process + Container)
4
4
 
5
5
  Key Features:
6
- - Handles both dict and DataFrame API responses automatically
7
- - Universal BacktestResults interface
8
- - 30+ calculated metrics
9
- - Automatic reporting, charts, and exports
10
- - One-command runner: run_backtest()
6
+ - ResourceMonitor: CPU/RAM tracking (cgroups v2 + psutil fallback)
7
+ - Enhanced progress bar with ETA, CPU%, RAM
8
+ - Shows BOTH Python process RAM AND container total RAM
9
+ - api_call(): Auto-normalization for dict/DataFrame responses
10
+ - 30+ metrics, charts, exports
11
+ - One-command: run_backtest()
11
12
 
12
13
  Usage:
13
14
  from ivolatility_backtesting import *
14
15
 
15
- # Initialize API
16
16
  init_api(os.getenv("API_KEY"))
17
17
 
18
- # Use api_call for normalized responses
19
- data = api_call('/equities/eod/stock-prices', symbol='AAPL', from_='2024-01-01')
18
+ data = api_call('/equities/eod/stock-prices',
19
+ symbol='AAPL',
20
+ from_='2024-01-01',
21
+ to='2024-12-31',
22
+ debug=True)
20
23
 
21
- # Run backtest
22
24
  analyzer = run_backtest(my_strategy, CONFIG)
25
+
26
+ Resource Monitoring:
27
+ - CPU: Process CPU % (smoothed over 5 readings)
28
+ - RAM: Shows BOTH metrics when in container:
29
+ * Green: Python process memory (your strategy)
30
+ * Blue: Total container memory (includes Jupyter, cache, etc.)
31
+
32
+ Progress Display Example:
33
+ Processing 2024-07-30 (144/252)
34
+ ETA: 5m 23s | CPU: 46.8% | RAM: 856MB (42%) Python | 1280MB (64%) Container
35
+ Container: 1.0 cores, 2.0GB limit
23
36
  """
24
37
 
25
38
  import pandas as pd
@@ -32,26 +45,68 @@ import os
32
45
  import time
33
46
  import psutil
34
47
 
35
- # Set style
36
48
  sns.set_style('darkgrid')
37
49
  plt.rcParams['figure.figsize'] = (15, 8)
38
50
 
39
51
 
40
52
  # ============================================================
41
- # RESOURCE MONITOR - NEW!
53
+ # RESOURCE MONITOR
42
54
  # ============================================================
43
55
  class ResourceMonitor:
44
- """Monitor CPU and RAM using cgroups v2 and psutil fallback"""
56
+ """Monitor CPU and RAM - shows PROCESS resources (Python), not full container"""
45
57
 
46
- def __init__(self):
58
+ def __init__(self, show_container_total=False):
47
59
  self.process = psutil.Process()
48
60
  self.cpu_count = psutil.cpu_count()
49
61
  self.last_cpu_time = None
50
62
  self.last_check_time = None
51
63
  self.use_cgroups = self._check_cgroups_v2()
64
+ self.show_container_total = show_container_total # False = process RAM, True = container RAM
65
+
66
+ # CPU smoothing for more stable readings
67
+ self.cpu_history = []
68
+ self.cpu_history_max = 5 # Average over last 5 readings
69
+
70
+ # Determine actual CPU quota for containers
71
+ if self.use_cgroups:
72
+ quota = self._read_cpu_quota()
73
+ if quota and quota > 0:
74
+ self.cpu_count = quota # Override with container quota
75
+
76
+ self.context = "Container" if self.use_cgroups else "Host"
77
+
78
+ def _read_cpu_quota(self):
79
+ """Read CPU quota from cgroups v2 (returns cores, e.g., 1.5)"""
80
+ try:
81
+ with open('/sys/fs/cgroup/cpu.max', 'r') as f:
82
+ line = f.read().strip()
83
+ if line == 'max':
84
+ return None # No limit
85
+ parts = line.split()
86
+ if len(parts) == 2:
87
+ quota = int(parts[0]) # microseconds
88
+ period = int(parts[1]) # microseconds
89
+ return quota / period # cores (e.g., 100000/100000 = 1.0)
90
+ except:
91
+ pass
92
+ return None
93
+
94
+ def get_context_info(self):
95
+ """Returns monitoring context and resource limits"""
96
+ if self.use_cgroups:
97
+ current, max_mem = self._read_cgroup_memory()
98
+ ram_info = ""
99
+ if max_mem:
100
+ max_mem_gb = max_mem / (1024**3)
101
+ ram_info = f", {max_mem_gb:.1f}GB limit"
102
+
103
+ mem_type = "container total" if self.show_container_total else "process only"
104
+ return f"Container (CPU: {self.cpu_count:.1f} cores{ram_info}) - RAM: {mem_type}"
105
+ else:
106
+ total_ram_gb = psutil.virtual_memory().total / (1024**3)
107
+ return f"Host ({self.cpu_count} cores, {total_ram_gb:.0f}GB RAM) - RAM: process"
52
108
 
53
109
  def _check_cgroups_v2(self):
54
- """Check if cgroups v2 is available"""
55
110
  try:
56
111
  return os.path.exists('/sys/fs/cgroup/cpu.stat') and \
57
112
  os.path.exists('/sys/fs/cgroup/memory.current')
@@ -59,7 +114,6 @@ class ResourceMonitor:
59
114
  return False
60
115
 
61
116
  def _read_cgroup_cpu(self):
62
- """Read CPU usage from cgroups v2"""
63
117
  try:
64
118
  with open('/sys/fs/cgroup/cpu.stat', 'r') as f:
65
119
  for line in f:
@@ -70,7 +124,6 @@ class ResourceMonitor:
70
124
  return None
71
125
 
72
126
  def _read_cgroup_memory(self):
73
- """Read memory usage from cgroups v2"""
74
127
  try:
75
128
  with open('/sys/fs/cgroup/memory.current', 'r') as f:
76
129
  current = int(f.read().strip())
@@ -86,7 +139,7 @@ class ResourceMonitor:
86
139
  return None, None
87
140
 
88
141
  def get_cpu_percent(self):
89
- """Get CPU usage percentage with cgroups v2 fallback to psutil"""
142
+ """Get CPU% with smoothing - shows container limits if in container, host if not"""
90
143
  if self.use_cgroups:
91
144
  current_time = time.time()
92
145
  current_cpu = self._read_cgroup_cpu()
@@ -96,58 +149,95 @@ class ResourceMonitor:
96
149
  cpu_delta = current_cpu - self.last_cpu_time
97
150
 
98
151
  if time_delta > 0:
99
- # Convert microseconds to percentage
152
+ # Calculate based on container CPU quota
100
153
  cpu_percent = (cpu_delta / (time_delta * 1_000_000)) * 100
154
+
155
+ # Clamp to container limits
101
156
  cpu_percent = min(cpu_percent, 100 * self.cpu_count)
102
157
 
158
+ # Add to history for smoothing
159
+ self.cpu_history.append(cpu_percent)
160
+ if len(self.cpu_history) > self.cpu_history_max:
161
+ self.cpu_history.pop(0)
162
+
103
163
  self.last_cpu_time = current_cpu
104
164
  self.last_check_time = current_time
105
165
 
106
- return round(cpu_percent, 1)
166
+ # Return smoothed average
167
+ return round(sum(self.cpu_history) / len(self.cpu_history), 1)
107
168
 
108
169
  self.last_cpu_time = current_cpu
109
170
  self.last_check_time = current_time
110
171
 
111
- # Fallback to psutil
172
+ # Fallback: host resources with smoothing
112
173
  try:
113
174
  cpu = self.process.cpu_percent(interval=0.1)
114
- return round(cpu, 1) if cpu > 0 else round(psutil.cpu_percent(interval=0.1), 1)
175
+ if cpu == 0:
176
+ cpu = psutil.cpu_percent(interval=0.1)
177
+
178
+ self.cpu_history.append(cpu)
179
+ if len(self.cpu_history) > self.cpu_history_max:
180
+ self.cpu_history.pop(0)
181
+
182
+ return round(sum(self.cpu_history) / len(self.cpu_history), 1)
115
183
  except:
116
184
  return 0.0
117
185
 
118
186
  def get_memory_info(self):
119
- """Get memory usage (MB and %) with cgroups v2 fallback to psutil"""
120
- if self.use_cgroups:
121
- current, max_mem = self._read_cgroup_memory()
122
- if current and max_mem:
123
- mb = current / (1024 * 1024)
124
- percent = (current / max_mem) * 100
125
- return round(mb, 1), round(percent, 1)
187
+ """
188
+ Get memory usage - returns BOTH process and container/host
126
189
 
127
- # Fallback to psutil
190
+ Returns:
191
+ tuple: (process_mb, process_pct, container_mb, container_pct)
192
+ If no container, container values = process values
193
+ """
128
194
  try:
195
+ # Get process memory (Python only)
129
196
  mem = self.process.memory_info()
130
- mb = mem.rss / (1024 * 1024)
197
+ process_mb = mem.rss / (1024 * 1024)
198
+
199
+ if self.use_cgroups:
200
+ # Get container total and limit
201
+ current, max_mem = self._read_cgroup_memory()
202
+ if max_mem:
203
+ process_percent = (mem.rss / max_mem) * 100
204
+
205
+ if current:
206
+ container_mb = current / (1024 * 1024)
207
+ container_percent = (current / max_mem) * 100
208
+ return (
209
+ round(process_mb, 1),
210
+ round(process_percent, 1),
211
+ round(container_mb, 1),
212
+ round(container_percent, 1)
213
+ )
214
+
215
+ # No container data, return process only
216
+ return (
217
+ round(process_mb, 1),
218
+ round(process_percent, 1),
219
+ round(process_mb, 1),
220
+ round(process_percent, 1)
221
+ )
222
+
223
+ # Host: calculate % of total RAM
131
224
  total = psutil.virtual_memory().total
132
225
  percent = (mem.rss / total) * 100
133
- return round(mb, 1), round(percent, 1)
226
+
227
+ # On host, process = "container" (no container isolation)
228
+ return (
229
+ round(process_mb, 1),
230
+ round(percent, 1),
231
+ round(process_mb, 1),
232
+ round(percent, 1)
233
+ )
234
+
134
235
  except:
135
- return 0.0, 0.0
236
+ return 0.0, 0.0, 0.0, 0.0
136
237
 
137
238
 
138
239
  def create_progress_bar():
139
- """
140
- Create enhanced progress bar with ETA, CPU%, RAM
141
-
142
- Returns:
143
- tuple: (progress_bar, status_label, resource_monitor, start_time)
144
-
145
- Example:
146
- progress_bar, status_label, monitor, start_time = create_progress_bar()
147
-
148
- for i in range(total):
149
- update_progress(progress_bar, status_label, monitor, i, total, start_time)
150
- """
240
+ """Create enhanced progress bar with ETA, CPU%, RAM"""
151
241
  from IPython.display import display
152
242
  import ipywidgets as widgets
153
243
 
@@ -173,21 +263,11 @@ def create_progress_bar():
173
263
 
174
264
  def update_progress(progress_bar, status_label, monitor, current, total, start_time, message="Processing"):
175
265
  """
176
- Update progress bar with ETA, CPU%, RAM
177
-
178
- Args:
179
- progress_bar: Progress widget
180
- status_label: Status HTML widget
181
- monitor: ResourceMonitor instance
182
- current: Current iteration (0-based)
183
- total: Total iterations
184
- start_time: Start timestamp
185
- message: Status message
266
+ Update progress bar with ETA, CPU%, RAM (shows BOTH process and container)
186
267
  """
187
268
  progress = (current / total) * 100
188
269
  progress_bar.value = progress
189
270
 
190
- # Calculate ETA
191
271
  elapsed = time.time() - start_time
192
272
  if current > 0:
193
273
  eta_seconds = (elapsed / current) * (total - current)
@@ -195,14 +275,32 @@ def update_progress(progress_bar, status_label, monitor, current, total, start_t
195
275
  else:
196
276
  eta_str = "calculating..."
197
277
 
198
- # Get resources
199
278
  cpu = monitor.get_cpu_percent()
200
- ram_mb, ram_pct = monitor.get_memory_info()
279
+ process_mb, process_pct, container_mb, container_pct = monitor.get_memory_info()
280
+
281
+ # Build RAM display - show both if different, otherwise just one
282
+ if abs(container_mb - process_mb) > 10: # Significant difference (>10MB)
283
+ ram_display = (
284
+ f"RAM: <span style='color:#4CAF50'>{process_mb}MB ({process_pct}%)</span> Python | "
285
+ f"<span style='color:#2196F3'>{container_mb}MB ({container_pct}%)</span> Container"
286
+ )
287
+ else:
288
+ # Same values (on host or small difference)
289
+ ram_display = f"RAM: {process_mb}MB ({process_pct}%)"
290
+
291
+ # Context info
292
+ if monitor.use_cgroups:
293
+ context_info = f"Container: {monitor.cpu_count:.1f} cores"
294
+ current, max_mem = monitor._read_cgroup_memory()
295
+ if max_mem:
296
+ context_info += f", {max_mem / (1024**3):.1f}GB limit"
297
+ else:
298
+ context_info = f"Host: {monitor.cpu_count} cores"
201
299
 
202
- # Update status
203
300
  status_label.value = (
204
301
  f"<b style='color:#0066cc'>{message} ({current}/{total})</b><br>"
205
- f"<span style='color:#666'>ETA: {eta_str} | CPU: {cpu}% | RAM: {ram_mb}MB ({ram_pct}%)</span>"
302
+ f"<span style='color:#666'>ETA: {eta_str} | CPU: {cpu}% | {ram_display}</span><br>"
303
+ f"<span style='color:#999;font-size:10px'>{context_info}</span>"
206
304
  )
207
305
 
208
306
 
@@ -222,25 +320,15 @@ def format_time(seconds):
222
320
  # API HELPER - AUTOMATIC NORMALIZATION
223
321
  # ============================================================
224
322
  class APIHelper:
225
- """
226
- Normalizes API responses to consistent format
227
- Handles: dict, DataFrame, or None
228
- """
323
+ """Normalizes API responses to consistent format"""
229
324
 
230
325
  @staticmethod
231
326
  def normalize_response(response, debug=False):
232
- """
233
- Convert API response to dict format
234
-
235
- Returns:
236
- dict with 'data' key or None
237
- """
238
327
  if response is None:
239
328
  if debug:
240
329
  print("[APIHelper] Response is None")
241
330
  return None
242
331
 
243
- # Case 1: Dict with 'data' key
244
332
  if isinstance(response, dict):
245
333
  if 'data' in response:
246
334
  if debug:
@@ -251,7 +339,6 @@ class APIHelper:
251
339
  print("[APIHelper] Dict without 'data' key")
252
340
  return None
253
341
 
254
- # Case 2: DataFrame - convert to dict
255
342
  if isinstance(response, pd.DataFrame):
256
343
  if response.empty:
257
344
  if debug:
@@ -263,15 +350,11 @@ class APIHelper:
263
350
  print(f"[APIHelper] DataFrame converted: {len(records)} records")
264
351
  return {'data': records, 'status': 'success'}
265
352
 
266
- # Case 3: Unknown type
267
353
  if debug:
268
354
  print(f"[APIHelper] Unexpected type: {type(response)}")
269
355
  return None
270
356
 
271
357
 
272
- # ============================================================
273
- # API MANAGER
274
- # ============================================================
275
358
  class APIManager:
276
359
  """Centralized API key management"""
277
360
  _api_key = None
@@ -279,7 +362,6 @@ class APIManager:
279
362
 
280
363
  @classmethod
281
364
  def initialize(cls, api_key):
282
- """Initialize API with key"""
283
365
  if not api_key:
284
366
  raise ValueError("API key cannot be empty")
285
367
  cls._api_key = api_key
@@ -288,7 +370,6 @@ class APIManager:
288
370
 
289
371
  @classmethod
290
372
  def get_method(cls, endpoint):
291
- """Get API method with cached instances"""
292
373
  if cls._api_key is None:
293
374
  api_key = os.getenv("API_KEY")
294
375
  if not api_key:
@@ -302,55 +383,20 @@ class APIManager:
302
383
  return cls._methods[endpoint]
303
384
 
304
385
 
305
- # ============================================================
306
- # PUBLIC API FUNCTIONS
307
- # ============================================================
308
386
  def init_api(api_key=None):
309
- """
310
- Initialize IVolatility API
311
-
312
- Example:
313
- init_api("your-api-key")
314
- # or
315
- init_api() # Auto-load from API_KEY env variable
316
- """
387
+ """Initialize IVolatility API"""
317
388
  if api_key is None:
318
389
  api_key = os.getenv("API_KEY")
319
390
  APIManager.initialize(api_key)
320
391
 
321
392
 
322
393
  def api_call(endpoint, debug=False, **kwargs):
323
- """
324
- Make API call with automatic response normalization
325
-
326
- Args:
327
- endpoint: API endpoint path
328
- debug: Enable debug output (prints full URL with API key)
329
- **kwargs: API parameters
330
-
331
- Returns:
332
- dict with 'data' key or None
333
-
334
- Example:
335
- # Automatic handling of dict or DataFrame
336
- data = api_call('/equities/eod/stock-prices',
337
- symbol='AAPL',
338
- from_='2024-01-01',
339
- debug=True)
340
-
341
- if data:
342
- df = pd.DataFrame(data['data'])
343
- """
394
+ """Make API call with automatic response normalization"""
344
395
  try:
345
- # Build full URL for debugging
346
396
  if debug and APIManager._api_key:
347
397
  base_url = "https://restapi.ivolatility.com"
348
-
349
- # Convert Python parameter names to API parameter names
350
- # from_ -> from, to_ -> to
351
398
  url_params = {}
352
399
  for key, value in kwargs.items():
353
- # Remove trailing underscore for reserved Python keywords
354
400
  clean_key = key.rstrip('_') if key.endswith('_') else key
355
401
  url_params[clean_key] = value
356
402
 
@@ -365,7 +411,7 @@ def api_call(endpoint, debug=False, **kwargs):
365
411
  normalized = APIHelper.normalize_response(response, debug=debug)
366
412
 
367
413
  if normalized is None and debug:
368
- print(f"[api_call] Failed to get data")
414
+ print(f"[api_call] Failed to get data")
369
415
  print(f"[api_call] Endpoint: {endpoint}")
370
416
  print(f"[api_call] Params: {kwargs}")
371
417
 
@@ -373,7 +419,7 @@ def api_call(endpoint, debug=False, **kwargs):
373
419
 
374
420
  except Exception as e:
375
421
  if debug:
376
- print(f"[api_call] Exception: {e}")
422
+ print(f"[api_call] Exception: {e}")
377
423
  print(f"[api_call] Endpoint: {endpoint}")
378
424
  print(f"[api_call] Params: {kwargs}")
379
425
  return None
@@ -385,16 +431,9 @@ def api_call(endpoint, debug=False, **kwargs):
385
431
  class BacktestResults:
386
432
  """Universal container for backtest results"""
387
433
 
388
- def __init__(self,
389
- equity_curve,
390
- equity_dates,
391
- trades,
392
- initial_capital,
393
- config,
394
- benchmark_prices=None,
395
- benchmark_symbol='SPY',
396
- daily_returns=None,
397
- debug_info=None):
434
+ def __init__(self, equity_curve, equity_dates, trades, initial_capital,
435
+ config, benchmark_prices=None, benchmark_symbol='SPY',
436
+ daily_returns=None, debug_info=None):
398
437
 
399
438
  self.equity_curve = equity_curve
400
439
  self.equity_dates = equity_dates
@@ -425,24 +464,23 @@ class BacktestResults:
425
464
 
426
465
 
427
466
  # ============================================================
428
- # BACKTEST ANALYZER
467
+ # BACKTEST ANALYZER (30+ METRICS)
429
468
  # ============================================================
430
469
  class BacktestAnalyzer:
431
- """Calculate 30+ metrics from BacktestResults"""
470
+ """Calculate all metrics from BacktestResults"""
432
471
 
433
472
  def __init__(self, results):
434
473
  self.results = results
435
474
  self.metrics = {}
436
-
475
+
437
476
  def calculate_all_metrics(self):
438
- """Calculate all available metrics"""
439
477
  r = self.results
440
478
 
441
479
  # Profitability
442
480
  self.metrics['total_pnl'] = r.final_capital - r.initial_capital
443
481
  self.metrics['total_return'] = (self.metrics['total_pnl'] / r.initial_capital) * 100
444
482
 
445
- # CAGR with zero-division protection
483
+ # CAGR
446
484
  if len(r.equity_dates) > 0:
447
485
  start_date = min(r.equity_dates)
448
486
  end_date = max(r.equity_dates)
@@ -467,12 +505,7 @@ class BacktestAnalyzer:
467
505
  self.metrics['sharpe'] = self._sharpe_ratio(r.daily_returns)
468
506
  self.metrics['sortino'] = self._sortino_ratio(r.daily_returns)
469
507
  self.metrics['max_drawdown'] = r.max_drawdown
470
-
471
- if len(r.daily_returns) > 0:
472
- self.metrics['volatility'] = np.std(r.daily_returns) * np.sqrt(252) * 100
473
- else:
474
- self.metrics['volatility'] = 0
475
-
508
+ self.metrics['volatility'] = np.std(r.daily_returns) * np.sqrt(252) * 100 if len(r.daily_returns) > 0 else 0
476
509
  self.metrics['calmar'] = abs(self.metrics['total_return'] / r.max_drawdown) if r.max_drawdown > 0 else 0
477
510
  self.metrics['omega'] = self._omega_ratio(r.daily_returns)
478
511
  self.metrics['ulcer'] = self._ulcer_index(r.equity_curve)
@@ -496,37 +529,9 @@ class BacktestAnalyzer:
496
529
 
497
530
  # Trading stats
498
531
  if len(r.trades) > 0:
499
- trades_df = pd.DataFrame(r.trades)
500
- winning = trades_df[trades_df['pnl'] > 0]
501
- losing = trades_df[trades_df['pnl'] <= 0]
502
-
503
- self.metrics['total_trades'] = len(trades_df)
504
- self.metrics['winning_trades'] = len(winning)
505
- self.metrics['losing_trades'] = len(losing)
506
- self.metrics['win_rate'] = (len(winning) / len(trades_df)) * 100 if len(trades_df) > 0 else 0
507
-
508
- wins_sum = winning['pnl'].sum() if len(winning) > 0 else 0
509
- losses_sum = abs(losing['pnl'].sum()) if len(losing) > 0 else 0
510
- self.metrics['profit_factor'] = wins_sum / losses_sum if losses_sum > 0 else float('inf')
511
-
512
- self.metrics['avg_win'] = winning['pnl'].mean() if len(winning) > 0 else 0
513
- self.metrics['avg_loss'] = losing['pnl'].mean() if len(losing) > 0 else 0
514
- self.metrics['best_trade'] = trades_df['pnl'].max()
515
- self.metrics['worst_trade'] = trades_df['pnl'].min()
516
-
517
- if len(winning) > 0 and len(losing) > 0:
518
- self.metrics['avg_win_loss_ratio'] = abs(self.metrics['avg_win'] / self.metrics['avg_loss'])
519
- else:
520
- self.metrics['avg_win_loss_ratio'] = 0
521
-
522
- self.metrics['max_win_streak'], self.metrics['max_loss_streak'] = self._win_loss_streaks(r.trades)
532
+ self._calculate_trading_stats(r.trades)
523
533
  else:
524
- self.metrics.update({
525
- 'total_trades': 0, 'winning_trades': 0, 'losing_trades': 0,
526
- 'win_rate': 0, 'profit_factor': 0, 'avg_win': 0, 'avg_loss': 0,
527
- 'best_trade': 0, 'worst_trade': 0, 'avg_win_loss_ratio': 0,
528
- 'max_win_streak': 0, 'max_loss_streak': 0
529
- })
534
+ self._set_empty_trading_stats()
530
535
 
531
536
  # Efficiency
532
537
  running_max = np.maximum.accumulate(r.equity_curve)
@@ -542,6 +547,40 @@ class BacktestAnalyzer:
542
547
 
543
548
  return self.metrics
544
549
 
550
+ def _calculate_trading_stats(self, trades):
551
+ trades_df = pd.DataFrame(trades)
552
+ winning = trades_df[trades_df['pnl'] > 0]
553
+ losing = trades_df[trades_df['pnl'] <= 0]
554
+
555
+ self.metrics['total_trades'] = len(trades_df)
556
+ self.metrics['winning_trades'] = len(winning)
557
+ self.metrics['losing_trades'] = len(losing)
558
+ self.metrics['win_rate'] = (len(winning) / len(trades_df)) * 100 if len(trades_df) > 0 else 0
559
+
560
+ wins_sum = winning['pnl'].sum() if len(winning) > 0 else 0
561
+ losses_sum = abs(losing['pnl'].sum()) if len(losing) > 0 else 0
562
+ self.metrics['profit_factor'] = wins_sum / losses_sum if losses_sum > 0 else float('inf')
563
+
564
+ self.metrics['avg_win'] = winning['pnl'].mean() if len(winning) > 0 else 0
565
+ self.metrics['avg_loss'] = losing['pnl'].mean() if len(losing) > 0 else 0
566
+ self.metrics['best_trade'] = trades_df['pnl'].max()
567
+ self.metrics['worst_trade'] = trades_df['pnl'].min()
568
+
569
+ if len(winning) > 0 and len(losing) > 0:
570
+ self.metrics['avg_win_loss_ratio'] = abs(self.metrics['avg_win'] / self.metrics['avg_loss'])
571
+ else:
572
+ self.metrics['avg_win_loss_ratio'] = 0
573
+
574
+ self.metrics['max_win_streak'], self.metrics['max_loss_streak'] = self._win_loss_streaks(trades)
575
+
576
+ def _set_empty_trading_stats(self):
577
+ self.metrics.update({
578
+ 'total_trades': 0, 'winning_trades': 0, 'losing_trades': 0,
579
+ 'win_rate': 0, 'profit_factor': 0, 'avg_win': 0, 'avg_loss': 0,
580
+ 'best_trade': 0, 'worst_trade': 0, 'avg_win_loss_ratio': 0,
581
+ 'max_win_streak': 0, 'max_loss_streak': 0
582
+ })
583
+
545
584
  def _sharpe_ratio(self, returns):
546
585
  if len(returns) < 2:
547
586
  return 0
@@ -760,7 +799,6 @@ class ResultsReporter:
760
799
  print(f"Beta (vs {r.benchmark_symbol}): {m['beta']:>15.2f} (<1 defensive, >1 aggressive)")
761
800
  print(f"R^2 (vs {r.benchmark_symbol}): {m['r_squared']:>15.2f} (market correlation 0-1)")
762
801
 
763
- # Warning for unrealistic results
764
802
  if abs(m['total_return']) > 200 or m['volatility'] > 150:
765
803
  print()
766
804
  print("⚠️ UNREALISTIC RESULTS DETECTED:")
@@ -809,7 +847,6 @@ class ChartGenerator:
809
847
  @staticmethod
810
848
  def create_all_charts(analyzer, filename='backtest_results.png'):
811
849
  r = analyzer.results
812
- m = analyzer.metrics
813
850
 
814
851
  if len(r.trades) == 0:
815
852
  print("No trades to visualize")
@@ -817,13 +854,12 @@ class ChartGenerator:
817
854
 
818
855
  trades_df = pd.DataFrame(r.trades)
819
856
  fig, axes = plt.subplots(3, 2, figsize=(18, 14))
820
- fig.suptitle('Backtest Results - Comprehensive Analysis',
821
- fontsize=16, fontweight='bold', y=0.995)
857
+ fig.suptitle('Backtest Results', fontsize=16, fontweight='bold', y=0.995)
822
858
 
823
859
  dates = pd.to_datetime(r.equity_dates)
824
860
  equity_array = np.array(r.equity_curve)
825
861
 
826
- # Equity Curve
862
+ # 1. Equity Curve
827
863
  ax1 = axes[0, 0]
828
864
  ax1.plot(dates, equity_array, linewidth=2.5, color='#2196F3')
829
865
  ax1.axhline(y=r.initial_capital, color='gray', linestyle='--', alpha=0.7)
@@ -833,78 +869,66 @@ class ChartGenerator:
833
869
  ax1.fill_between(dates, r.initial_capital, equity_array,
834
870
  where=(equity_array < r.initial_capital),
835
871
  alpha=0.3, color='red', interpolate=True)
836
- ax1.set_title('Portfolio Equity Curve', fontsize=12, fontweight='bold')
872
+ ax1.set_title('Equity Curve', fontsize=12, fontweight='bold')
837
873
  ax1.set_ylabel('Equity ($)')
838
874
  ax1.grid(True, alpha=0.3)
839
- ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, p: f'${x/1000:.0f}K'))
840
875
 
841
- # Drawdown
876
+ # 2. Drawdown
842
877
  ax2 = axes[0, 1]
843
878
  running_max = np.maximum.accumulate(equity_array)
844
879
  drawdown = (equity_array - running_max) / running_max * 100
845
880
  ax2.fill_between(dates, 0, drawdown, alpha=0.6, color='#f44336')
846
881
  ax2.plot(dates, drawdown, color='#d32f2f', linewidth=2)
847
- max_dd_idx = np.argmin(drawdown)
848
- ax2.scatter(dates[max_dd_idx], drawdown[max_dd_idx], color='darkred', s=100, zorder=5, marker='v')
849
- ax2.set_title('Drawdown Over Time', fontsize=12, fontweight='bold')
882
+ ax2.set_title('Drawdown', fontsize=12, fontweight='bold')
850
883
  ax2.set_ylabel('Drawdown (%)')
851
884
  ax2.grid(True, alpha=0.3)
852
885
 
853
- # P&L Distribution
886
+ # 3. P&L Distribution
854
887
  ax3 = axes[1, 0]
855
888
  pnl_values = trades_df['pnl'].values
856
889
  ax3.hist(pnl_values, bins=40, color='#4CAF50', alpha=0.7, edgecolor='black')
857
890
  ax3.axvline(x=0, color='red', linestyle='--', linewidth=2)
858
- ax3.axvline(x=np.median(pnl_values), color='blue', linestyle='--', linewidth=2)
859
- ax3.set_title('Trade P&L Distribution', fontsize=12, fontweight='bold')
891
+ ax3.set_title('P&L Distribution', fontsize=12, fontweight='bold')
860
892
  ax3.set_xlabel('P&L ($)')
861
- ax3.set_ylabel('Frequency')
862
893
  ax3.grid(True, alpha=0.3, axis='y')
863
894
 
864
- # Signal Performance
895
+ # 4. Signal Performance
865
896
  ax4 = axes[1, 1]
866
897
  if 'signal' in trades_df.columns:
867
898
  signal_pnl = trades_df.groupby('signal')['pnl'].sum()
868
899
  colors = ['#4CAF50' if x > 0 else '#f44336' for x in signal_pnl.values]
869
- bars = ax4.bar(signal_pnl.index, signal_pnl.values, color=colors, alpha=0.7, edgecolor='black')
870
- for bar in bars:
871
- height = bar.get_height()
872
- ax4.text(bar.get_x() + bar.get_width()/2., height,
873
- f'${height:,.0f}', ha='center', va='bottom' if height > 0 else 'top', fontweight='bold')
874
- ax4.set_title('P&L by Signal Type', fontsize=12, fontweight='bold')
900
+ ax4.bar(signal_pnl.index, signal_pnl.values, color=colors, alpha=0.7, edgecolor='black')
901
+ ax4.set_title('P&L by Signal', fontsize=12, fontweight='bold')
875
902
  else:
876
903
  ax4.text(0.5, 0.5, 'No signal data', ha='center', va='center', transform=ax4.transAxes)
877
- ax4.set_ylabel('Total P&L ($)')
878
- ax4.axhline(y=0, color='black', linestyle='-', linewidth=1)
904
+ ax4.axhline(y=0, color='black', linewidth=1)
879
905
  ax4.grid(True, alpha=0.3, axis='y')
880
906
 
881
- # Monthly Returns
907
+ # 5. Monthly Returns
882
908
  ax5 = axes[2, 0]
883
909
  trades_df['exit_date'] = pd.to_datetime(trades_df['exit_date'])
884
910
  trades_df['month'] = trades_df['exit_date'].dt.to_period('M')
885
911
  monthly_pnl = trades_df.groupby('month')['pnl'].sum()
886
- colors_monthly = ['#4CAF50' if x > 0 else '#f44336' for x in monthly_pnl.values]
887
- ax5.bar(range(len(monthly_pnl)), monthly_pnl.values, color=colors_monthly, alpha=0.7, edgecolor='black')
912
+ colors = ['#4CAF50' if x > 0 else '#f44336' for x in monthly_pnl.values]
913
+ ax5.bar(range(len(monthly_pnl)), monthly_pnl.values, color=colors, alpha=0.7, edgecolor='black')
888
914
  ax5.set_title('Monthly P&L', fontsize=12, fontweight='bold')
889
- ax5.set_ylabel('P&L ($)')
890
915
  ax5.set_xticks(range(len(monthly_pnl)))
891
916
  ax5.set_xticklabels([str(m) for m in monthly_pnl.index], rotation=45, ha='right')
892
- ax5.axhline(y=0, color='black', linestyle='-', linewidth=1)
917
+ ax5.axhline(y=0, color='black', linewidth=1)
893
918
  ax5.grid(True, alpha=0.3, axis='y')
894
919
 
895
- # Top Symbols
920
+ # 6. Top Symbols
896
921
  ax6 = axes[2, 1]
897
922
  if 'symbol' in trades_df.columns:
898
923
  symbol_pnl = trades_df.groupby('symbol')['pnl'].sum().sort_values(ascending=True).tail(10)
899
- colors_symbols = ['#4CAF50' if x > 0 else '#f44336' for x in symbol_pnl.values]
900
- ax6.barh(range(len(symbol_pnl)), symbol_pnl.values, color=colors_symbols, alpha=0.7, edgecolor='black')
924
+ colors = ['#4CAF50' if x > 0 else '#f44336' for x in symbol_pnl.values]
925
+ ax6.barh(range(len(symbol_pnl)), symbol_pnl.values, color=colors, alpha=0.7, edgecolor='black')
901
926
  ax6.set_yticks(range(len(symbol_pnl)))
902
927
  ax6.set_yticklabels(symbol_pnl.index, fontsize=9)
903
- ax6.set_title('Top 10 Symbols by P&L', fontsize=12, fontweight='bold')
928
+ ax6.set_title('Top Symbols', fontsize=12, fontweight='bold')
904
929
  else:
905
930
  ax6.text(0.5, 0.5, 'No symbol data', ha='center', va='center', transform=ax6.transAxes)
906
- ax6.set_xlabel('Total P&L ($)')
907
- ax6.axvline(x=0, color='black', linestyle='-', linewidth=1)
931
+ ax6.axvline(x=0, color='black', linewidth=1)
908
932
  ax6.grid(True, alpha=0.3, axis='x')
909
933
 
910
934
  plt.tight_layout()
@@ -918,7 +942,7 @@ class ChartGenerator:
918
942
  # RESULTS EXPORTER
919
943
  # ============================================================
920
944
  class ResultsExporter:
921
- """Export results to CSV files"""
945
+ """Export results to CSV"""
922
946
 
923
947
  @staticmethod
924
948
  def export_all(analyzer, prefix='backtest'):
@@ -929,14 +953,12 @@ class ResultsExporter:
929
953
  print("No trades to export")
930
954
  return
931
955
 
932
- # Trades
933
956
  trades_df = pd.DataFrame(r.trades)
934
957
  trades_df['entry_date'] = pd.to_datetime(trades_df['entry_date']).dt.strftime('%Y-%m-%d')
935
958
  trades_df['exit_date'] = pd.to_datetime(trades_df['exit_date']).dt.strftime('%Y-%m-%d')
936
959
  trades_df.to_csv(f'{prefix}_trades.csv', index=False)
937
960
  print(f"Exported: {prefix}_trades.csv")
938
961
 
939
- # Equity
940
962
  equity_df = pd.DataFrame({
941
963
  'date': pd.to_datetime(r.equity_dates).strftime('%Y-%m-%d'),
942
964
  'equity': r.equity_curve
@@ -944,46 +966,35 @@ class ResultsExporter:
944
966
  equity_df.to_csv(f'{prefix}_equity.csv', index=False)
945
967
  print(f"Exported: {prefix}_equity.csv")
946
968
 
947
- # Summary
948
969
  with open(f'{prefix}_summary.txt', 'w') as f:
949
970
  f.write("BACKTEST SUMMARY\n")
950
971
  f.write("="*70 + "\n\n")
951
972
  f.write(f"Strategy: {r.config.get('strategy_name', 'Unknown')}\n")
952
- f.write(f"Period: {r.config.get('start_date', 'N/A')} to {r.config.get('end_date', 'N/A')}\n\n")
973
+ f.write(f"Period: {r.config.get('start_date')} to {r.config.get('end_date')}\n\n")
953
974
  f.write("PERFORMANCE\n")
954
975
  f.write("-"*70 + "\n")
955
- f.write(f"Initial Capital: ${r.initial_capital:,.2f}\n")
956
- f.write(f"Final Equity: ${r.final_capital:,.2f}\n")
957
976
  f.write(f"Total Return: {m['total_return']:.2f}%\n")
958
- f.write(f"Sharpe Ratio: {m['sharpe']:.2f}\n")
959
- f.write(f"Max Drawdown: {m['max_drawdown']:.2f}%\n")
960
- f.write(f"Win Rate: {m['win_rate']:.2f}%\n")
961
- f.write(f"Total Trades: {m['total_trades']}\n")
977
+ f.write(f"Sharpe: {m['sharpe']:.2f}\n")
978
+ f.write(f"Max DD: {m['max_drawdown']:.2f}%\n")
979
+ f.write(f"Trades: {m['total_trades']}\n")
962
980
 
963
981
  print(f"Exported: {prefix}_summary.txt")
964
982
 
965
983
 
966
984
  # ============================================================
967
- # ONE-COMMAND RUNNER
985
+ # RUN BACKTEST
968
986
  # ============================================================
969
- def run_backtest(strategy_function, config,
970
- print_report=True,
971
- create_charts=True,
972
- export_results=True,
987
+ def run_backtest(strategy_function, config, print_report=True,
988
+ create_charts=True, export_results=True,
973
989
  chart_filename='backtest_results.png',
974
990
  export_prefix='backtest'):
975
- """
976
- Run complete backtest with one command
977
-
978
- Example:
979
- analyzer = run_backtest(my_strategy, CONFIG)
980
- """
991
+ """Run complete backtest with one command"""
981
992
 
982
993
  print("="*80)
983
994
  print(" "*25 + "STARTING BACKTEST")
984
995
  print("="*80)
985
996
  print(f"Strategy: {config.get('strategy_name', 'Unknown')}")
986
- print(f"Period: {config.get('start_date', 'N/A')} to {config.get('end_date', 'N/A')}")
997
+ print(f"Period: {config.get('start_date')} to {config.get('end_date')}")
987
998
  print(f"Capital: ${config.get('initial_capital', 0):,.0f}")
988
999
  print("="*80 + "\n")
989
1000
 
@@ -1002,10 +1013,10 @@ def run_backtest(strategy_function, config,
1002
1013
  try:
1003
1014
  ChartGenerator.create_all_charts(analyzer, chart_filename)
1004
1015
  except Exception as e:
1005
- print(f"[ERROR] Chart creation failed: {e}")
1016
+ print(f"[ERROR] Charts failed: {e}")
1006
1017
 
1007
1018
  if export_results and len(results.trades) > 0:
1008
- print(f"\n[*] Exporting results: {export_prefix}_*")
1019
+ print(f"\n[*] Exporting: {export_prefix}_*")
1009
1020
  try:
1010
1021
  ResultsExporter.export_all(analyzer, export_prefix)
1011
1022
  except Exception as e:
@@ -1014,22 +1025,9 @@ def run_backtest(strategy_function, config,
1014
1025
  return analyzer
1015
1026
 
1016
1027
 
1017
- # ============================================================
1018
- # EXPORTS
1019
- # ============================================================
1020
1028
  __all__ = [
1021
- 'BacktestResults',
1022
- 'BacktestAnalyzer',
1023
- 'ResultsReporter',
1024
- 'ChartGenerator',
1025
- 'ResultsExporter',
1026
- 'run_backtest',
1027
- 'init_api',
1028
- 'api_call',
1029
- 'APIHelper',
1030
- 'APIManager',
1031
- 'ResourceMonitor',
1032
- 'create_progress_bar',
1033
- 'update_progress',
1034
- 'format_time'
1035
- ]
1029
+ 'BacktestResults', 'BacktestAnalyzer', 'ResultsReporter',
1030
+ 'ChartGenerator', 'ResultsExporter', 'run_backtest',
1031
+ 'init_api', 'api_call', 'APIHelper', 'APIManager',
1032
+ 'ResourceMonitor', 'create_progress_bar', 'update_progress', 'format_time'
1033
+ ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ivolatility_backtesting
3
- Version: 1.2.0
3
+ Version: 1.3.1
4
4
  Summary: A universal backtesting framework for financial strategies using the IVolatility API.
5
5
  Author-email: IVolatility <support@ivolatility.com>
6
6
  Project-URL: Homepage, https://ivolatility.com
@@ -0,0 +1,7 @@
1
+ ivolatility_backtesting/__init__.py,sha256=abZYqTZwvzgSdSs55g3_zU8mtbNKveUndoDgKU8tnIo,577
2
+ ivolatility_backtesting/ivolatility_backtesting.py,sha256=xyvYFfUp4jNrfso5MpbUNBY-kK4lUnvyj0lmoMelCYQ,42141
3
+ ivolatility_backtesting-1.3.1.dist-info/licenses/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ ivolatility_backtesting-1.3.1.dist-info/METADATA,sha256=erCDHvApgidlPP3kF7PRkfQBc1Edwu9qAFiOzch1qr0,2052
5
+ ivolatility_backtesting-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
6
+ ivolatility_backtesting-1.3.1.dist-info/top_level.txt,sha256=Qv3irUBntr8b11WIKNN6zzCSguwaWC4nWR-ZKq8NsjY,24
7
+ ivolatility_backtesting-1.3.1.dist-info/RECORD,,
@@ -1,7 +0,0 @@
1
- ivolatility_backtesting/__init__.py,sha256=abZYqTZwvzgSdSs55g3_zU8mtbNKveUndoDgKU8tnIo,577
2
- ivolatility_backtesting/ivolatility_backtesting.py,sha256=_lo2QrdWTf8IVpp4AIIGw7_t88GhbSeHRAT4KEcwmBw,40916
3
- ivolatility_backtesting-1.2.0.dist-info/licenses/LICENSE,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- ivolatility_backtesting-1.2.0.dist-info/METADATA,sha256=SRFqAyNI-qOs2CeX3DZF0kJwbVnsQQMbOCkPs2LNOKc,2052
5
- ivolatility_backtesting-1.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
6
- ivolatility_backtesting-1.2.0.dist-info/top_level.txt,sha256=Qv3irUBntr8b11WIKNN6zzCSguwaWC4nWR-ZKq8NsjY,24
7
- ivolatility_backtesting-1.2.0.dist-info/RECORD,,