jasonlib-dev 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jasonlib/__init__.py ADDED
@@ -0,0 +1,53 @@
+ """jasonlib - Core JASON pivot-based feature computation library.
+ 
+ Public API::
+ 
+     from jasonlib import compute_jason, compute_trend_analysis, compute_pivot_analysis
+     from jasonlib import JAssetClass, JInterval, TrendFunction, PivotSortOption
+     from jasonlib import CoinDataset, TrendAnalysisResult, PivotAnalysisResult
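+ 
+ A minimal usage sketch (illustrative; ``candles`` is assumed to be an OHLC
+ ``pd.DataFrame`` shaped as documented on ``compute_jason``)::
+ 
+     features = compute_jason(
+         candles, interval=JInterval.H1, asset_class=JAssetClass.CRYPTO
+     )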
+ """
+ 
+ from jasonlib._calculator import compute_jason
+ from jasonlib._models import JAssetClass, JInterval
+ from jasonlib._trend_analysis import compute_trend_analysis
+ from jasonlib._trend_models import (
+     ChartData,
+     CoinDataset,
+     CoinSeries,
+     InsufficientDataError,
+     MarketTrendSummary,
+     PlotWindow,
+     SummaryMetric,
+     TrendAnalysisResult,
+     TrendCharts,
+     TrendFunction,
+ )
+ from jasonlib._pivot_models import (
+     PivotSortOption,
+     PivotAnalysisSummary,
+     PivotAnalysisParameters,
+     PivotAnalysisResult,
+ )
+ from jasonlib._pivot_analysis import compute_pivot_analysis
+ 
+ __all__ = [
+     "compute_jason",
+     "compute_trend_analysis",
+     "JAssetClass",
+     "JInterval",
+     "TrendFunction",
+     "CoinDataset",
+     "InsufficientDataError",
+     "TrendAnalysisResult",
+     "PlotWindow",
+     "SummaryMetric",
+     "MarketTrendSummary",
+     "CoinSeries",
+     "ChartData",
+     "TrendCharts",
+     "compute_pivot_analysis",
+     "PivotSortOption",
+     "PivotAnalysisSummary",
+     "PivotAnalysisParameters",
+     "PivotAnalysisResult",
+ ]
jasonlib/_calculator.py ADDED
@@ -0,0 +1,248 @@
+ """Core JASON feature computation."""
+ 
+ import numpy as np
+ import pandas as pd
+ from tqdm import tqdm
+ 
+ from jasonlib._models import JAssetClass, JInterval
+ from jasonlib._numba_kernels import _calculate_metrics_for_window
+ from jasonlib._trading_calendar import (
+     TRADING_DAYS_PER_YEAR,
+     TRADING_HOURS_PER_DAY,
+     get_logger,
+     normalize_asset_class,
+ )
+ 
+ logger = get_logger("jasonlib.calculator")
+ 
+ _JASON_COLUMNS = [
+     "high_vol",
+     "high_days",
+     "high_z",
+     "high_pivot",
+     "high_json2",
+     "high_json2_plus_1d",
+     "high_json2_plus_2d",
+     "last_fx",
+     "low_json2",
+     "low_json2_plus_1d",
+     "low_json2_plus_2d",
+     "low_pivot",
+     "low_z",
+     "low_days",
+     "low_vol",
+ ]
+ 
+ _ROUNDING = {
+     "high_vol": 2,
+     "high_days": 1,
+     "high_z": 2,
+     "low_z": 2,
+     "low_days": 1,
+     "low_vol": 2,
+ }
+ 
+ 
+ def compute_jason(
+     df: pd.DataFrame,
+     interval: JInterval,
+     asset_class: JAssetClass,
+     lookback: int = 7,
+     n_pivots: int = 5,
+     min_days: int = 1,
+ ) -> pd.DataFrame:
+     """Compute JASON pivot-based features from OHLC candle data.
+ 
+     Identifies high/low pivot points over a rolling lookback window,
+     weights the most frequent pivots, and derives volatility-adjusted
+     support/resistance levels (JSON2), Z-scores, and days-since-pivot
+     metrics.
+ 
+     Args:
+         df: OHLCV candle data. Must be datetime-indexed (UTC
+             ``DatetimeIndex``). Required columns and dtypes:
+ 
+             ========= ========= ============================
+             Column    Dtype     Description
+             ========= ========= ============================
+             ``open``  float64   Opening price of the candle
+             ``high``  float64   Highest price in the candle
+             ``low``   float64   Lowest price in the candle
+             ``close`` float64   Closing price of the candle
+             ========= ========= ============================
+ 
+             Additional columns are ignored. Rows must be sorted ascending
+             by timestamp; the function will sort them if they are not.
+         interval: Candle size. Determines the number of candles that fit
+             inside the *lookback* window.
+         asset_class: Trading calendar to use. Determines days per year
+             and hours per day for annualization. Required; no default.
+         lookback: Lookback period in trading days for pivot detection.
+             Default ``7``.
+         n_pivots: Number of top pivots (by frequency) to weight.
+             Default ``5``.
+         min_days: Minimum number of days for a pivot to be considered.
+             Default ``1``.
+ 
+     Returns:
+         ``pd.DataFrame`` indexed by ``DatetimeIndex`` (UTC), starting from
+         the first candle after the lookback window. Columns:
+ 
+         ====================== ========= ===========================================
+         Column                 Dtype     Description
+         ====================== ========= ===========================================
+         ``high_vol``           float64   Annualized volatility (%) at high pivots
+         ``high_days``          float64   Trading days since the high pivot
+         ``high_z``             float64   Z-score from high pivot to last close
+         ``high_pivot``         float64   Weighted average high pivot price
+         ``high_json2``         float64   Support level (high_pivot * exp(-2*std))
+         ``high_json2_plus_1d`` float64   Support level projected +1 day
+         ``high_json2_plus_2d`` float64   Support level projected +2 days
+         ``last_fx``            float64   Last closing price at this timestamp
+         ``low_json2``          float64   Resistance level (low_pivot * exp(+2*std))
+         ``low_json2_plus_1d``  float64   Resistance level projected +1 day
+         ``low_json2_plus_2d``  float64   Resistance level projected +2 days
+         ``low_pivot``          float64   Weighted average low pivot price
+         ``low_z``              float64   Z-score from low pivot to last close
+         ``low_days``           float64   Trading days since the low pivot
+         ``low_vol``            float64   Annualized volatility (%) at low pivots
+         ====================== ========= ===========================================
+ 
+         Volatility and Z-score columns are rounded to 2 decimal places.
+         Days columns are rounded to 1 decimal place.
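+ 
+     Example:
+         A minimal sketch on synthetic hourly crypto candles (the data is
+         random and purely illustrative)::
+ 
+             import numpy as np
+             import pandas as pd
+ 
+             idx = pd.date_range("2024-01-01", periods=500, freq="1h", tz="UTC")
+             rng = np.random.default_rng(0)
+             close = 100.0 * np.exp(np.cumsum(0.001 * rng.standard_normal(500)))
+             candles = pd.DataFrame(
+                 {"open": close, "high": close * 1.01,
+                  "low": close * 0.99, "close": close},
+                 index=idx,
+             )
+             features = compute_jason(
+                 candles, interval=JInterval.H1, asset_class=JAssetClass.CRYPTO
+             )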
+     """
+     if df.empty:
+         return pd.DataFrame()
+ 
+     df = df.sort_index()
+ 
+     normalized_ac = normalize_asset_class(asset_class)
+     days_in_year = float(TRADING_DAYS_PER_YEAR[normalized_ac])
+     hours_per_day = TRADING_HOURS_PER_DAY[normalized_ac]
+     seconds_per_trading_day = hours_per_day * 3600
+ 
+     # Quadratic variation for volatility estimation
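+     # (the 0.511 / 0.019 / 0.383 coefficients below match the Garman-Klass
+     # (1980) range-based variance estimator on per-candle log returns)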
+     m_big = np.log(df["high"] / df["open"])
+     m_small = np.log(df["low"] / df["open"])
+     x = np.log(df["close"] / df["open"])
+     qv_series = (
+         0.511 * (m_big - m_small) ** 2
+         - 0.019 * (x * (m_big + m_small) - 2 * m_big * m_small)
+         - 0.383 * x**2
+     )
+ 
+     highs = df["high"].to_numpy(dtype=np.float64)
+     lows = df["low"].to_numpy(dtype=np.float64)
+     closes = df["close"].to_numpy(dtype=np.float64)
+ 
+     candle_seconds = interval.seconds
+ 
+     if normalized_ac == JAssetClass.CRYPTO:
+         lookback_candles = int(lookback * 24 * 3600 // candle_seconds)
+     else:
+         lookback_candles = int(
+             lookback * seconds_per_trading_day // candle_seconds
+         )
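+     # For example, a 7-day lookback on 1h crypto candles spans 7 * 24 = 168
+     # candles; on the 6.5 h/day equity calendar the floor division yields
+     # int(7 * 6.5) = 45 candles.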
+ 
+     window_size = lookback_candles + 1
+ 
+     s_in_year = days_in_year * seconds_per_trading_day
+     min_days_in_years = (min_days / days_in_year) - (candle_seconds / s_in_year)
+ 
+     qv_values = qv_series.to_numpy(dtype=np.float64)
+ 
+     t_seconds = np.arange(
+         window_size * candle_seconds, 0, -candle_seconds, dtype=np.float64
+     )
+     t_seconds = np.maximum(t_seconds, candle_seconds)
+     relative_time_in_years = t_seconds / s_in_year
+ 
+     num_points = len(df)
+     results: list[tuple] = []
+ 
+     for i in tqdm(
+         range(lookback_candles, num_points),
+         desc="Computing JASON",
+         leave=False,
+     ):
+         start_idx = i - lookback_candles
+         end_idx = i + 1
+ 
+         qv_window = qv_values[start_idx:end_idx]
+         qv_sum_rev = np.cumsum(qv_window[::-1])[::-1]
+         w_sum_rev = np.arange(len(qv_window), 0, -1, dtype=np.float64)
+         vols_window = np.sqrt(
+             (qv_sum_rev * s_in_year) / (w_sum_rev * candle_seconds)
+         )
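+         # vols_window[j] annualizes the quadratic variation accumulated from
+         # candle j through the window end (suffix sum over suffix duration).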
+ 
+         res_tuple = _calculate_metrics_for_window(
+             highs[start_idx:end_idx],
+             lows[start_idx:end_idx],
+             closes[start_idx:end_idx],
+             vols_window,
+             relative_time_in_years,
+             n_pivots,
+             min_days_in_years,
+             days_in_year,
+         )
+         results.append(res_tuple)
+ 
+     if not results:
+         return pd.DataFrame()
+ 
+     result_df = pd.DataFrame(
+         results, index=df.index[lookback_candles:], columns=_JASON_COLUMNS
+     )
+     result_df = result_df.round(_ROUNDING)
+     result_df = _calculate_ztv(result_df)
+     return result_df
+ 
+ 
+ # ============================================================================
+ # ZTV AND DAYS METRICS
+ # ============================================================================
+ 
+ def _calculate_ztv(df: pd.DataFrame) -> pd.DataFrame:
+     """Calculate all ZTV and Days metrics and return a single DataFrame.
+ 
+     ZTV (Z-Time-Volatility) is a conviction score combining Z-score, time,
+     and volatility. It measures the total power of a price move as a
+     fraction of a theoretical maximum.
+ 
+     Args:
+         df: DataFrame with JASON features including:
+             - high_z, low_z: Z-scores
+             - high_vol, low_vol: Volatility at pivots
+             - high_days, low_days: Days since pivots
+ 
+     Returns:
+         DataFrame with additional columns:
+             - high_ztv, low_ztv: Raw ZTV values
+             - high_ztv_ratio, low_ztv_ratio: ZTV as ratio of max (0-1)
+             - high_days_ratio, low_days_ratio: Days as ratio of max (0-1)
+     """
+     if df.empty:
+         return df
+ 
+     df = df.copy()
+ 
+     # ZTV metrics
+     MAX_Z = 2.5
+     MAX_DAYS_ZTV = 4.0
+     MAX_VOL = 100.0
+     max_ztv = MAX_VOL * MAX_Z * np.sqrt(MAX_DAYS_ZTV)
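+     # Worked example of the cap: z = 2.5 at vol = 100 four days after the
+     # pivot gives 2.5 * 100.0 * sqrt(4.0) = 500.0; ratios are scaled by it.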
+ 
+     df["high_ztv"] = df["high_z"] * df["high_vol"] * np.sqrt(df["high_days"].astype(float))
+     df["low_ztv"] = df["low_z"] * df["low_vol"] * np.sqrt(df["low_days"].astype(float))
+     df["high_ztv_ratio"] = (df["high_ztv"] / max_ztv).clip(0, 1)
+     df["low_ztv_ratio"] = (df["low_ztv"] / max_ztv).clip(0, 1)
+ 
+     # Days metrics
+     MAX_DAYS_RATIO = 5.0
+     df["high_days_ratio"] = (df["high_days"] / MAX_DAYS_RATIO).clip(0, 1)
+     df["low_days_ratio"] = (df["low_days"] / MAX_DAYS_RATIO).clip(0, 1)
+ 
+     return df
jasonlib/_math.py ADDED
@@ -0,0 +1,52 @@
+ """Sigmoid utility functions for trend score calculations."""
+ 
+ import numpy as np
+ import pandas as pd
+ 
+ 
+ def sigmoid_z(x: pd.Series, threshold: float = 2.0, k: float = 10.0) -> pd.Series:
+     """Apply a sigmoid function centered around a threshold.
+ 
+     Args:
+         x: Input Series.
+         threshold: Center point of the sigmoid.
+         k: Steepness factor.
+ 
+     Returns:
+         Sigmoid-transformed Series with same index.
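+ 
+     Example::
+ 
+         sigmoid_z(pd.Series([2.0]))  # exactly 0.5 at the default threshold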
+     """
+     numeric_x = x.astype(float).to_numpy()
+     result = 1 / (1 + np.exp(-k * (numeric_x - threshold)))
+     return pd.Series(result, index=x.index)
+ 
+ 
+ def sigmoid_diff(x: pd.Series, threshold: float = 1.0, k: float = 10.0) -> pd.Series:
+     """Apply a differential sigmoid mapping values symmetrically into (0, 2).
+ 
+     Algebraically this equals ``2 * sigmoid(k * (x - threshold))``: the
+     output passes through 1.0 at the threshold, approaching 0 far below
+     it and 2 far above it.
+ 
+     Args:
+         x: Input Series.
+         threshold: Threshold for the differential.
+         k: Steepness factor.
+ 
+     Returns:
+         Differential sigmoid-transformed Series with same index.
+     """
+     numeric_x = x.astype(float).to_numpy()
+     result = (
+         1
+         + 1 / (1 + np.exp(-k * (numeric_x - threshold)))
+         - 1 / (1 + np.exp(-k * (-numeric_x + threshold)))
+     )
+     return pd.Series(result, index=x.index)
+ 
+ 
+ def sigmoid_vol(x: pd.Series, hist_vol: float = 50.0, k: float = 0.2) -> pd.Series:
+     """Apply a sigmoid function for volatility weighting.
+ 
+     Args:
+         x: Input Series of volatility values.
+         hist_vol: Historical volatility center point.
+         k: Steepness factor.
+ 
+     Returns:
+         Sigmoid-weighted Series with same index.
+     """
+     numeric_x = x.astype(float).to_numpy()
+     result = 1 / (1 + np.exp(-k * (numeric_x - hist_vol)))
+     return pd.Series(result, index=x.index)
jasonlib/_models.py ADDED
@@ -0,0 +1,62 @@
+ """JASON domain model enums."""
+ 
+ from enum import Enum
+ 
+ import pandas as pd
+ 
+ 
+ class JAssetClass(str, Enum):
+     """Supported asset classes for JASON calculations.
+ 
+     Determines the trading calendar used for time-aware computations:
+     - CRYPTO: 365 days/year, 24 hours/day
+     - EQUITIES: 252 trading days/year, 6.5 hours/day (US market hours)
+     - COMMOD: 252 trading days/year, 23 hours/day (CME Globex)
+     - FX: 252 trading days/year, 23 hours/day (CME Globex)
+     - EQUITY_IDX: 252 trading days/year, 23 hours/day (CME Globex)
+     """
+ 
+     CRYPTO = "CRYPTO"
+     FX = "FX"
+     COMMOD = "COMMOD"
+     EQUITIES = "EQUITIES"
+     EQUITY_IDX = "EQUITY_IDX"
+ 
+ 
+ _INTERVAL_SECONDS = {
+     "1m": 60,
+     "5m": 300,
+     "15m": 900,
+     "30m": 1800,
+     "1h": 3600,
+     "4h": 14400,
+     "12h": 43200,
+     "1d": 86400,
+ }
+ 
+ 
+ class JInterval(str, Enum):
+     """Supported candle intervals for JASON calculations.
+ 
+     Uses MetaTrader-style naming: M for minutes, H for hours, D for days.
+     Each member's string value is the compact interval notation (e.g. "1h").
+     Use the ``seconds`` property to get the candle duration in seconds.
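+ 
+     Example::
+ 
+         JInterval.H4.seconds          # 14400
+         JInterval.H4.to_timedelta()   # Timedelta('0 days 04:00:00')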
+     """
+ 
+     M1 = "1m"
+     M5 = "5m"
+     M15 = "15m"
+     M30 = "30m"
+     H1 = "1h"
+     H4 = "4h"
+     H12 = "12h"
+     D1 = "1d"
+ 
+     @property
+     def seconds(self) -> int:
+         """Return candle duration in seconds."""
+         return _INTERVAL_SECONDS[self.value]
+ 
+     def to_timedelta(self) -> pd.Timedelta:
+         """Convert interval to pandas Timedelta."""
+         return pd.Timedelta(seconds=self.seconds)
jasonlib/_numba_kernels.py ADDED
@@ -0,0 +1,195 @@
+ """Numba JIT-compiled computational kernels for JASON feature calculation."""
+ 
+ from typing import Tuple
+ 
+ import numpy as np
+ from numba import jit
+ 
+ 
+ @jit(nopython=True, cache=True)
+ def _unique_with_counts(arr: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+     """Numba-compatible implementation of np.unique with return_counts=True.
+ 
+     Args:
+         arr: Input array to find unique values in.
+ 
+     Returns:
+         Tuple of (unique_values, counts).
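+ 
+     Example::
+ 
+         _unique_with_counts(np.array([3, 1, 3]))
+         # -> (array([1, 3]), array([1, 2]))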
+     """
+     if arr.size == 0:
+         return np.empty(0, dtype=arr.dtype), np.empty(0, dtype=np.int64)
+     a = np.sort(arr)
+     mask = np.empty(a.shape, dtype=np.bool_)
+     mask[:1] = True
+     mask[1:] = a[1:] != a[:-1]
+     unique_vals = a[mask]
+     unique_indices = np.nonzero(mask)[0]
+     counts = np.diff(np.append(unique_indices, a.size))
+     return unique_vals, counts
+ 
+ 
+ @jit(nopython=True, cache=True)
+ def _calculate_metrics_for_window(
+     highs: np.ndarray,
+     lows: np.ndarray,
+     closes: np.ndarray,
+     vols: np.ndarray,
+     time_in_years: np.ndarray,
+     n_pivots: int,
+     min_days_in_years: float,
+     days_in_year: float,
+ ) -> Tuple:
+     """Calculate JASON metrics for a single lookback window.
+ 
+     Identifies high/low pivot points by frequency, weights the top N pivots,
+     and computes Z-scores, days-since-pivot, volatility, and JSON2
+     (volatility-adjusted support/resistance) levels.
+ 
+     Args:
+         highs: High prices for the window.
+         lows: Low prices for the window.
+         closes: Close prices for the window.
+         vols: Annualized volatility values for the window.
+         time_in_years: Relative time array in years for the window.
+         n_pivots: Number of top pivots to consider.
+         min_days_in_years: Minimum days threshold in years.
+         days_in_year: Trading days per year (365 for crypto, 252 for equities).
+ 
+     Returns:
+         Tuple of 15 float values:
+         (high_vol, high_days, high_z, high_pivot, high_json2,
+         high_json2_plus_1d, high_json2_plus_2d, last_close, low_json2,
+         low_json2_plus_1d, low_json2_plus_2d, low_pivot, low_z,
+         low_days, low_vol)
+     """
+     window_size = len(highs)
+     last_close = closes[-1]
+     high_pivot_indices = np.empty(window_size, dtype=np.int64)
+     low_pivot_indices = np.empty(window_size, dtype=np.int64)
+     last_max_idx = window_size - 1
+     last_min_idx = window_size - 1
+ 
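+     # Backward scan: for each bar i, record the index of the running maximum
+     # high and running minimum low over the suffix window [i, end].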
+     for i in range(window_size - 1, -1, -1):
+         if highs[i] > highs[last_max_idx]:
+             last_max_idx = i
+         if lows[i] < lows[last_min_idx]:
+             last_min_idx = i
+         high_pivot_indices[i] = last_max_idx
+         low_pivot_indices[i] = last_min_idx
+ 
+     valid_pivot_mask = time_in_years > min_days_in_years
+     top_n_h_indices, h_weights = np.empty(0, dtype=np.int64), np.empty(
+         0, dtype=np.int64
+     )
+     h_pivots = high_pivot_indices[valid_pivot_mask]
+ 
+     if len(h_pivots) > 0:
+         unique_h_pivots, h_counts = _unique_with_counts(h_pivots)
+         h_sorted_indices = np.argsort(h_counts)[::-1][:n_pivots]
+         top_n_h_indices = unique_h_pivots[h_sorted_indices]
+         h_weights = h_counts[h_sorted_indices]
+ 
+     top_n_l_indices, l_weights = np.empty(0, dtype=np.int64), np.empty(
+         0, dtype=np.int64
+     )
+     l_pivots = low_pivot_indices[valid_pivot_mask]
+ 
+     if len(l_pivots) > 0:
+         unique_l_pivots, l_counts = _unique_with_counts(l_pivots)
+         l_sorted_indices = np.argsort(l_counts)[::-1][:n_pivots]
+         top_n_l_indices = unique_l_pivots[l_sorted_indices]
+         l_weights = l_counts[l_sorted_indices]
+ 
+     h_ave_pivot, h_ave_days, h_ave_vol, h_ave_z = 0.0, 0.0, 0.0, 0.0
+     h_ave_json2, h_ave_json2_plus1d, h_ave_json2_plus2d = 0.0, 0.0, 0.0
+ 
+     if len(top_n_h_indices) > 0:
+         total_h_weight = np.sum(h_weights)
+         if total_h_weight > 0:
+             for i in range(len(top_n_h_indices)):
+                 idx, weight = top_n_h_indices[i], h_weights[i]
+                 pivot_high, pivot_vol = highs[idx], vols[idx]
+                 pivot_time = max(time_in_years[idx], 1.0 / days_in_year)
+                 std_dev = pivot_vol * np.sqrt(pivot_time)
+                 # TODO: Workaround as the current model crashes when high == low
+                 std_dev = max(std_dev, 1e-10)
+                 h_ave_pivot += pivot_high * weight
+                 h_ave_days += time_in_years[idx] * days_in_year * weight
+                 h_ave_vol += pivot_vol * weight
+                 h_ave_z += np.log(pivot_high / last_close) / std_dev * weight
+                 h_ave_json2 += pivot_high * np.exp(-2.0 * std_dev) * weight
+                 std_dev_plus1d = pivot_vol * np.sqrt(
+                     pivot_time + 1.0 / days_in_year
+                 )
+                 h_ave_json2_plus1d += (
+                     pivot_high * np.exp(-2.0 * std_dev_plus1d) * weight
+                 )
+                 std_dev_plus2d = pivot_vol * np.sqrt(
+                     pivot_time + 2.0 / days_in_year
+                 )
+                 h_ave_json2_plus2d += (
+                     pivot_high * np.exp(-2.0 * std_dev_plus2d) * weight
+                 )
+             h_ave_pivot /= total_h_weight
+             h_ave_days /= total_h_weight
+             h_ave_vol /= total_h_weight
+             h_ave_z /= total_h_weight
+             h_ave_json2 /= total_h_weight
+             h_ave_json2_plus1d /= total_h_weight
+             h_ave_json2_plus2d /= total_h_weight
+ 
+     l_ave_pivot, l_ave_days, l_ave_vol, l_ave_z = 0.0, 0.0, 0.0, 0.0
+     l_ave_json2, l_ave_json2_plus1d, l_ave_json2_plus2d = 0.0, 0.0, 0.0
+ 
+     if len(top_n_l_indices) > 0:
+         total_l_weight = np.sum(l_weights)
+         if total_l_weight > 0:
+             for i in range(len(top_n_l_indices)):
+                 idx, weight = top_n_l_indices[i], l_weights[i]
+                 pivot_low, pivot_vol = lows[idx], vols[idx]
+                 pivot_time = max(time_in_years[idx], 1.0 / days_in_year)
+                 std_dev = pivot_vol * np.sqrt(pivot_time)
+                 # TODO: Workaround as the current model crashes when high == low
+                 std_dev = max(std_dev, 1e-10)
+                 l_ave_pivot += pivot_low * weight
+                 l_ave_days += time_in_years[idx] * days_in_year * weight
+                 l_ave_vol += pivot_vol * weight
+                 l_ave_z += np.log(last_close / pivot_low) / std_dev * weight
+                 l_ave_json2 += pivot_low * np.exp(2.0 * std_dev) * weight
+                 std_dev_plus1d = pivot_vol * np.sqrt(
+                     pivot_time + 1.0 / days_in_year
+                 )
+                 l_ave_json2_plus1d += (
+                     pivot_low * np.exp(2.0 * std_dev_plus1d) * weight
+                 )
+                 std_dev_plus2d = pivot_vol * np.sqrt(
+                     pivot_time + 2.0 / days_in_year
+                 )
+                 l_ave_json2_plus2d += (
+                     pivot_low * np.exp(2.0 * std_dev_plus2d) * weight
+                 )
+             l_ave_pivot /= total_l_weight
+             l_ave_days /= total_l_weight
+             l_ave_vol /= total_l_weight
+             l_ave_z /= total_l_weight
+             l_ave_json2 /= total_l_weight
+             l_ave_json2_plus1d /= total_l_weight
+             l_ave_json2_plus2d /= total_l_weight
+ 
+     return (
+         h_ave_vol * 100,
+         h_ave_days,
+         h_ave_z,
+         h_ave_pivot,
+         h_ave_json2,
+         h_ave_json2_plus1d,
+         h_ave_json2_plus2d,
+         last_close,
+         l_ave_json2,
+         l_ave_json2_plus1d,
+         l_ave_json2_plus2d,
+         l_ave_pivot,
+         l_ave_z,
+         l_ave_days,
+         l_ave_vol * 100,
+     )