ygo 1.0.10__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ygo might be problematic.

ycat/qdf/udf/cs_udf.py DELETED
@@ -1,97 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- ---------------------------------------------
- Created on 2025/3/4 20:20
- @author: ZhangYundi
- @email: yundi.xxii@outlook.com
- ---------------------------------------------
- """
-
- import polars as pl
- import numpy as np
-
- over = dict(
-     partition_by=["date", "time"],
-     order_by=["asset", ]
- )
-
- EPS = 1e-12
-
-
- def cs_ufit(expr: pl.Expr): return (expr - expr.median().over(**over)).abs()
-
-
- def cs_rank(expr: pl.Expr): return expr.rank().over(**over)
-
-
- def cs_demean(expr: pl.Expr): return expr - expr.mean().over(**over)
-
-
- def cs_mean(expr: pl.Expr): return expr.mean().over(**over)
-
-
- def cs_mid(expr: pl.Expr): return expr.median().over(**over)
-
-
- def cs_moderate(expr: pl.Expr): return (expr - expr.mean().over(**over)).abs()
-
-
- def cs_qcut(expr: pl.Expr, N=10):
-     return expr.qcut(N, labels=[str(i) for i in range(1, N + 1)], allow_duplicates=True).cast(pl.Int32)
-
-
- def cs_ic(left: pl.Expr, right: pl.Expr): return pl.corr(left, right, method="spearman").over(**over)
-
-
- def cs_corr(left: pl.Expr, right: pl.Expr): return pl.corr(left, right, method="pearson").over(**over)
-
-
- def cs_std(expr: pl.Expr): return expr.std().over(**over)
-
-
- def cs_var(expr: pl.Expr): return expr.var().over(**over)
-
-
- def cs_skew(expr: pl.Expr): return expr.skew().over(**over)
-
-
- def cs_slope(left: pl.Expr, right: pl.Expr): return cs_corr(left, right) * cs_std(left) / cs_std(right)
-
-
- def cs_resid(left: pl.Expr, right: pl.Expr): return left - cs_slope(left, right) * right
-
- def cs_mad(expr: pl.Expr):
-     return 1.4826 * (expr - expr.median()).abs().median().over(**over)
-
- def cs_zscore(expr: pl.Expr): return (expr - cs_mean(expr)) / cs_std(expr)
-
- def cs_norm(expr: pl.Expr): return (expr - cs_mid(expr)) / cs_mad(expr)
-
- def cs_midby(expr: pl.Expr, *by: pl.Expr): return expr.median().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
-
- def cs_madby(expr: pl.Expr, *by: pl.Expr): return 1.4826 * (expr - expr.median()).abs().median().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
-
- def cs_normby(expr: pl.Expr, *by: pl.Expr): return (expr - cs_midby(expr, *by)) / (cs_madby(expr, *by) + EPS)
-
- def cs_meanby(expr: pl.Expr, *by: pl.Expr): return expr.mean().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
-
- def cs_stdby(expr: pl.Expr, *by: pl.Expr): return expr.std().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
-
- def cs_sumby(expr: pl.Expr, *by: pl.Expr): return expr.sum().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
-
-
- def cs_max(expr: pl.Expr): return expr.max().over(**over)
-
-
- def cs_min(expr: pl.Expr): return expr.min().over(**over)
-
-
- def cs_peakmax(expr: pl.Expr): return expr.peak_max().over(**over)
-
- def cs_peakmin(expr: pl.Expr): return expr.peak_min().over(**over)
-
- def cs_zscoreby(expr: pl.Expr, *by: pl.Expr): return (expr - cs_meanby(expr, *by)) / cs_stdby(expr, *by)
-
- def cs_entropy(expr: pl.Expr): return expr.entropy().over(**over)
-
- def cs_entropyby(expr: pl.Expr, *by: pl.Expr): return expr.entropy().over(partition_by=[*over.get("partition_by"), *by], order_by=over.get("order_by"))
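For context, the removed cs_* helpers are cross-sectional operators: each returns a Polars expression windowed over the (date, time) cross-section, so it can be dropped straight into with_columns. A minimal usage sketch, assuming a long-format frame with date, time, and asset columns (the frame and values below are invented for illustration, not taken from the package):

    import polars as pl

    df = pl.DataFrame({
        "date":  ["2025-03-04"] * 4,
        "time":  ["09:30"] * 4,
        "asset": ["A", "B", "C", "D"],
        "ret":   [0.01, -0.02, 0.03, 0.00],
    })

    # cs_zscore(pl.col("ret")) expands to this expression:
    cs_z = (
        (pl.col("ret") - pl.col("ret").mean().over(["date", "time"]))
        / pl.col("ret").std().over(["date", "time"])
    )
    print(df.with_columns(cs_z.alias("ret_cs_z")))

The *by variants extend the same (date, time) partition with extra grouping expressions, for example an industry column.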
ycat/qdf/udf/d_udf.py DELETED
@@ -1,176 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- ---------------------------------------------
- Created on 2025/3/5 01:04
- @author: ZhangYundi
- @email: yundi.xxii@outlook.com
- ---------------------------------------------
- """
- import numpy as np
- import polars as pl
-
- over = dict(
-     partition_by=["time", "asset"],
-     order_by=["date"]
- )
-
-
- def d_mean(expr: pl.Expr, windows): return expr.rolling_mean(windows, min_samples=1).over(**over)
-
-
- def d_std(expr: pl.Expr, windows): return expr.rolling_std(windows, min_samples=1).over(**over)
-
-
- def d_sum(expr: pl.Expr, windows): return expr.rolling_sum(windows, min_samples=1).over(**over)
-
-
- def d_var(expr: pl.Expr, windows): return expr.rolling_var(windows, min_samples=1).over(**over)
-
-
- def d_skew(expr: pl.Expr, windows): return expr.rolling_skew(windows).over(**over)
-
-
- def d_ref(expr: pl.Expr, windows, dims):  # return expr.shift(int(abs(windows))).over(**over)
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((dims[0], -1))
-             )
-             .shift(windows)
-             .to_numpy()
-             .ravel()
-         )
-         .replace(np.nan, None)
-     )
-
-
- def d_mid(expr: pl.Expr, windows): return expr.rolling_median(windows, min_samples=1).over(**over)
-
-
- def d_mad(expr: pl.Expr, windows):
-     return (expr - expr.rolling_median(windows, min_samples=1)).abs().rolling_median(windows, min_samples=1).over(**over)
-
-
- def d_rank(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((dims[0], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").rank().last())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
-
- def d_prod(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((dims[0], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").cum_prod())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
-
- def d_max(expr: pl.Expr, windows): return expr.rolling_max(windows, min_samples=1).over(**over)
-
-
- def d_min(expr: pl.Expr, windows): return expr.rolling_min(windows, min_samples=1).over(**over)
-
-
- def d_ewmmean(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_mean(com=com,
-                       span=span,
-                       half_life=half_life,
-                       alpha=alpha,
-                       adjust=False,
-                       min_samples=1)
-             .over(**over))
-
-
- def d_ewmstd(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_std(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def d_ewmvar(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_var(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def d_cv(expr: pl.Expr, windows): return d_std(expr, windows) / d_mean(expr, windows)
-
-
- def d_snr(expr: pl.Expr, windows): return d_mean(expr, windows) / d_std(expr, windows)  # signal-to-noise ratio
-
-
- def d_diff(expr: pl.Expr, windows=1): return expr.diff(windows).over(**over)
-
-
- def d_pct(expr: pl.Expr, windows=1): return expr.pct_change(windows).over(**over)
-
-
- def d_corr(left: pl.Expr, right: pl.Expr, windows):
-     return pl.rolling_corr(left, right, window_size=windows, min_samples=1).over(**over)
-
-
- def d_cov(left: pl.Expr, right: pl.Expr, windows):
-     return (pl.rolling_cov(left, right, window_size=windows, min_samples=1)
-             .over(**over).replace(np.nan, None))
-
-
- def d_slope(left: pl.Expr, right: pl.Expr, windows):
-     return (d_mean(left * right, windows) - d_mean(right, windows) * d_mean(left, windows)) / d_var(right, windows)
-
-
- def d_resid(left: pl.Expr, right: pl.Expr, windows): return right - d_slope(left, right, windows) * right
-
-
- def d_quantile(expr: pl.Expr, windows, quantile):
-     return expr.rolling_quantile(window_size=windows, quantile=quantile, min_samples=1).over(**over)
-
- def d_entropy(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((dims[0], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").entropy())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
- def d_zscore(expr: pl.Expr, windows):
-     return (expr - d_mean(expr, windows)) / d_std(expr, windows)
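A note on the map_batches-based helpers (d_ref, d_rank, d_prod, d_entropy): instead of an .over() window they rely on a fixed layout, where reshaping the flat column to (n_dates, n_time * n_assets) puts one date per row, so shifting or rolling over rows operates along the date axis for every (time, asset) series at once. The ind_* counterparts below add a transpose because their rolling axis (intraday time) is the middle dimension. A minimal sketch of that layout assumption, using toy data; this is my reading of the deleted code rather than anything documented by the package:

    import numpy as np
    import polars as pl

    n_dates, n_series = 4, 3                       # 4 dates x 3 (time, asset) pairs
    flat = np.arange(n_dates * n_series, dtype=float)

    lagged = (
        pl.DataFrame(flat.reshape((n_dates, -1)))  # one row per date
        .shift(1)                                  # lag every series by one date
        .to_numpy()
        .ravel()                                   # back to the flat column layout
    )
    print(lagged)  # first 3 entries are NaN, the rest are the previous date's values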
ycat/qdf/udf/ind_udf.py DELETED
@@ -1,202 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- ---------------------------------------------
- Created on 2025/3/5 01:04
- @author: ZhangYundi
- @email: yundi.xxii@outlook.com
- ---------------------------------------------
- """
- import numpy as np
- import polars as pl
-
- over = dict(
-     partition_by=["date", "asset"],
-     order_by=["time"]
- )
-
-
- def ind_mean(expr: pl.Expr, windows): return expr.rolling_mean(windows, min_samples=1).over(**over)
-
-
- def ind_std(expr: pl.Expr, windows): return expr.rolling_std(windows, min_samples=1).over(**over)
-
-
- def ind_sum(expr: pl.Expr, windows): return expr.rolling_sum(windows, min_samples=1).over(**over)
-
-
- def ind_var(expr: pl.Expr, windows): return expr.rolling_var(windows, min_samples=1).over(**over)
-
-
- def ind_skew(expr: pl.Expr, windows): return expr.rolling_skew(windows).over(**over)
-
-
- def ind_ref(expr: pl.Expr, windows, dims):  # return expr.shift(int(abs(windows))).over(**over)
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x
-                 .to_numpy()
-                 .reshape(dims)
-                 .transpose((1, 0, 2))
-                 .reshape((dims[1], -1))
-             )
-             .shift(windows)
-             .to_numpy()
-             .reshape((dims[1], dims[0], dims[2]))
-             .transpose((1, 0, 2))
-             .ravel()
-         )
-         .replace(np.nan, None)
-     )
-
-
- def ind_mid(expr: pl.Expr, windows): return expr.rolling_median(windows, min_samples=1).over(**over)
-
-
- def ind_mad(expr: pl.Expr, windows):
-     return 1.4826 * (expr - expr.rolling_median(windows, min_samples=1)).abs().rolling_median(windows, min_samples=1).over(**over)
-
-
- def ind_rank(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x
-                 .to_numpy()
-                 .reshape(dims)
-                 .transpose((1, 0, 2))
-                 .reshape((dims[1], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").rank().last())
-             .drop("index")
-             .to_numpy()
-             .reshape((dims[1], dims[0], dims[2]))
-             .transpose((1, 0, 2))
-             .ravel()
-         )
-     )
-
- def ind_prod(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x
-                 .to_numpy()
-                 .reshape(dims)
-                 .transpose((1, 0, 2))
-                 .reshape((dims[1], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").cum_prod())
-             .drop("index")
-             .to_numpy()
-             .reshape((dims[1], dims[0], dims[2]))
-             .transpose((1, 0, 2))
-             .ravel()
-         )
-     )
-
- def ind_max(expr: pl.Expr, windows): return expr.rolling_max(windows, min_samples=1).over(**over)
-
-
- def ind_min(expr: pl.Expr, windows): return expr.rolling_min(windows, min_samples=1).over(**over)
-
-
- def ind_ewmmean(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_mean(com=com,
-                       span=span,
-                       half_life=half_life,
-                       alpha=alpha,
-                       adjust=False,
-                       min_samples=1)
-             .over(**over))
-
-
- def ind_ewmstd(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_std(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def ind_ewmvar(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_var(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def ind_cv(expr: pl.Expr, windows): return ind_std(expr, windows) / ind_mean(expr, windows)
-
-
- def ind_snr(expr: pl.Expr, windows):
-     return ind_mean(expr, windows) / ind_std(expr, windows)  # signal-to-noise ratio
-
-
- def ind_diff(expr: pl.Expr, windows=1): return expr.diff(windows).over(**over)
-
-
- def ind_pct(expr: pl.Expr, windows=1): return expr.pct_change(windows).over(**over)
-
-
- def ind_corr(left: pl.Expr, right: pl.Expr, windows):
-     return pl.rolling_corr(left, right, window_size=windows, min_samples=1).over(**over)
-
-
- def ind_cov(left: pl.Expr, right: pl.Expr, windows):
-     return (pl.rolling_cov(left, right, window_size=windows, min_samples=1)
-             .over(**over).replace(np.nan, None))
-
- def ind_slope(left: pl.Expr, right: pl.Expr, windows):
-     return ((ind_mean(left * right, windows) - ind_mean(right, windows) * ind_mean(left, windows))
-             / ind_var(right, windows))
-
-
- def ind_resid(left: pl.Expr, right: pl.Expr, windows): return right - ind_slope(left, right, windows) * right
-
-
- def ind_quantile(expr: pl.Expr, windows, quantile):
-     return expr.rolling_quantile(window_size=windows, quantile=quantile, min_samples=1).over(**over)
-
- def ind_entropy(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x
-                 .to_numpy()
-                 .reshape(dims)
-                 .transpose((1, 0, 2))
-                 .reshape((dims[1], -1))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").entropy())
-             .drop("index")
-             .to_numpy()
-             .reshape((dims[1], dims[0], dims[2]))
-             .transpose((1, 0, 2))
-             .ravel()
-         )
-     )
-
- def ind_zscore(expr: pl.Expr, windows):
-     return (expr - ind_mean(expr, windows)) / ind_std(expr, windows)
-
- def ind_norm(expr: pl.Expr, windows):
-     return (expr - ind_mid(expr, windows)) / ind_mad(expr, windows)
ycat/qdf/udf/ts_udf.py DELETED
@@ -1,175 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- ---------------------------------------------
- Created on 2025/3/5 01:04
- @author: ZhangYundi
- @email: yundi.xxii@outlook.com
- ---------------------------------------------
- """
- import numpy as np
- import polars as pl
-
- over = dict(
-     partition_by=["asset"],
-     order_by=["date", "time"]
- )
-
-
- def ts_mean(expr: pl.Expr, windows): return expr.rolling_mean(windows, min_samples=1).over(**over)
-
-
- def ts_std(expr: pl.Expr, windows): return expr.rolling_std(windows, min_samples=1).over(**over)
-
-
- def ts_sum(expr: pl.Expr, windows): return expr.rolling_sum(windows, min_samples=1).over(**over)
-
-
- def ts_var(expr: pl.Expr, windows): return expr.rolling_var(windows, min_samples=1).over(**over)
-
-
- def ts_skew(expr: pl.Expr, windows): return expr.rolling_skew(windows).over(**over)
-
-
- def ts_ref(expr: pl.Expr, windows, dims):  # return expr.shift(int(abs(windows))).over(**over)
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((-1, dims[-1]))
-             )
-             .shift(windows)
-             .to_numpy()
-             .ravel()
-         )
-         .replace(np.nan, None)
-     )
-
-
- def ts_mid(expr: pl.Expr, windows): return expr.rolling_median(windows, min_samples=1).over(**over)
-
-
- def ts_mad(expr: pl.Expr, windows):
-     return 1.4826 * (expr - expr.rolling_median(windows, min_samples=1)).abs().rolling_median(windows, min_samples=1).over(**over)
-
-
- def ts_rank(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((-1, dims[-1]))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").rank().last())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
- def ts_prod(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((-1, dims[-1]))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").cum_prod())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
-
- def ts_max(expr: pl.Expr, windows): return expr.rolling_max(windows, min_samples=1).over(**over)
-
-
- def ts_min(expr: pl.Expr, windows): return expr.rolling_min(windows, min_samples=1).over(**over)
-
-
- def ts_ewmmean(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_mean(com=com,
-                       span=span,
-                       half_life=half_life,
-                       alpha=alpha,
-                       adjust=False,
-                       min_samples=1)
-             .over(**over))
-
-
- def ts_ewmstd(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_std(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def ts_ewmvar(expr: pl.Expr, com=None, span=None, half_life=None, alpha=None):
-     return (expr
-             .ewm_var(com=com,
-                      span=span,
-                      half_life=half_life,
-                      alpha=alpha,
-                      adjust=False,
-                      min_samples=1)
-             .over(**over))
-
-
- def ts_cv(expr: pl.Expr, windows): return ts_std(expr, windows) / ts_mean(expr, windows)
-
-
- def ts_snr(expr: pl.Expr, windows): return ts_mean(expr, windows) / ts_std(expr, windows)  # signal-to-noise ratio
-
-
- def ts_diff(expr: pl.Expr, windows=1): return expr.diff(windows).over(**over)
-
-
- def ts_pct(expr: pl.Expr, windows=1): return expr.pct_change(windows).over(**over)
-
-
- def ts_corr(left: pl.Expr, right: pl.Expr, windows):
-     return pl.rolling_corr(left, right, window_size=windows, min_samples=1).over(**over)
-
-
- def ts_cov(left: pl.Expr, right: pl.Expr, windows):
-     return (pl.rolling_cov(left, right, window_size=windows, min_samples=1)
-             .over(**over).replace(np.nan, None))
-
-
- def ts_slope(left: pl.Expr, right: pl.Expr, windows):
-     return (ts_mean(left * right, windows) - ts_mean(right, windows) * ts_mean(left, windows)) / ts_var(right, windows)
-
-
- def ts_resid(left: pl.Expr, right: pl.Expr, windows): return right - ts_slope(left, right, windows) * right
-
-
- def ts_quantile(expr: pl.Expr, windows, quantile):
-     return expr.rolling_quantile(window_size=windows, quantile=quantile, min_samples=1).over(**over)
-
- def ts_entropy(expr: pl.Expr, windows, dims):
-     return (
-         expr
-         .map_batches(
-             lambda x: pl.DataFrame(
-                 x.to_numpy().reshape((-1, dims[-1]))
-             )
-             .with_row_index()
-             .rolling("index", period=f"{windows}i")
-             .agg(pl.all().exclude("index").entropy())
-             .drop("index")
-             .to_numpy()
-             .ravel()
-         )
-     )
-
- def ts_zscore(expr: pl.Expr, windows):
-     return (expr - ts_mean(expr, windows)) / ts_std(expr, windows)
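As with the other modules, the removed ts_* helpers are plain rolling expressions, partitioned by asset and ordered along (date, time). A minimal usage sketch, assuming a frame already sorted by date and time within each asset (column names and values invented for illustration; min_samples matches the removed code and requires a recent Polars):

    import polars as pl

    df = pl.DataFrame({
        "date":  ["2025-03-03", "2025-03-04", "2025-03-05"] * 2,
        "time":  ["15:00"] * 6,
        "asset": ["A"] * 3 + ["B"] * 3,
        "close": [10.0, 10.5, 10.2, 20.0, 19.5, 19.8],
    }).sort(["asset", "date", "time"])

    # ts_zscore(pl.col("close"), 2) expands to roughly this expression:
    win = 2
    ts_z = (
        (pl.col("close") - pl.col("close").rolling_mean(win, min_samples=1).over("asset"))
        / pl.col("close").rolling_std(win, min_samples=1).over("asset")
    )
    print(df.with_columns(ts_z.alias("close_ts_z")))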