pyacm 0.1__py3-none-any.whl → 0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyacm/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from pyacm.acm import NominalACM
2
+
3
+ __all__ = [
4
+ "NominalACM",
5
+ ]
pyacm/acm.py ADDED
@@ -0,0 +1,383 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
+ from numpy.linalg import inv
5
+ from sklearn.decomposition import PCA
6
+
7
+ from pyacm.utils import vec, vec_quad_form, commutation_matrix
8
+
9
+
10
+ class NominalACM:
11
+ """
12
+ This class implements the model from the article:
13
+
14
+ Adrian, Tobias, Richard K. Crump, and Emanuel Moench. “Pricing the
15
+ Term Structure with Linear Regressions.” SSRN Electronic Journal,
16
+ 2012. https://doi.org/10.2139/ssrn.1362586.
17
+
18
+ It handles data transformation, estimates parameters and generates the
19
+ relevant outputs. The version of the article that was published by the NY
20
+ FED is not 100% explicit on how the data is being manipulated, but I found
21
+ an earlier version of the paper on SSRN where the authors go deeper into
22
+ the details on how everything is being estimated:
23
+ - Data for zero yields uses monthly maturities starting from month 1
24
+ - All principal components and model parameters are estimated with data
25
+ resampled to a monthly frequency, averaging observations in each
26
+ month.
27
+ - To get daily / real-time estimates, the factor loadings estimated
28
+ from the monthly frequency are used to transform the daily data.
29
+
30
+ Attributes
31
+ ----------
32
+ n_factors: int
33
+ number of principal components used
34
+
35
+ curve: pandas.DataFrame
36
+ Raw data of the yield curve
37
+
38
+ curve_monthly: pandas.DataFrame
39
+ Yield curve data resampled to a monthly frequency by averaging
40
+ the observations
41
+
42
+ t: int
43
+ Number of observations in the timeseries dimension
44
+
45
+ n: int
46
+ Number of observations in the cross-sectional dimension. Same
47
+ as number of maturities available after returns are computed
48
+
49
+ rx_m: pd.DataFrame
50
+ Excess returns in monthly frequency
51
+
52
+ rf_m: pandas.Series
53
+ Risk-free rate in monthly frequency
54
+
55
+ rf_d: pandas.Series
56
+ Risk-free rate in daily frequency
57
+
58
+ pc_factors_m: pandas.DataFrame
59
+ Principal components in monthly frequency
60
+
61
+ pc_loadings_m: pandas.DataFrame
62
+ Factor loadings of the monthly PCs
63
+
64
+ pc_explained_m: pandas.Series
65
+ Percent of total variance explained by each monthly principal component
66
+
67
+ pc_factors_d: pandas.DataFrame
68
+ Principal components in daily frequency
69
+
70
+ pc_loadings_d: pandas.DataFrame
71
+ Factor loadings of the daily PCs
72
+
73
+ pc_explained_d: pandas.Series
74
+ Percent of total variance explained by each daily principal component
75
+
76
+ mu, phi, Sigma, v: numpy.array
77
+ Estimates of the VAR(1) parameters, the first stage of estimation.
78
+ The names are the same as the original paper
79
+
80
+ a, beta, c, sigma2: numpy.array
81
+ Estimates of the risk premium equation, the second stage of estimation.
82
+ The names are the same as the original paper
83
+
84
+ lambda0, lambda1: numpy.array
85
+ Estimates of the price of risk parameters, the third stage of estimation.
86
+ The names are the same as the original paper
87
+
88
+ miy: pandas.DataFrame
89
+ Model implied / fitted yields
90
+
91
+ rny: pandas.DataFrame
92
+ Risk neutral yields
93
+
94
+ tp: pandas.DataFrame
95
+ Term premium estimates
96
+
97
+ er_loadings: pandas.DataFrame
98
+ Loadings of the expected returns on the principal components
99
+
100
+ er_hist_m: pandas.DataFrame
101
+ Historical estimates of expected returns, computed in-sample, in monthly frequency
102
+
103
+ er_hist_d: pandas.DataFrame
104
+ Historical estimates of expected returns, computed in-sample, in daily frequency
105
+
106
+ z_lambda: pandas.DataFrame
107
+ Z-stat for inference on the price of risk parameters
108
+
109
+ z_beta: pandas.DataFrame
110
+ Z-stat for inference on the loadings of expected returns
111
+ """
112
+
113
+ def __init__(self, curve, n_factors=5):
114
+ """
115
+ Runs the baseline version of the ACM term premium model. Works for data
116
+ with monthly frequency or higher.
117
+
118
+ Parameters
119
+ ----------
120
+ curve : pandas.DataFrame
121
+ Annualized log-yields. Maturities (columns) must start at month 1
122
+ and be equally spaced in monthly frequency. The labels of the
123
+ columns do not matter, they will be kept the same. Observations (index)
124
+ must be of monthly frequency or higher. The index must be a
125
+ pandas.DateTimeIndex.
126
+
127
+ n_factors : int
128
+ number of principal components to use as state variables.
129
+ """
130
+
131
+ self.n_factors = n_factors
132
+ self.curve = curve
133
+ self.curve_monthly = curve.resample('ME').mean()
134
+ self.t = self.curve_monthly.shape[0] - 1
135
+ self.n = self.curve_monthly.shape[1]
136
+ self.rx_m, self.rf_m = self._get_excess_returns()
137
+ self.rf_d = self.curve.iloc[:, 0] * (1 / 12)
138
+ self.pc_factors_m, self.pc_loadings_m, self.pc_explained_m = self._get_pcs(self.curve_monthly)
139
+ self.pc_factors_d, self.pc_loadings_d, self.pc_explained_d = self._get_pcs(self.curve)
140
+ self.mu, self.phi, self.Sigma, self.v = self._estimate_var()
141
+ self.a, self.beta, self.c, self.sigma2 = self._excess_return_regression()
142
+ self.lambda0, self.lambda1 = self._retrieve_lambda()
143
+
144
+ if self.curve.index.freqstr == 'M':
145
+ X = self.pc_factors_m
146
+ r1 = self.rf_m
147
+ else:
148
+ X = self.pc_factors_d
149
+ r1 = self.rf_d
150
+
151
+ self.miy = self._affine_recursions(self.lambda0, self.lambda1, X, r1)
152
+ self.rny = self._affine_recursions(0, 0, X, r1)
153
+ self.tp = self.miy - self.rny
154
+ self.er_loadings, self.er_hist_m, self.er_hist_d = self._expected_return()
155
+ self.z_lambda, self.z_beta = self._inference()
156
+
157
+ def fwd_curve(self, date=None):
158
+ """
159
+ Compute the forward curves for a given date.
160
+
161
+ Parameters
162
+ ----------
163
+ date : date-like
164
+ date in any format that can be interpreted by pandas.to_datetime()
165
+ """
166
+
167
+ if date is None:
168
+ date = self.curve.index[-1]
169
+
170
+ date = pd.to_datetime(date)
171
+ fwd_mkt = self._compute_fwd_curve(self.curve.loc[date])
172
+ fwd_miy = self._compute_fwd_curve(self.miy.loc[date])
173
+ fwd_rny = self._compute_fwd_curve(self.rny.loc[date])
174
+ df = pd.concat(
175
+ [
176
+ fwd_mkt.rename("Observed"),
177
+ fwd_miy.rename("Model Implied"),
178
+ fwd_rny.rename("Risk-Neutral"),
179
+ ],
180
+ axis=1,
181
+ )
182
+ return df
183
+
184
+
185
+ @staticmethod
186
+ def _compute_fwd_curve(curve):
187
+ aux_curve = curve.reset_index(drop=True)
188
+ aux_curve.index = aux_curve.index + 1
189
+ factor = (1 + aux_curve) ** (aux_curve.index / 12)
190
+ fwd_factor = factor / factor.shift(1).fillna(1)
191
+ fwds = (fwd_factor ** 12) - 1
192
+ fwds = pd.Series(fwds.values, index=curve.index)
193
+ return fwds
194
+
195
+ def _get_excess_returns(self):
196
+ ttm = np.arange(1, self.n + 1) / 12
197
+ log_prices = - self.curve_monthly * ttm
198
+ rf = - log_prices.iloc[:, 0].shift(1)
199
+ rx = (log_prices - log_prices.shift(1, axis=0).shift(-1, axis=1)).subtract(rf, axis=0)
200
+ rx = rx.dropna(how='all', axis=0).dropna(how='all', axis=1)
201
+ return rx, rf.dropna()
202
+
203
+ def _get_pcs(self, curve):
204
+ pca = PCA(n_components=self.n_factors)
205
+ pca.fit(curve)
206
+ col_names = [f'PC {i + 1}' for i in range(self.n_factors)]
207
+ df_loadings = pd.DataFrame(data=pca.components_.T,
208
+ columns=col_names,
209
+ index=curve.columns)
210
+
211
+ # Normalize the direction of the eigenvectors
212
+ signal = np.sign(df_loadings.iloc[-1])
213
+ df_loadings = df_loadings * signal
214
+ df_pc = (curve - curve.mean()) @ df_loadings
215
+
216
+ # Percent Explained
217
+ df_explained = pd.Series(data=pca.explained_variance_ratio_,
218
+ name='Explained Variance',
219
+ index=col_names)
220
+
221
+ return df_pc, df_loadings, df_explained
222
+
223
+ def _estimate_var(self):
224
+ X = self.pc_factors_m.copy().T
225
+ X_lhs = X.values[:, 1:] # X_t+1. Left hand side of VAR
226
+ X_rhs = np.vstack((np.ones((1, self.t)), X.values[:, 0:-1])) # X_t and a constant.
227
+
228
+ var_coeffs = (X_lhs @ np.linalg.pinv(X_rhs))
229
+ mu = var_coeffs[:, [0]]
230
+ phi = var_coeffs[:, 1:]
231
+
232
+ v = X_lhs - var_coeffs @ X_rhs
233
+ Sigma = v @ v.T / self.t
234
+
235
+ return mu, phi, Sigma, v
236
+
237
+ def _excess_return_regression(self):
238
+ X = self.pc_factors_m.copy().T.values[:, :-1]
239
+ Z = np.vstack((np.ones((1, self.t)), self.v, X)) # Innovations and lagged X
240
+ abc = self.rx_m.values.T @ np.linalg.pinv(Z)
241
+ E = self.rx_m.values.T - abc @ Z
242
+ sigma2 = np.trace(E @ E.T) / (self.n * self.t)
243
+
244
+ a = abc[:, [0]]
245
+ beta = abc[:, 1:self.n_factors + 1].T
246
+ c = abc[:, self.n_factors + 1:]
247
+
248
+ return a, beta, c, sigma2
249
+
250
+ def _retrieve_lambda(self):
251
+ BStar = np.squeeze(np.apply_along_axis(vec_quad_form, 1, self.beta.T))
252
+ lambda1 = np.linalg.pinv(self.beta.T) @ self.c
253
+ lambda0 = np.linalg.pinv(self.beta.T) @ (self.a + 0.5 * (BStar @ vec(self.Sigma) + self.sigma2))
254
+ return lambda0, lambda1
255
+
256
+ def _affine_recursions(self, lambda0, lambda1, X_in, r1):
257
+ X = X_in.T.values[:, 1:]
258
+ r1 = vec(r1.values)[-X.shape[1]:, :]
259
+
260
+ A = np.zeros((1, self.n))
261
+ B = np.zeros((self.n_factors, self.n))
262
+
263
+ delta = r1.T @ np.linalg.pinv(np.vstack((np.ones((1, X.shape[1])), X)))
264
+ delta0 = delta[[0], [0]]
265
+ delta1 = delta[[0], 1:]
266
+
267
+ A[0, 0] = - delta0
268
+ B[:, 0] = - delta1
269
+
270
+ for i in range(self.n - 1):
271
+ A[0, i + 1] = A[0, i] + B[:, i].T @ (self.mu - lambda0) + 1 / 2 * (B[:, i].T @ self.Sigma @ B[:, i] + 0 * self.sigma2) - delta0
272
+ B[:, i + 1] = B[:, i] @ (self.phi - lambda1) - delta1
273
+
274
+ # Construct fitted yields
275
+ ttm = np.arange(1, self.n + 1) / 12
276
+ fitted_log_prices = (A.T + B.T @ X).T
277
+ fitted_yields = - fitted_log_prices / ttm
278
+ fitted_yields = pd.DataFrame(
279
+ data=fitted_yields,
280
+ index=self.curve.index[1:],
281
+ columns=self.curve.columns,
282
+ )
283
+ return fitted_yields
284
+
285
+ def _expected_return(self):
286
+ """
287
+ Compute the "expected return" and "convexity adjustment" terms, to get
288
+ the expected return loadings and historical estimate
289
+
290
+ Loadings are interpreted as the effect of 1sd of the PCs on the
291
+ expected returns
292
+ """
293
+ stds = self.pc_factors_m.std().values[:, None].T
294
+ er_loadings = (self.beta.T @ self.lambda1) * stds
295
+ er_loadings = pd.DataFrame(
296
+ data=er_loadings,
297
+ columns=self.pc_factors_m.columns,
298
+ index=self.curve.columns[:-1],
299
+ )
300
+
301
+ # Monthly
302
+ exp_ret = (self.beta.T @ (self.lambda1 @ self.pc_factors_m.T + self.lambda0)).values
303
+ conv_adj = np.diag(self.beta.T @ self.Sigma @ self.beta) + self.sigma2
304
+ er_hist = (exp_ret + conv_adj[:, None]).T
305
+ er_hist_m = pd.DataFrame(
306
+ data=er_hist,
307
+ index=self.pc_factors_m.index,
308
+ columns=self.curve.columns[:er_hist.shape[1]]
309
+ )
310
+
311
+ # Higher frequency
312
+ exp_ret = (self.beta.T @ (self.lambda1 @ self.pc_factors_d.T + self.lambda0)).values
313
+ conv_adj = np.diag(self.beta.T @ self.Sigma @ self.beta) + self.sigma2
314
+ er_hist = (exp_ret + conv_adj[:, None]).T
315
+ er_hist_d = pd.DataFrame(
316
+ data=er_hist,
317
+ index=self.pc_factors_d.index,
318
+ columns=self.curve.columns[:er_hist.shape[1]]
319
+ )
320
+
321
+ return er_loadings, er_hist_m, er_hist_d
322
+
323
+ def _inference(self):
324
+ # TODO I AM NOT SURE THAT THIS SECTION IS CORRECT
325
+
326
+ # Auxiliary matrices
327
+ Z = self.pc_factors_m.copy().T
328
+ Z = Z.values[:, 1:]
329
+ Z = np.vstack((np.ones((1, self.t)), Z))
330
+
331
+ Lamb = np.hstack((self.lambda0, self.lambda1))
332
+
333
+ rho1 = np.zeros((self.n_factors + 1, 1))
334
+ rho1[0, 0] = 1
335
+
336
+ A_beta = np.zeros((self.n_factors * self.beta.shape[1], self.beta.shape[1]))
337
+
338
+ for ii in range(self.beta.shape[1]):
339
+ A_beta[ii * self.beta.shape[0]:(ii + 1) * self.beta.shape[0], ii] = self.beta[:, ii]
340
+
341
+ BStar = np.squeeze(np.apply_along_axis(vec_quad_form, 1, self.beta.T))
342
+
343
+ comm_kk = commutation_matrix(shape=(self.n_factors, self.n_factors))
344
+ comm_kn = commutation_matrix(shape=(self.n_factors, self.beta.shape[1]))
345
+
346
+ # Asymptotic variance of the betas
347
+ v_beta = self.sigma2 * np.kron(np.eye(self.beta.shape[1]), inv(self.Sigma))
348
+
349
+ # Asymptotic variance of the lambdas
350
+ upsilon_zz = (1 / self.t) * Z @ Z.T
351
+ v1 = np.kron(inv(upsilon_zz), self.Sigma)
352
+ v2 = self.sigma2 * np.kron(inv(upsilon_zz), inv(self.beta @ self.beta.T))
353
+ v3 = self.sigma2 * np.kron(Lamb.T @ self.Sigma @ Lamb, inv(self.beta @ self.beta.T))
354
+
355
+ v4_sim = inv(self.beta @ self.beta.T) @ self.beta @ A_beta.T
356
+ v4_mid = np.kron(np.eye(self.beta.shape[1]), self.Sigma)
357
+ v4 = self.sigma2 * np.kron(rho1 @ rho1.T, v4_sim @ v4_mid @ v4_sim.T)
358
+
359
+ v5_sim = inv(self.beta @ self.beta.T) @ self.beta @ BStar
360
+ v5_mid = (np.eye(self.n_factors ** 2) + comm_kk) @ np.kron(self.Sigma, self.Sigma)
361
+ v5 = 0.25 * np.kron(rho1 @ rho1.T, v5_sim @ v5_mid @ v5_sim.T)
362
+
363
+ v6_sim = inv(self.beta @ self.beta.T) @ self.beta @ np.ones((self.beta.shape[1], 1))
364
+ v6 = 0.5 * (self.sigma2 ** 2) * np.kron(rho1 @ rho1.T, v6_sim @ v6_sim.T)
365
+
366
+ v_lambda_tau = v1 + v2 + v3 + v4 + v5 + v6
367
+
368
+ c_lambda_tau_1 = np.kron(Lamb.T, inv(self.beta @ self.beta.T) @ self.beta)
369
+ c_lambda_tau_2 = np.kron(rho1, inv(self.beta @ self.beta.T) @ self.beta @ A_beta.T @ np.kron(np.eye(self.beta.shape[1]), self.Sigma))
370
+ c_lambda_tau = - c_lambda_tau_1 @ comm_kn @ v_beta @ c_lambda_tau_2.T
371
+
372
+ v_lambda = v_lambda_tau + c_lambda_tau + c_lambda_tau.T
373
+
374
+ # extract the z-tests
375
+ sd_lambda = np.sqrt(np.diag(v_lambda).reshape(Lamb.shape, order='F'))
376
+ sd_beta = np.sqrt(np.diag(v_beta).reshape(self.beta.shape, order='F'))
377
+
378
+ z_beta = pd.DataFrame(self.beta / sd_beta, index=self.pc_factors_m.columns, columns=self.curve.columns[:-1]).T
379
+ z_lambda = pd.DataFrame(Lamb / sd_lambda, index=self.pc_factors_m.columns, columns=[f"lambda {i}" for i in range(Lamb.shape[1])])
380
+
381
+ return z_lambda, z_beta
382
+
383
+
pyacm/utils.py ADDED
@@ -0,0 +1,43 @@
1
+ import numpy as np
2
+
3
+
4
+ def vec(mat):
5
+ """
6
+ Stack the columns of `mat` into a column vector. If mat is a M x N matrix,
7
+ then vec(mat) is an MN X 1 vector.
8
+
9
+ Parameters
10
+ ----------
11
+ mat: numpy.array
12
+ """
13
+ vec_mat = mat.reshape((-1, 1), order='F')
14
+ return vec_mat
15
+
16
+
17
+ def vec_quad_form(mat):
18
+ """
19
+ `vec` operation for quadratic forms
20
+
21
+ Parameters
22
+ ----------
23
+ mat: numpy.array
24
+ """
25
+ return vec(np.outer(mat, mat))
26
+
27
+
28
+ def commutation_matrix(shape):
29
+ """
30
+ Generates the commutation matrix for a matrix with shape equal to `shape`.
31
+
32
+ The definition of a commutation matrix `k` is:
33
+ k @ vec(mat) = vec(mat.T)
34
+
35
+ Parameters
36
+ ----------
37
+ shape : tuple
38
+ 2-d tuple (m, n) with the shape of `mat`
39
+ """
40
+ m, n = shape
41
+ w = np.arange(m * n).reshape((m, n), order="F").T.ravel(order="F")
42
+ k = np.eye(m * n)[w, :]
43
+ return k
@@ -0,0 +1,70 @@
1
+ Metadata-Version: 2.1
2
+ Name: pyacm
3
+ Version: 0.2
4
+ Summary: ACM Term Premium
5
+ Author: Tobias Adrian, Richard K. Crump, Emanuel Moench
6
+ Maintainer: Gustavo Amarante
7
+ Maintainer-email: developer@dsgepy.com
8
+ Keywords: asset pricing,yield curve,term premium
9
+ Description-Content-Type: text/markdown
10
+ License-File: LICENSE
11
+ Requires-Dist: matplotlib
12
+ Requires-Dist: numpy
13
+ Requires-Dist: pandas
14
+ Requires-Dist: scikit-learn
15
+ Requires-Dist: tqdm
16
+
17
+
18
+ [paper_website]: https://www.newyorkfed.org/medialibrary/media/research/staff_reports/sr340.pdf
19
+ [inference_atribute]: https://github.com/gusamarante/pyacm/blob/ba641c14e450fc83d22db4ef5e60eadbd489b351/pyacm/acm.py#L203
20
+
21
+ # pyacm
22
+ Implementation of ["Pricing the Term Structure with Linear Regressions" from
23
+ Adrian, Crump and Moench (2013)][paper_website].
24
+
25
+ The `NominalACM` class prices the time series and cross-section of the term
26
+ structure of interest rates using a three-step linear regression approach.
27
+ Computations are fast, even with a large number of pricing factors. The object
28
+ carries all the relevant variables as attributes:
29
+ - The yield curve itself
30
+ - The excess returns from the synthetic zero coupon bonds
31
+ - The principal components of the curve used as pricing factors
32
+ - Risk premium parameter estimates
33
+ - Yields fitted by the model
34
+ - Risk-neutral yields
35
+ - Term premium
36
+ - Expected return loadings
37
+ - Hypothesis testing (Not sure if correct, more info in the Observations section below)
38
+
39
+
40
+ # Installation
41
+ ```bash
42
+ pip install pyacm
43
+ ```
44
+
45
+
46
+ # Original Article
47
+ > Adrian, Tobias and Crump, Richard K. and Moench, Emanuel,
48
+ > Pricing the Term Structure with Linear Regressions (April 11, 2013).
49
+ > FRB of New York Staff Report No. 340,
50
+ > Available at SSRN: https://ssrn.com/abstract=1362586 or http://dx.doi.org/10.2139/ssrn.1362586
51
+
52
+ The version of the article that was published by the NY FED is not 100% explicit on how the data is being manipulated,
53
+ but I found an earlier version of the paper on SSRN where the authors go deeper into the details on how everything is being estimated:
54
+ - Data for zero yields uses monthly maturities starting from month 1
55
+ - All principal components and model parameters are estimated with data resampled to a monthly frequency, averaging observations in each month
56
+ - To get daily / real-time estimates, the factor loadings estimated from the monthly frequency are used to transform the daily data
57
+
58
+
59
+ # Usage
60
+ The tricky part of using this model is getting the correct data format:
61
+ - The model works with annualized log-yields for zero-coupon bonds
62
+ - Observations (index) must be in either monthly or daily frequency
63
+ - Maturities (columns) must be equally spaced in **monthly** frequency and start at month 1. This means that you need to construct a bootstrapped curve for every date and interpolate it at fixed monthly maturities.
64
+ - Whichever maturity you want to be the longest, your input data should have one column more. For example, if you want term premium estimate up to the 10-year yield (120 months), your input data should include maturities up to 121 months. This is needed to properly compute the returns.
65
+
66
+
67
+ # Observations
68
+ I am not completely sure that computations in the [inference attributes][inference_atribute]
69
+ are correct. If you find any mistakes, please open a pull request following the contributing
70
+ guidelines.
@@ -0,0 +1,8 @@
1
+ pyacm/__init__.py,sha256=pRFuR3Au_ybQAmkJduGrLMKGJd1pxjhGfhsfsjlK-mU,66
2
+ pyacm/acm.py,sha256=sUtw0NMEDfrpCtRWmVTryvJqMQXFvXo6m9tRAH1PGEA,14492
3
+ pyacm/utils.py,sha256=-PmH9L3LpzqUP-QU5BHisoLSBYrq-3PaPgR-W1sS1z8,904
4
+ pyacm-0.2.dist-info/LICENSE,sha256=YbUXx25Z6PzP4k4rsbs6tN58NiCwGIIrTMzql4iTeDs,1073
5
+ pyacm-0.2.dist-info/METADATA,sha256=UUo-0gkeAL5nudUKju0UQlsUCs5VCzrfrFGvbVkNLFE,3286
6
+ pyacm-0.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
7
+ pyacm-0.2.dist-info/top_level.txt,sha256=xQy7q1eSKCnRtTnwb-Iz_spT0UDNdTyzKd43yz-ffrI,6
8
+ pyacm-0.2.dist-info/RECORD,,
@@ -0,0 +1 @@
1
+ pyacm
@@ -1,18 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: pyacm
3
- Version: 0.1
4
- Summary: ACM Term Premium
5
- Author: Tobias Adrian, Richard K. Crump, Emanuel Moench
6
- Maintainer: Gustavo Amarante
7
- Maintainer-email: developer@dsgepy.com
8
- Keywords: asset pricing,yield curve,term premium
9
- Description-Content-Type: text/markdown
10
- License-File: LICENSE
11
- Requires-Dist: pandas
12
- Requires-Dist: scikit-learn
13
- Requires-Dist: numpy
14
- Requires-Dist: matplotlib
15
-
16
-
17
- # pyacm
18
- Implementation of "Pricing the Term Structure with Linear Regressions" from Adrian, Crump and Moench (2013)
@@ -1,5 +0,0 @@
1
- pyacm-0.1.dist-info/LICENSE,sha256=YbUXx25Z6PzP4k4rsbs6tN58NiCwGIIrTMzql4iTeDs,1073
2
- pyacm-0.1.dist-info/METADATA,sha256=5wIOk9eXnvc1lqZCJ4l5X3LF8MSiSfTbGvpe2-IUboc,523
3
- pyacm-0.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
4
- pyacm-0.1.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
5
- pyacm-0.1.dist-info/RECORD,,
@@ -1 +0,0 @@
1
-
File without changes
File without changes