virgo-modules 0.1.2__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of virgo-modules might be problematic.

@@ -339,7 +339,7 @@ class SignalAnalyserObject:
  axs_.plot(dft.strat_prod_exp.values, label = 'strategy', color = 'darksalmon')
  axs_.set_xlabel("index")
  axs_.set_ylabel("cumulative return")
- axs_.set_title(f'{map_[open_in]} strategy and cumulative returns based on signals')
+ axs_.set_title(f'{map_[open_in]} strategy and cumulative returns')
  axs_.legend()

  if self.show_plot:
@@ -0,0 +1,440 @@
+ from hmmlearn.hmm import GaussianHMM
+
+ from sklearn.pipeline import Pipeline
+ from feature_engine.imputation import MeanMedianImputer
+ from feature_engine.selection import DropCorrelatedFeatures
+ from sklearn.preprocessing import RobustScaler
+
+ import pandas as pd
+ import numpy as np
+ import random
+
+ import matplotlib.pyplot as plt
+ import matplotlib.gridspec as gridspec
+ import seaborn as sns; sns.set()
+
+ from virgo_modules.src.ticketer_source import FeatureSelector, states_relevance_score
+
+ def create_hmm_derived_features(df, lag_returns):
+     """
+     create features derived from the hmm states feature: the index of the state, the duration of the state and the chain return
+     note: this is a copy of the method of the ticketer object with the same name
+
+     Parameters:
+     df (pd.DataFrame): dataframe that must have an hmm_feature column
+     lag_returns (int): lag used to compute returns within a chain
+
+     Returns:
+     df (pd.DataFrame): dataframe with extra hmm features as columns
+     """
+     df = df.sort_values('Date')
+     ## indexing chains: a new chain starts whenever the state changes
+     df['lag_hmm_feature'] = df['hmm_feature'].shift(1)
+     df['breack'] = np.where(df['lag_hmm_feature'] != df['hmm_feature'],1,0)
+     df["chain_id"] = df.groupby("breack")["Date"].rank(method="first", ascending=True)
+     df["chain_id"] = np.where(df['breack'] == 1,df["chain_id"],np.nan)
+     df["chain_id"] = df["chain_id"].ffill()
+     df["hmm_chain_order"] = df.groupby('chain_id')["Date"].rank(method="first", ascending=True)
+     ### returns within each chain using the lag window
+     df['lag_chain_close'] = df.sort_values(by=["Date"]).groupby(['chain_id'])['Close'].shift(lag_returns)
+     df['chain_return'] = (df['Close']/df['lag_chain_close'] -1) * 100
+     df = df.drop(columns = ['breack'])
+     return df
+
+ class trainer_hmm():
+     """
+     wrapper around a gaussian hidden markov model
+     this class follows scikit-learn practices
+
+     Attributes
+     ----------
+     hmm_model: obj
+         pipeline and model
+     features_hmm: list
+         list of features used to train the gaussian model
+
+     Methods
+     -------
+     train():
+         train the pipeline given the parameters in the class initialization
+     plot_training_results(lag_diff_returns=int):
+         plot features and closing prices displaying the states
+         plot the returns distribution by state given the lag used to calculate the returns of the chains
+     """
+     def __init__(self, data, features_hmm, n_clusters= 3, corr_thrshold = 0.65, seed = None):
+         """
+         Initialize object
+
+         Parameters
+         ----------
+         data (pd.DataFrame): training data
+         features_hmm (list): features to pass for modeling
+         n_clusters (int): number of states to train
+         corr_thrshold (float): correlation threshold for initial feature selection
+         seed (int): random state for model reproducibility
+
+         Returns
+         -------
+         None
+         """
+         self.__data_train = data
+         self.__features_hmm = features_hmm
+         self.__n_clusters = n_clusters
+         self.__corr_thrshold = corr_thrshold
+         self.__seed = seed
+     def train(self):
+         """
+         train pipeline and model
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         None
+         """
+         transform_pipe = Pipeline([
+             ('selector', FeatureSelector(columns=self.__features_hmm)),
+             ('fillna', MeanMedianImputer(imputation_method='median',variables=self.__features_hmm)),
+             ('drop_correlated', DropCorrelatedFeatures(method='spearman',threshold=self.__corr_thrshold)),
+         ])
+
+         # features_hmm = list(transform_pipe.fit_transform(self.__data_train).columns)
+         # n_features = len(features_hmm)
+         # startprob_prior = np.array([1/self.__n_clusters]*self.__n_clusters)
+         # a diagonal-heavy transition prior encourages states to persist
+         transmat_prior = np.diag([0.70]*self.__n_clusters)
+         # means_prior = np.array([1/n_features]*n_features)
+         pipeline_hmm = Pipeline([
+             ('transform_pipe', transform_pipe),
+             ('scaler', RobustScaler()),
+             ('hmm', GaussianHMM(
+                 n_components = self.__n_clusters, covariance_type = 'spherical',
+                 # startprob_prior = startprob_prior,
+                 transmat_prior = transmat_prior,
+                 # means_prior = means_prior,
+                 random_state = self.__seed,)
+             )
+         ])
+
+         self.hmm_model = pipeline_hmm.fit(self.__data_train)
+         # keep only the features that survived the correlation filter
+         self.features_hmm = [x for x in self.__features_hmm if x not in list(self.hmm_model[0][-1].features_to_drop_)]
+
+     def plot_training_results(self, lag_diff_returns):
+         """
+         plot results as matplotlib figures
+
+         Parameters
+         ----------
+         lag_diff_returns (int): lag or diff factor to calculate returns of chains
+
+         Returns
+         -------
+         None
+         """
+         n_clusters = self.__n_clusters
+         df_train = self.__data_train.copy()
+         df_train['hmm_feature'] = self.hmm_model.predict(df_train)
+         df_train = create_hmm_derived_features(df_train, lag_diff_returns)
+         n = len(self.features_hmm)+1
+         fig, axs = plt.subplots(n, 1, figsize=(10, 3*n), sharex=True)
+         for i,feature in enumerate(self.features_hmm):
+             axs[i].plot(df_train.Date, df_train[feature])
+             axs[i].set_title(feature)
+             for s in range(n_clusters):
+                 df = df_train[df_train['hmm_feature'] == s]
+                 axs[i].scatter(df.Date, df[feature])
+
+         axs[i+1].plot(df_train.Date, df_train.Close)
+         axs[i+1].set_title('close price')
+         for s in range(n_clusters):
+             df = df_train[df_train['hmm_feature'] == s]
+             axs[i+1].scatter(df.Date, df.Close)
+
+         n = 1
+         fig, axs = plt.subplots(n, 1, figsize=(10, 3*n), sharex=True)
+         df_plot = df_train.dropna()
+         sns.boxplot(data=df_plot, x="hmm_feature", y="chain_return", hue="hmm_feature", ax=axs)
+         axs.axhline(0.5, linestyle='--')
+         del df_train
+
+ def evaluate_model_chains(data, n_clusters, at_least_states, threshold_chain, at_least_length):
+     """
+     function that assesses the chains (series of states) against some sanity checks
+
+     Parameters:
+     data (pd.DataFrame): dataframe that must have hmm_feature and the derived chain features
+     n_clusters (int): number of states that were trained, not observed
+     at_least_states (int): maximum number of states allowed to fall below the chain-count threshold
+     threshold_chain (int): minimum number of chains in which a state should be observed
+     at_least_length (int): minimal length the chains should have on a summary statistic (median, q75, max, etc)
+
+     Returns:
+     result (boolean): true if the model complies with the parameters
+     """
+     def q3(x):
+         return x.quantile(0.75)
+     tmp_df = data.groupby(['hmm_feature','chain_id'],as_index = False).agg(chain_lenght = ('hmm_chain_order','max'))
+     tmp_df = tmp_df.groupby("hmm_feature", as_index = False).agg(count = ('chain_id','nunique'), median_length = ('chain_lenght','median'), q3_length = ('chain_lenght',q3))
+     train_observedstates = len(tmp_df)
+
+     states_under_threshold = list(tmp_df[tmp_df['count'] <= threshold_chain].hmm_feature)
+     n_states_under_threshold = len(states_under_threshold)
+     min_count = np.min(tmp_df[~tmp_df.hmm_feature.isin(states_under_threshold)]['count'].values)
+     med_length = np.min(tmp_df['q3_length'].values)
+
+     condition_1 = threshold_chain <= min_count
+     condition_2 = n_states_under_threshold <= at_least_states
+     condition_3 = at_least_length <= med_length
+     condition_4 = (train_observedstates == n_clusters)
+
+     result = condition_1 and condition_2 and condition_3 and condition_4
+     return result
+
+ def iterate_training(trials, train_params, relevance_params):
+     """
+     iterate training runs, keeping the scores and model of valid runs
+
+     Parameters:
+     trials (int): number of repetitions to iterate
+     train_params (dict): dictionary containing training configurations
+     relevance_params (dict): dictionary containing validation configurations
+
+     Returns:
+     results (list): list of valid relevance scores
+     kept_model (obj): model (pipeline) that is kept, if it exists
+     """
+     results = list()
+     kept_model=None
+     for _ in range(trials):
+         try:
+             th = trainer_hmm(**train_params)
+             th.train()
+             result_model = th.hmm_model
+             df_train_tmp = train_params.get('data')
+             df_train_tmp['hmm_feature'] = result_model.predict(df_train_tmp)
+             df_train_tmp = create_hmm_derived_features(df = df_train_tmp, lag_returns = relevance_params.get('lag'))
+             relev, _, _ = states_relevance_score(df_train_tmp)
+             relevance_hmm = evaluate_model_chains(data = df_train_tmp,
+                                                   n_clusters=train_params.get('n_clusters'),
+                                                   at_least_states=relevance_params.get('at_least_states'),
+                                                   threshold_chain=relevance_params.get('threshold_chain'),
+                                                   at_least_length=relevance_params.get('at_least_length'))
+             if relevance_hmm:
+                 results.append(relev)
+                 kept_model = result_model
+         except Exception:
+             continue
+         del th
+     if not kept_model:
+         raise TypeError("no model was kept")
+     return results, kept_model
+
+ class custom_hmm_permutation_importance():
+     """
+     class that performs feature importance using feature permutation
+     note: this method is inspired by the method of the same name available in scikit-learn
+
+     Attributes
+     ----------
+     n_repeats: int
+         number of shufflings performed per feature
+     features: list
+         list of features to be tested, note that these features have to be inputs of the model
+     results: dict
+         dictionary with the results containing the feature and relevance scores for each iteration
+
+     Methods
+     -------
+     fit():
+         fit class
+     """
+     def __init__(self, model, X, n_repeats=5,random_state=False, features = list(), lag = 4):
+         """
+         Initialize object
+
+         Parameters
+         ----------
+         model (obj): pipeline or model
+         X (pd.DataFrame): input data to test feature permutation
+         n_repeats (int): number of trials per feature
+         random_state (bool): if true, set a random state
+         features (list): list of features to be tested. note that the features have to be inputs of the model
+         lag (int): lag or diff factor to calculate chain returns
+
+         Returns
+         -------
+         None
+         """
+         self.__model = model
+         self.__X = X
+         self.n_repeats = n_repeats
+         self.__random_state = random_state
+         self.features = features
+         self.__lag = lag
+     def __generate_seeds(self):
+         """
+         generate a list of seeds
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         None
+         """
+         if self.__random_state:
+             self.__seeds = list()
+             for _ in range(self.n_repeats):
+                 seed = np.random.randint(1,500)
+                 self.__seeds.append(seed)
+     def fit(self):
+         """
+         fit class
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         None
+         """
+         self.__X['hmm_feature'] = self.__model.predict(self.__X)
+         self.__X = create_hmm_derived_features(df=self.__X, lag_returns=self.__lag)
+         init_relevance, _, _ = states_relevance_score(self.__X)
+         self.results = {feature: list() for feature in self.features}
+         if self.__random_state:
+             self.__generate_seeds()
+         for feature in self.features:
+             X_ = self.__X.dropna().reset_index(drop = True).copy()
+             for j in range(self.n_repeats):
+                 if self.__random_state:
+                     seed = self.__seeds[j]
+                     np.random.seed(seed)
+                 else:
+                     seed = None
+                 # resample the feature column (with replacement) to break its link with the state sequence
+                 shuffled = X_[feature].sample(frac=1, random_state = seed, replace = True).reset_index(drop=True)
+                 X_[feature] = shuffled
+                 X_['hmm_feature'] = self.__model.predict(X_)
+                 X_ = create_hmm_derived_features(df=X_, lag_returns=self.__lag)
+
+                 # chain summary per state (currently unused by the score below)
+                 tmp_df = X_.groupby(['hmm_feature','chain_id'],as_index = False).agg(chain_lenght = ('hmm_chain_order','max'))
+                 tmp_df = tmp_df.groupby("hmm_feature", as_index = False).agg(count = ('chain_id','nunique'), median_length = ('chain_lenght','median')).copy()
+                 # importance is the change in relevance after resampling the feature
+                 mean_relevance, _, _ = states_relevance_score(X_)
+                 self.results[feature].append(mean_relevance - init_relevance)
+             del X_
+
+ def hmm_feature_selection(max_features, trials, train_params, relevance_params):
+     """
+     wrapper function that uses permutation importance to select features
+
+     Parameters:
+     max_features (int): target number of features
+     trials (int): training iterations
+     train_params (dict): dictionary containing training configurations
+     relevance_params (dict): dictionary containing validation configurations
+
+     Returns:
+     results (pd.DataFrame): summary relevance score per excluded feature
+     """
+     results = {'index':list(),'feature_to_drop':list(), 'median relevance excluding feature':list()}
+     i=0
+     init_numfeatures = len(train_params.get('features_hmm'))
+     while max_features <= init_numfeatures:
+         print(init_numfeatures)
+         if i==0:
+             # baseline: train on the full feature set
+             r,model= iterate_training(trials, train_params, relevance_params)
+             for ri in r:
+                 results['index'].append(0)
+                 results['feature_to_drop'].append('full')
+                 results['median relevance excluding feature'].append(ri)
+         data_train = train_params.get('data')
+         chmm_pi = custom_hmm_permutation_importance(model, data_train,random_state=5, features = train_params.get('features_hmm'), lag = relevance_params.get('lag'))
+         chmm_pi.fit()
+         results_fp = pd.DataFrame(chmm_pi.results)
+         feature_deltas = results_fp.median(axis = 0)
+         feature_deltas = feature_deltas.sort_values(ascending = False)
+         feature_to_drop = feature_deltas.index[0]
+         print(f'excluding {feature_to_drop}')
+
+         train_params['features_hmm'].remove(feature_to_drop)
+         print(train_params['features_hmm'])
+         r,model = iterate_training(trials, train_params, relevance_params)
+         for ri in r:
+             results['index'].append(i+1)
+             results['feature_to_drop'].append(feature_to_drop)
+             results['median relevance excluding feature'].append(ri)
+         init_numfeatures = len(model[:-2].transform(data_train).columns)
+         i+=1
+     return pd.DataFrame(results)
+
+
+ def seed_finder(train_params, relevance_params, n_seed = 100,max_results =5):
+     """
+     iterate training runs to find the best starting seed
+
+     Parameters:
+     train_params (dict): dictionary containing training configurations
+     relevance_params (dict): dictionary containing validation configurations
+     n_seed (int): number of iterations
+     max_results (int): number of results to keep before stopping the iteration
+
+     Returns:
+     df_results (pd.DataFrame): summary table of seed and relevance score
+     """
+     seeds = list()
+     i_ = 0
+     while len(seeds) < max_results and i_ < n_seed:
+         # fast-forward the counter when half the budget is spent without a valid seed
+         if i_ >= (n_seed*0.5) and len(seeds) == 0:
+             i_ += 10
+
+         seed = random.randint(50, 10000)
+         train_params['seed'] = seed
+         try:
+             th = trainer_hmm(**train_params)
+             th.train()
+             result_model = th.hmm_model
+             df_train_tmp = train_params.get('data')
+             df_train_tmp['hmm_feature'] = result_model.predict(df_train_tmp)
+             df_train_tmp = create_hmm_derived_features(df = df_train_tmp, lag_returns = relevance_params.get('lag'))
+             relev, _, _ = states_relevance_score(df_train_tmp)
+             relevance_hmm = evaluate_model_chains(data = df_train_tmp,
+                                                   n_clusters=train_params.get('n_clusters'),
+                                                   at_least_states=relevance_params.get('at_least_states'),
+                                                   threshold_chain=relevance_params.get('threshold_chain'),
+                                                   at_least_length=relevance_params.get('at_least_length'))
+             if relevance_hmm:
+                 print('new model candidate was found, seed saved')
+                 seeds.append(seed)
+             i_ += 1
+         except Exception:
+             i_ += 1
+     print('best seeds', seeds)
+     ## rank the saved seeds by training relevance
+     results = {'seed' : list(),'train_relevance': list()}
+
+     for seed_x in seeds:
+         train_params['seed'] = seed_x
+         th = trainer_hmm(**train_params)
+         th.train()
+         result_model = th.hmm_model
+         df_train_tmp = train_params.get('data')
+         df_train_tmp['hmm_feature'] = result_model.predict(df_train_tmp)
+         df_train_tmp = create_hmm_derived_features(df = df_train_tmp, lag_returns = relevance_params.get('lag'))
+         relev, _, _ = states_relevance_score(df_train_tmp)
+
+         results['seed'].append(seed_x)
+         results['train_relevance'].append(relev)
+
+     df_results = pd.DataFrame(results).sort_values(['train_relevance'], ascending = [False])
+     return df_results
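
The new hmm_utils.py module above wires these pieces into a seed-search, train and inspect workflow. Below is a minimal sketch of how it might be driven, assuming a daily asset dataframe with Date and Close columns; the input file name and indicator feature names are hypothetical stand-ins, not part of the package.

import pandas as pd
from virgo_modules.src.hmm_utils import trainer_hmm, seed_finder

df = pd.read_csv('asset_features.csv', parse_dates=['Date'])  # hypothetical input with Date, Close and indicators
features = ['rsi_14', 'macd_diff', 'volatility_20']           # hypothetical feature columns

train_params = {'data': df, 'features_hmm': features, 'n_clusters': 3, 'corr_thrshold': 0.65}
relevance_params = {'lag': 4, 'at_least_states': 1, 'threshold_chain': 5, 'at_least_length': 3}

# search for seeds whose models pass the chain sanity checks, ranked by relevance score
best_seeds = seed_finder(train_params, relevance_params, n_seed=100, max_results=5)

# retrain with the best seed and inspect states, prices and chain returns
train_params['seed'] = int(best_seeds.iloc[0]['seed'])
th = trainer_hmm(**train_params)
th.train()
th.plot_training_results(lag_diff_returns=4)
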
@@ -975,7 +975,7 @@ class produce_plotly_plots:
  for signal_low in signal_low_list:
  if signal_low in df.columns:
  fig.add_trace(go.Scatter(x=df['Date'], y=np.where(df[signal_low] == 1, df[norm_feat], np.nan),showlegend= False, mode='markers', marker_color = 'red'),col = 1, row = row_i)
-
+ fig.add_hline(y=0, line_width=2, line_dash="dash", line_color="grey",col = 1, row = row_i)
  fig.update_layout(height=height_plot, width=1600, title_text = f'asset plot and signals: {self.ticket_name}')

  ## state plot with close prices
@@ -1457,7 +1457,7 @@ def produce_simple_ts_from_model(stock_code, configs, n_days = 2000 , window_sco
  for signal_low in signal_low_list:
  if signal_low in df.columns:
  fig.add_trace(go.Scatter(x=df['Date'], y=np.where(df[signal_low] == 1, df[norm_feat], np.nan),showlegend= False, mode='markers', marker_color = 'red'),col = 1, row = row_i)
-
+ fig.add_hline(y=0, line_width=2, line_dash="dash", line_color="grey",col = 1, row = row_i)
  fig.update_layout(height=height_plot, width=1600, title_text = f'asset plot and signals: {stock_code}')

  del object_stock
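
The two re_utils.py hunks above make the same change: the blank line after the low-signal markers is replaced with a dashed grey zero line on each normalized-feature subplot. A standalone sketch of that call on synthetic data (not the package's own figure code):

import plotly.graph_objects as go
from plotly.subplots import make_subplots

fig = make_subplots(rows=2, cols=1)
fig.add_trace(go.Scatter(y=[0.4, -0.1, 0.3, -0.2], mode='lines'), row=2, col=1)
# dashed grey reference line at y=0, scoped to one subplot via row/col
fig.add_hline(y=0, line_width=2, line_dash="dash", line_color="grey", row=2, col=1)
fig.show()
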
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: virgo-modules
- Version: 0.1.2
+ Version: 0.2.1
  Summary: data processing and statistical modeling using stock market data
  Home-page: https://github.com/miguelmayhem92/virgo_module
  Author: Miguel Mayhuire
@@ -1,13 +1,14 @@
  virgo_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  virgo_modules/src/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  virgo_modules/src/aws_utils.py,sha256=q0l7D7ofo09Lu1QQjv-esheQ06uiSy1Pdq3xMul8zvk,2571
- virgo_modules/src/backtester.py,sha256=S6wnFUfGYqCAjPy4OJ7j6_7nk9pwUiE8swXujfPZmG4,22123
+ virgo_modules/src/backtester.py,sha256=OhiWyzDX0PthXGuhChyWUmDN3cLkzVYe95zS4nGtia8,22106
  virgo_modules/src/edge_utils.py,sha256=i3Hm3fO-QA-u17jDpnRodLLILMWZ2VTMEkMKijdGKLg,14287
+ virgo_modules/src/hmm_utils.py,sha256=RrEj2NEwORqZQeXoLOUfrcCDpnjgu77luJWCcdpU_xM,18236
  virgo_modules/src/pull_artifacts.py,sha256=5OPrgR7pcMSdpbevDRhf0ebk7g7ZRjff4NpTIIWAKjE,1989
- virgo_modules/src/re_utils.py,sha256=tRyU9WpH0K7qMWXB6DIDtVqjsWg_pVdxEbq363RHZ4M,72306
+ virgo_modules/src/re_utils.py,sha256=05pSVzGKBybPMFGm2wcbYgkSOZs3bZECLjyHvlPlFjM,72490
  virgo_modules/src/ticketer_source.py,sha256=fgwF34LJAL_Nr5Pzmp0p5RgHI81-ilRnCXxIBzrfVk4,129045
- virgo_modules-0.1.2.dist-info/LICENSE,sha256=pNgFyCYgmimaw0o6V20JupZLROycAnOA_HDDh1tX2V4,1097
- virgo_modules-0.1.2.dist-info/METADATA,sha256=bcxfRgAAbuhShFSwtEQzOlJGDQOIwXsCzCAI-w-ATh4,1428
- virgo_modules-0.1.2.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
- virgo_modules-0.1.2.dist-info/top_level.txt,sha256=ZjI-qEkDtT-8mFwGAWnXfqPOKEGlIhWRW1es1VyXc60,14
- virgo_modules-0.1.2.dist-info/RECORD,,
+ virgo_modules-0.2.1.dist-info/LICENSE,sha256=pNgFyCYgmimaw0o6V20JupZLROycAnOA_HDDh1tX2V4,1097
+ virgo_modules-0.2.1.dist-info/METADATA,sha256=BMhUeFK8A27crrWPw3bWMFCImOfER4XkuDscE0l-wpQ,1428
+ virgo_modules-0.2.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+ virgo_modules-0.2.1.dist-info/top_level.txt,sha256=ZjI-qEkDtT-8mFwGAWnXfqPOKEGlIhWRW1es1VyXc60,14
+ virgo_modules-0.2.1.dist-info/RECORD,,