lecrapaud-0.2.1-py3-none-any.whl → lecrapaud-0.3.0-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of lecrapaud might be problematic.

@@ -1,5 +1,5 @@
  lecrapaud/__init__.py,sha256=oCxbtw_nk8rlOXbXbWo0RRMlsh6w-hTiZ6e5PRG_wp0,28
- lecrapaud/api.py,sha256=VpVj6zRanLzHo2bwbkar-HcOKsJPEDghWhxbrqDOxp4,9749
+ lecrapaud/api.py,sha256=3yAoHTJt1XtjCrLemwZnoY7UyTdsZo9a7-jgbkOnirc,9912
  lecrapaud/config.py,sha256=Jf7kknumJ9NudVoLugOxqLqB7zTSl67IVV2VVWA0Pv4,1027
  lecrapaud/db/__init__.py,sha256=82o9fMfaqKXPh2_rt44EzNRVZV1R4LScEnQYvj_TjK0,34
  lecrapaud/db/alembic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38
@@ -16,7 +16,7 @@ lecrapaud/db/alembic/versions/2025_05_27_2047-6b6f2d38e9bc_double_instead_of_flo
  lecrapaud/db/alembic/versions/2025_05_31_1111-c175e4a36d68_generalise_stock_to_group.py,sha256=oezuATpCoIsWEuMNB4M19LIb6y_a3UHskFjf7nF_-gk,1312
  lecrapaud/db/alembic/versions/2025_05_31_1256-5681095bfc27_create_investment_run_and_portfolio_.py,sha256=6kdAkFkVQi7hJQx-B1YhA_og2HkGep1gHITXivMNZR4,2850
  lecrapaud/db/alembic/versions/2025_05_31_1806-339927587383_add_investment_run_id.py,sha256=AdN1DzPOgtMDQQFbM62URsv1IxbPUO1_kNzy1Yl1oIc,3346
- lecrapaud/db/alembic/versions/2025_05_31_1834-52b809a34371_make_nullablee.py,sha256=40luKBAdfTJxhSgzKogm3nPbo4xvJKJmIEKcoD9V7tk,1331
+ lecrapaud/db/alembic/versions/2025_05_31_1834-52b809a34371_make_nullablee.py,sha256=mnwDhWUFd-7FECHSn0_OGI93jxOeK4H2L4gp3ioqTDE,1234
  lecrapaud/db/alembic/versions/2025_05_31_1849-3b8550297e8e_change_date_to_datetime.py,sha256=d5swE95kdKovIBClS_tmTgNtN8epgMMz1X7DxPI17Lw,1356
  lecrapaud/db/alembic/versions/2025_05_31_1852-e6b8c95d8243_add_date_to_portfolio_history.py,sha256=veTx2DTbJDZ-wUhQTHJRA9lKam6To45jtRKT1UTX93I,829
  lecrapaud/db/alembic/versions/2025_06_10_1136-db8cdd83563a_addnewsandoptiontodata.py,sha256=V_Wjm-XMfMyG988R-3ii3-fzHP_tb2FtZmc4C6WkQOM,907
@@ -32,19 +32,17 @@ lecrapaud/db/models/model_selection.py,sha256=Lj2WZBWvaVpJeDjdiAs8wvkoI-XrWv2IIj
  lecrapaud/db/models/model_training.py,sha256=2xv9SOh8IUV1ROSsKlooAGanl9G8nBdmr7vaV4GAWtw,1618
  lecrapaud/db/models/score.py,sha256=I6kA55ZQhReo9dA_JEgeRSqDoFKmYyL3l-zCffwsofs,1719
  lecrapaud/db/models/target.py,sha256=scEtXVO3oysEvg9uwm_ncsS3-4o2A6z6XiDVGrg5WHE,1644
- lecrapaud/db/session.py,sha256=eD3zNodgC_ZEiNrSnhooMsaMiyccK9xgeReABWrbAuk,1629
- lecrapaud/directory_management.py,sha256=wrHmAJ-cBKb0GhJifxrb_RoLhZJX8xdkeirrWs7jQHk,791
- lecrapaud/experiment.py,sha256=8Y7HX5MGifrcquOML1lYXG-q40Wn4WBBRqLfPs1tLuk,1994
- lecrapaud/feature_engineering.py,sha256=zia2qxe_KLBUZV2KYVPqGd2K55AjHiyeeCm_GhobJHE,30327
- lecrapaud/feature_selection.py,sha256=2h3S_c0hYZM-EKenpVRYTFFqT4T7fg2lJ5pY2gUDalk,43023
+ lecrapaud/db/session.py,sha256=LW97WdE4W48Fkx5fMrgDUKjwq54M2pfjqHjKwvUvAi0,2078
+ lecrapaud/directories.py,sha256=wrHmAJ-cBKb0GhJifxrb_RoLhZJX8xdkeirrWs7jQHk,791
+ lecrapaud/experiment.py,sha256=ApsGtGaISYsQCdAGa1llzBslfODK-w1_zf44p6Es3Xs,1985
+ lecrapaud/feature_engineering.py,sha256=MEPW8-AgO3xnbwMHV3dC_gMtrBeKQhaCDrAicWgXX6w,30251
+ lecrapaud/feature_selection.py,sha256=RD016F9_r249VEAe5bLCpB4tHQSbhdMoXRFohkk3c5g,42429
  lecrapaud/integrations/openai_integration.py,sha256=hHLF3fk5Bps8KNbNrEL3NUFa945jwClE6LrLpuMZOd4,7459
  lecrapaud/jobs/__init__.py,sha256=ZkrsyTOR21c_wN7RY8jPhm8jCrL1oCEtTsf3VFIlQiE,292
  lecrapaud/jobs/config.py,sha256=AmO0j3RFjx8H66dfKw_7vnshaOJb9Ox5BAZ9cwwLFMY,377
  lecrapaud/jobs/scheduler.py,sha256=SiYWPxokpKnR8V6btLOO6gbK0PEjSRoeG0kCbQvYPf4,990
  lecrapaud/jobs/tasks.py,sha256=evzhOHpgp6Gvoz__jUipi-_HSNny7bWgAauHv2Hpxyk,1640
- lecrapaud/model_selection.py,sha256=3BGp9mM9pZECcEcxLvm5YBJZ9Q7CJxZEC2zVWOm9g2o,61153
- lecrapaud/predictions.py,sha256=Uw6Ghm05KcoZIB1m9YLBe4v7sV42_Hv1oPgYfKsF2iY,10754
- lecrapaud/preprocessing.py,sha256=GfOTdVpubf9w13bbgKjcx12oP3mefaTgm1HK1V3tVeQ,34424
+ lecrapaud/model_selection.py,sha256=y01yOK5oB9hdeEJkjjavDJvYAUCR3PS6FWRAN43uDzo,61322
  lecrapaud/search_space.py,sha256=6kVXDSmvpTuLKTlqQkKIzrJOM2P7HpqRiO3PR37VrsM,34123
  lecrapaud/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lecrapaud/services/embedding_categorical.py,sha256=jPKl3GjJ8STk5u0ux9VHbUYq-XRuNULhMJuXdeJAePU,2593
@@ -55,9 +53,8 @@ lecrapaud/speed_tests/test-gpu-resnet.ipynb,sha256=27Vu7nYwujYeh3fOxBNCnKJn3MXNP
  lecrapaud/speed_tests/test-gpu-transformers.ipynb,sha256=k6MBSs_Um1h4PykvE-LTBcdpbWLbIFST_xl_AFW2jgI,8444
  lecrapaud/speed_tests/tests.ipynb,sha256=RjI7LDHSsbadUkea_hT14sD7ivljtIQk4NB5McXJ1bE,3835
  lecrapaud/speed_tests/trash.py,sha256=E4zrrRyVqeNEumWg8rYKquR9VTIULN52eCRqjmv_s58,1647
- lecrapaud/training.py,sha256=a-p-06-h-oRj5A728-xwekHZ7mxTnrPnKjAjoZhNZME,8071
- lecrapaud/utils.py,sha256=J2lMRo8J1CoZ1AYIPcdbFWXg-PxtQTs8qmeINioXWfk,8107
- lecrapaud-0.2.1.dist-info/LICENSE,sha256=MImCryu0AnqhJE_uAZD-PIDKXDKb8sT7v0i1NOYeHTM,11350
- lecrapaud-0.2.1.dist-info/METADATA,sha256=g2qZx1kl3Qf1GT7LE-NNmTOwJluyZel0tgL1FCSYmt4,4460
- lecrapaud-0.2.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- lecrapaud-0.2.1.dist-info/RECORD,,
+ lecrapaud/utils.py,sha256=TMlEyU6aRhgiYRtdbvKrlq2t1L9Al5BXfQ3cPbeHlX8,8098
+ lecrapaud-0.3.0.dist-info/LICENSE,sha256=MImCryu0AnqhJE_uAZD-PIDKXDKb8sT7v0i1NOYeHTM,11350
+ lecrapaud-0.3.0.dist-info/METADATA,sha256=dcXHKrW9p_QPxsshz7iVrG47_FikmRBpQ7gY6peB-2o,4525
+ lecrapaud-0.3.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ lecrapaud-0.3.0.dist-info/RECORD,,
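
Each RECORD entry has the form path,sha256=<digest>,<size>: the digest is the SHA-256 of the file, urlsafe-base64-encoded with the trailing "=" padding stripped, and the size is in bytes. A minimal sketch for recomputing a digest against an unpacked wheel, so the hash changes above can be verified locally:

    import base64
    import hashlib
    from pathlib import Path

    def record_digest(path: str) -> str:
        # SHA-256 of the raw file contents
        raw = hashlib.sha256(Path(path).read_bytes()).digest()
        # RECORD encodes the digest as urlsafe base64 without "=" padding
        return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

    # Example, run from inside the unpacked 0.3.0 wheel: record_digest("lecrapaud/api.py")
    # should return "3yAoHTJt1XtjCrLemwZnoY7UyTdsZo9a7-jgbkOnirc" per the entry above.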
lecrapaud/predictions.py DELETED
@@ -1,292 +0,0 @@
- import keras
- import pickle
- import pandas as pd
- from pathlib import Path
- import joblib
- from datetime import timedelta, datetime
- import logging
-
- from lecrapaud.search_space import ml_models, dl_recurrent_models
- from lecrapaud.data_sourcing import get_filtered_data
- from lecrapaud.preprocessing import feature_engineering
- from lecrapaud.feature_selection import (
-     encode_categorical_features,
-     reshape_df,
-     TARGETS_CLF,
-     reshape_time_series,
- )
- from lecrapaud.model_selection import predict, evaluate
- from lecrapaud.utils import logger
- from lecrapaud.db import Dataset
- from lecrapaud.config import LOGGING_LEVEL
-
- MODELS_LIST = ml_models + dl_recurrent_models
-
-
- def run_prediction(
-     dataset_id: str,
-     targets_numbers: list[int],
-     test: bool = True,
-     date: datetime = None,
-     verbose: int = 0,
- ):
-     """Function to run prediction on several TARGETS using best models"""
-     if verbose == 0:
-         logger.setLevel(logging.WARNING)
-
-     logger.warning("Running prediction...")
-
-     dataset = Dataset.get(dataset_id)
-     dataset_dir = dataset.path
-     preprocessing_dir = f"{dataset_dir}/preprocessing"
-     list_of_groups = dataset.list_of_groups
-
-     features_dict = {}
-     scaler_y_dict = {}
-     model_dict = {}
-     threshold_dict = {}
-     for target_number in targets_numbers:
-         (
-             model_dict[target_number],
-             threshold_dict[target_number],
-             features_dict[target_number],
-             scaler_y_dict[target_number],
-             all_features,
-             scaler_x,
-         ) = load_model(dataset, target_number)
-
-     # get data for backtesting
-     if test:
-         train_data_dir = f"{dataset_dir}/data"
-         data_for_pred = joblib.load(f"{train_data_dir}/test.pkl")
-         data_for_pred_scaled = joblib.load(f"{train_data_dir}/test_scaled.pkl")
-
-         if any(
-             config["recurrent"]
-             for config in MODELS_LIST
-             if config["model_name"] in model_dict.values()
-         ):
-             train_scaled = joblib.load(f"{train_data_dir}/train_scaled.pkl")
-             val_scaled = joblib.load(f"{train_data_dir}/val_scaled.pkl")
-             test_scaled = joblib.load(f"{train_data_dir}/test_scaled.pkl")
-             reshaped_data = reshape_time_series(
-                 train_scaled, val_scaled, test_scaled, all_features, timesteps=120
-             )
-             data_for_pred_reshaped = reshaped_data["x_train_reshaped"]
-
-         most_recent_data = joblib.load(f"{train_data_dir}/full.pkl")
-         most_recent_data = most_recent_data.loc[data_for_pred.index]
-
-         scores_clf = []
-         scores_reg = []
-     # get data for predicting future
-     else:
-         # TODO: if date is a bit older, we need more than 0 years of data
-         most_recent_data = get_filtered_data(
-             years_of_data=0, list_of_groups=list_of_groups
-         )
-
-         most_recent_data = feature_engineering(
-             most_recent_data, for_training=False, save_as_csv=True
-         )
-
-         data_for_pred = encode_categorical_features(
-             most_recent_data, save_dir=preprocessing_dir, fit=False
-         )
-
-         data_for_pred_scaled = pd.DataFrame(
-             scaler_x.transform(data_for_pred[all_features]),
-             columns=list(data_for_pred[all_features].columns),
-             index=data_for_pred.index,
-         )
-
-         # TODO: don't we need to have 120 days of data for each stock?
-         if any(
-             config["recurrent"]
-             for config in MODELS_LIST
-             if config["model_name"] in model_dict.values()
-         ):
-             # Count number of rows per stock
-             counts = data_for_pred["STOCK"].value_counts()
-
-             # Find stocks with insufficient history
-             insufficient_stocks = counts[counts < 120]
-
-             if not insufficient_stocks.empty:
-                 raise ValueError(
-                     f"Insufficient history for stocks: {', '.join(insufficient_stocks.index)}"
-                 )
-
-             data_for_pred_reshaped = reshape_df(
-                 data_for_pred_scaled[all_features], data_for_pred["STOCK"], 120
-             )
-
-     # make prediction
-     for target_number in targets_numbers:
-
-         # Prepare variables and data
-         target_type = "classification" if target_number in TARGETS_CLF else "regression"
-         features = features_dict[target_number]
-         model = model_dict[target_number]
-         threshold = threshold_dict[target_number]
-
-         config = [
-             config for config in MODELS_LIST if config["model_name"] == model.model_name
-         ]
-         if config is None or len(config) == 0:
-             raise Exception(f"Model {model.model_name} was not found in search space.")
-         else:
-             config = config[0]
-
-         need_scaling = config["need_scaling"] and target_type == "regression"
-         if config["recurrent"]:
-             features_idx = [i for i, e in enumerate(all_features) if e in set(features)]
-             x_pred = data_for_pred_reshaped[:, :, features_idx]
-         else:
-             x_pred = (
-                 data_for_pred_scaled[features]
-                 if need_scaling
-                 else data_for_pred[features]
-             )
-
-         # Predict
-         y_pred = predict(model, x_pred, target_type, config, threshold)
-
-         # Fix for recurrent model because x_val has no index as it is a 3D np array
-         if config["recurrent"]:
-             y_pred.index = (
-                 most_recent_data.index
-             )  # TODO: not sure this will work for old dataset not aligned with data_for_training for test use case (done, this is why we decode the test set)
-
-         # Unscale prediction
-         if need_scaling or config["recurrent"]:
-             scaler_y = scaler_y_dict[target_number]
-             y_pred = pd.Series(
-                 scaler_y.inverse_transform(y_pred.values.reshape(-1, 1)).flatten(),
-                 index=most_recent_data.index,
-             )
-             y_pred.name = "PRED"
-
-         # Evaluate if test
-         if test:
-             prediction = pd.concat(
-                 [most_recent_data[f"TARGET_{target_number}"], y_pred], axis=1
-             )
-             prediction.rename(
-                 columns={f"TARGET_{target_number}": "TARGET"}, inplace=True
-             )
-             score = evaluate(prediction, target_type)
-             score["TARGET"] = f"TARGET_{target_number}"
-             (
-                 scores_clf.append(score)
-                 if target_type == "classification"
-                 else scores_reg.append(score)
-             )
-
-         if isinstance(y_pred, pd.DataFrame):
-             y_pred.rename(
-                 columns={"PRED": f"TARGET_{target_number}_PRED"}, inplace=True
-             )
-             most_recent_data = pd.concat(
-                 [most_recent_data, y_pred[f"TARGET_{target_number}_PRED"]], axis=1
-             )
-
-         else:
-             y_pred.name = f"TARGET_{target_number}_PRED"
-             most_recent_data = pd.concat([most_recent_data, y_pred], axis=1)
-
-     # return result either for test set or for tomorrow prediction
-     result = most_recent_data
-
-     if verbose == 0:
-         logger.setLevel(LOGGING_LEVEL)
-
-     if test:
-         logger.info("Test results on test set")
-         scores_reg = pd.DataFrame(scores_reg).set_index("TARGET")
-         scores_clf = pd.DataFrame(scores_clf).set_index("TARGET")
-         return result, scores_reg, scores_clf, prediction
-     elif date:
-         date = date.replace(hour=0, minute=0, second=0, microsecond=0)
-         tomorrow = date + timedelta(days=1)
-         logger.info(f"Prediction for : {tomorrow.date()}")
-         result = result[result["DATE"] == date]
-         return result, None, None, None
-     else:
-         date = datetime.today()
-         max_date = result["DATE"].max()
-         if max_date.date() != date.date():
-             logger.info(
-                 f"The maximum date found in the dataset is {max_date} and not {date}"
-             )
-         tomorrow = max_date + timedelta(days=1)
-         logger.info(f"Prediction for tomorrow : {tomorrow.date()}")
-
-         # Filter the DataFrame for the last date
-         filtered_result = result[result["DATE"] == max_date]
-
-         return filtered_result, None, None, None
-
-
- # Helpers
- def load_model(dataset: Dataset, target_number: int):
-     dataset_dir = dataset.path
-     training_target_dir = f"{dataset_dir}/TARGET_{target_number}"
-     preprocessing_dir = f"{dataset_dir}/preprocessing"
-
-     # Search for files that contain '.best' or '.keras' in the name
-     scores_tracking = pd.read_csv(f"{training_target_dir}/scores_tracking.csv")
-     training_target_dir = Path(training_target_dir)
-     best_files = list(training_target_dir.glob("*.best*")) + list(
-         training_target_dir.glob("*.keras*")
-     )
-     threshold = (
-         scores_tracking["THRESHOLD"].values[0]
-         if "THRESHOLD" in scores_tracking.columns
-         else None
-     )
-
-     # If any files are found, try loading the first one (or process as needed)
-     if best_files:
-         file_path = best_files[0]  # Assuming you want to open the first matching file
-         try:
-             # Attempt to load the file as a scikit-learn, XGBoost, or LightGBM model (Pickle format)
-             model = joblib.load(file_path)
-             logger.info(f"Loaded model {model.model_name} and threshold {threshold}")
-         except (pickle.UnpicklingError, EOFError):
-             # If it's not a pickle file, try loading it as a Keras model
-             try:
-                 # Attempt to load the file as a Keras model
-                 model = keras.models.load_model(file_path)
-                 logger.info(
-                     f"Loaded model {model.model_name} and threshold {threshold}"
-                 )
-             except Exception as e:
-                 raise FileNotFoundError(
-                     f"Model could not be loaded from path: {file_path}: {e}"
-                 )
-
-     else:
-         raise FileNotFoundError(
-             f"No files with '.best' or '.keras' found in the specified folder: {training_target_dir}"
-         )
-
-     if dataset.name == "data_28_X_X":
-         features = joblib.load(
-             f"{preprocessing_dir}/features_{target_number}.pkl"
-         )  # we keep this for backward compatibility
-     else:
-         features = dataset.get_features(target_number)
-
-     scaler_y = None
-     if target_number not in TARGETS_CLF:
-         scaler_y = joblib.load(f"{preprocessing_dir}/scaler_y_{target_number}.pkl")
-
-     if dataset.name == "data_28_X_X":
-         all_features = joblib.load(
-             f"{preprocessing_dir}/all_features.pkl"
-         )  # we keep this for backward compatibility
-     else:
-         all_features = dataset.get_all_features()
-     scaler_x = joblib.load(f"{preprocessing_dir}/scaler_x.pkl")
-     return model, threshold, features, scaler_y, all_features, scaler_x
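
Note that 0.3.0 removes this module outright (along with lecrapaud/preprocessing.py and lecrapaud/training.py, per the RECORD diff above). For reference, a sketch of how the removed entry point was invoked, inferred from its signature; the dataset id and target numbers below are hypothetical:

    from lecrapaud.predictions import run_prediction  # removed in 0.3.0

    # test=True evaluates the stored best models on the held-out test set and
    # returns scores; test=False rebuilds features for the most recent data
    # and predicts the next day.
    result, scores_reg, scores_clf, prediction = run_prediction(
        dataset_id="1", targets_numbers=[1, 2], test=True, verbose=1
    )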