lecrapaud 0.20.1-py3-none-any.whl → 0.20.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lecrapaud/config.py +3 -2
- lecrapaud/experiment.py +5 -2
- lecrapaud/utils.py +4 -4
- lecrapaud-0.20.2.dist-info/METADATA +344 -0
- {lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/RECORD +7 -7
- lecrapaud-0.20.1.dist-info/METADATA +0 -250
- {lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/WHEEL +0 -0
- {lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/licenses/LICENSE +0 -0
lecrapaud/config.py
CHANGED
@@ -32,6 +32,7 @@ DB_URI: str = (
 )
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 LECRAPAUD_LOGFILE = os.getenv("LECRAPAUD_LOGFILE")
-LECRAPAUD_LOCAL = os.getenv("LECRAPAUD_LOCAL", False)
 LECRAPAUD_TABLE_PREFIX = os.getenv("LECRAPAUD_TABLE_PREFIX", "lecrapaud")
-LECRAPAUD_OPTIMIZATION_BACKEND = os.getenv(
+LECRAPAUD_OPTIMIZATION_BACKEND = os.getenv(
+    "LECRAPAUD_OPTIMIZATION_BACKEND", "ray"
+).lower()
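
The net effect of this hunk: `LECRAPAUD_LOCAL` is dropped, and the optimization backend is now read from the environment with a `"ray"` default and normalized to lower case. A minimal standalone sketch of the same pattern (illustration only, not the lecrapaud module itself):

```python
import os

# Same pattern as the new config lines: env var with a default,
# lowercased so "Ray", "RAY" and "ray" all compare equal downstream.
backend = os.getenv("LECRAPAUD_OPTIMIZATION_BACKEND", "ray").lower()
print(backend)  # "ray" unless the variable is set
```
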
lecrapaud/experiment.py
CHANGED
@@ -16,15 +16,18 @@ from lecrapaud.db.session import get_db
 
 def create_experiment(
     data: pd.DataFrame | str,
-    date_column,
-    group_column,
     experiment_name,
+    date_column=None,
+    group_column=None,
     **kwargs,
 ):
     if isinstance(data, str):
         path = f"{data}/data/full.pkl"
         data = joblib.load(path)
 
+    if kwargs.get("time_series") and not date_column:
+        raise ValueError("date_column must be provided for time series experiments")
+
     dates = {}
     if date_column:
         dates["start_date"] = pd.to_datetime(data[date_column].iat[0])
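
This hunk makes `date_column` and `group_column` optional keyword arguments and adds an explicit guard for time-series runs. A hedged sketch of the resulting call-site behavior (assuming the module imports cleanly in your environment; as far as the hunk shows, the guard fires before any database work):

```python
import pandas as pd
from lecrapaud.experiment import create_experiment  # path taken from the diff header

df = pd.DataFrame({"DATE": pd.date_range("2024-01-01", periods=5), "RET": range(5)})

# Raises ValueError: time_series is truthy but no date_column was given.
try:
    create_experiment(df, experiment_name="demo_ts", time_series=True)
except ValueError as err:
    print(err)  # "date_column must be provided for time series experiments"
```
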
lecrapaud/utils.py
CHANGED
@@ -11,7 +11,7 @@ import re
 import string
 
 from lecrapaud.directories import logger_dir
-from lecrapaud.config import LOGGING_LEVEL, PYTHON_ENV
+from lecrapaud.config import LOGGING_LEVEL, PYTHON_ENV
 
 
 _LECRAPAUD_LOGGER_ALREADY_CONFIGURED = False
@@ -237,7 +237,7 @@ def serialize_for_json(obj):
     import numpy as np
     from datetime import datetime, date
     import pandas as pd
-
+
     # Handle NumPy types
     if isinstance(obj, (np.integer, np.int64, np.int32, np.int16)):
         return int(obj)
@@ -247,11 +247,11 @@ def serialize_for_json(obj):
         return obj.tolist()
     elif isinstance(obj, np.bool_):
         return bool(obj)
-
+
     # Handle datetime types
     elif isinstance(obj, (datetime, date, pd.Timestamp)):
         return obj.isoformat()
-
+
     # Handle basic Python types
     elif isinstance(obj, (str, int, float, bool, type(None))):
         return obj
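
The utils.py hunks are whitespace-only, but the surrounding context shows what `serialize_for_json` does: map NumPy scalars, datetimes, and plain Python values to JSON-safe equivalents. A small illustration of that mapping, assuming the function is importable from `lecrapaud.utils`:

```python
import numpy as np
import pandas as pd
from lecrapaud.utils import serialize_for_json  # assumed import path

print(serialize_for_json(np.int64(7)))                 # 7 (np.integer -> int)
print(serialize_for_json(np.bool_(True)))              # True (np.bool_ -> bool)
print(serialize_for_json(pd.Timestamp("2025-01-01")))  # "2025-01-01T00:00:00"
print(serialize_for_json("ok"))                        # "ok" (basic types pass through)
```
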
lecrapaud-0.20.2.dist-info/METADATA
ADDED
@@ -0,0 +1,344 @@
Metadata-Version: 2.4
Name: lecrapaud
Version: 0.20.2
Summary: Framework for machine and deep learning, with regression, classification and time series analysis
License: Apache License
License-File: LICENSE
Author: Pierre H. Gallet
Requires-Python: ==3.12.*
Classifier: License :: Other/Proprietary License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: catboost (>=1.2.8)
Requires-Dist: category-encoders (>=2.8.1)
Requires-Dist: celery (>=5.5.3)
Requires-Dist: ftfy (>=6.3.1)
Requires-Dist: joblib (>=1.5.1)
Requires-Dist: keras (>=3.10.0)
Requires-Dist: lightgbm (>=4.6.0)
Requires-Dist: matplotlib (>=3.10.3)
Requires-Dist: mlxtend (>=0.23.4)
Requires-Dist: numpy (>=2.1.3)
Requires-Dist: openai (>=1.88.0)
Requires-Dist: pandas (>=2.3.0)
Requires-Dist: pydantic (>=2.9.2)
Requires-Dist: python-dotenv (>=1.1.0)
Requires-Dist: scikit-learn (>=1.6.1)
Requires-Dist: scipy (<1.14.0)
Requires-Dist: seaborn (>=0.13.2)
Requires-Dist: sqlalchemy (>=2.0.41)
Requires-Dist: tensorboardx (>=2.6.4)
Requires-Dist: tensorflow (>=2.19.0)
Requires-Dist: tiktoken (>=0.9.0)
Requires-Dist: tqdm (>=4.67.1)
Requires-Dist: xgboost (>=3.0.2)
Description-Content-Type: text/markdown

<div align="center">

<img src="https://s3.amazonaws.com/pix.iemoji.com/images/emoji/apple/ios-12/256/frog-face.png" width=120 alt="crapaud"/>

## Welcome to LeCrapaud

**An all-in-one machine learning framework**

[](https://github.com/pierregallet/lecrapaud/stargazers)
[](https://badge.fury.io/py/lecrapaud)
[](https://pypi.org/project/lecrapaud)
[](https://github.com/pierregallet/lecrapaud/blob/main/LICENSE)
[](https://codecov.io/gh/pierregallet/lecrapaud)

</div>

## 🚀 Introduction

LeCrapaud is a high-level Python library for end-to-end machine learning workflows on tabular data, with a focus on financial and stock datasets. It provides a simple API to handle feature engineering, model selection, training, and prediction, all in a reproducible and modular way.

## ✨ Key Features

- 🧩 Modular pipeline: Feature engineering, preprocessing, selection, and modeling as independent steps
- 🤖 Automated model selection and hyperparameter optimization
- 📊 Easy integration with pandas DataFrames
- 🔬 Supports both regression and classification tasks
- 🛠️ Simple API for both full pipeline and step-by-step usage
- 📦 Ready for production and research workflows

## ⚡ Quick Start

### Install the package

```sh
pip install lecrapaud
```

### How it works

This package provides a high-level API to manage experiments for feature engineering, model selection, and prediction on tabular data (e.g. stock data).

### Typical workflow

```python
from lecrapaud import LeCrapaud

# 1. Create the main app
app = LeCrapaud(uri=uri)

# 2. Define your experiment context (see your notebook or api.py for all options)
context = {
    "data": your_dataframe,
    "columns_drop": [...],
    "columns_date": [...],
    # ... other config options
}

# 3. Create an experiment
experiment = app.create_experiment(**context)

# 4. Run the full training pipeline
experiment.train(your_dataframe)

# 5. Make predictions on new data
predictions = experiment.predict(new_data)
```

### Database Configuration (Required)

LeCrapaud requires access to a MySQL database to store experiments and results. You must either:

- Pass a valid MySQL URI to the `LeCrapaud` constructor:

  ```python
  app = LeCrapaud(uri="mysql+pymysql://user:password@host:port/dbname")
  ```

- **OR** set the following environment variables before using the package:
  - `DB_USER`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_NAME`
  - Or set `DB_URI` directly with your full connection string.

If neither is provided, database operations will not work.
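
As a concrete illustration of the environment-variable option (placeholder values; the variable names are exactly those listed above, and they must be set before the package is imported so its config picks them up):

```python
import os

# Set before importing lecrapaud so its config reads these values.
os.environ["DB_USER"] = "lecrapaud"
os.environ["DB_PASSWORD"] = "change-me"
os.environ["DB_HOST"] = "127.0.0.1"
os.environ["DB_PORT"] = "3306"
os.environ["DB_NAME"] = "lecrapaud"
# or, equivalently, a single full connection string:
# os.environ["DB_URI"] = "mysql+pymysql://lecrapaud:change-me@127.0.0.1:3306/lecrapaud"
```
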

### Using OpenAI Embeddings (Optional)

If you want to use the `columns_pca` embedding feature (for advanced feature engineering), you must set the `OPENAI_API_KEY` environment variable with your OpenAI API key:

```sh
export OPENAI_API_KEY=sk-...
```

If this variable is not set, features relying on OpenAI embeddings will not be available.

### Experiment Context Arguments

The experiment context is a dictionary containing all configuration parameters for your ML pipeline. Parameters are stored in the experiment's database record and automatically retrieved when loading an existing experiment.

#### Required Parameters

| Parameter         | Type      | Description                                        | Example              |
|-------------------|-----------|----------------------------------------------------|----------------------|
| `data`            | DataFrame | Input dataset (required for new experiments only)  | `pd.DataFrame(...)`  |
| `experiment_name` | str       | Unique name for the experiment                     | `'stock_prediction'` |
| `date_column`     | str       | Name of the date column (required for time series) | `'DATE'`             |
| `group_column`    | str       | Name of the group column (required for panel data) | `'STOCK'`            |

#### Feature Engineering Parameters

| Parameter            | Type | Default | Description                                |
|----------------------|------|---------|--------------------------------------------|
| `columns_drop`       | list | `[]`    | Columns to drop during feature engineering |
| `columns_boolean`    | list | `[]`    | Columns to convert to boolean features     |
| `columns_date`       | list | `[]`    | Date columns for cyclic encoding           |
| `columns_te_groupby` | list | `[]`    | Groupby columns for target encoding        |
| `columns_te_target`  | list | `[]`    | Target columns for target encoding         |

#### Preprocessing Parameters

| Parameter             | Type  | Default | Description                                      |
|-----------------------|-------|---------|--------------------------------------------------|
| `time_series`         | bool  | `False` | Whether data is time series                      |
| `val_size`            | float | `0.2`   | Validation set size (fraction)                   |
| `test_size`           | float | `0.2`   | Test set size (fraction)                         |
| `columns_pca`         | list  | `[]`    | Columns for PCA transformation                   |
| `pca_temporal`        | list  | `[]`    | Temporal PCA config (e.g., lag features)         |
| `pca_cross_sectional` | list  | `[]`    | Cross-sectional PCA config (e.g., market regime) |
| `columns_onehot`      | list  | `[]`    | Columns for one-hot encoding                     |
| `columns_binary`      | list  | `[]`    | Columns for binary encoding                      |
| `columns_ordinal`     | list  | `[]`    | Columns for ordinal encoding                     |
| `columns_frequency`   | list  | `[]`    | Columns for frequency encoding                   |

#### Feature Selection Parameters

| Parameter                 | Type  | Default | Description                                               |
|---------------------------|-------|---------|-----------------------------------------------------------|
| `percentile`              | float | `20`    | Percentage of features to keep per selection method       |
| `corr_threshold`          | float | `80`    | Maximum correlation threshold (%) between features        |
| `max_features`            | int   | `50`    | Maximum number of final features                          |
| `max_p_value_categorical` | float | `0.05`  | Maximum p-value for categorical feature selection (Chi2)  |

#### Model Selection Parameters

| Parameter               | Type | Default | Description                                               |
|-------------------------|------|---------|-----------------------------------------------------------|
| `target_numbers`        | list | `[]`    | List of target indices to predict                         |
| `target_clf`            | list | `[]`    | Classification target indices                             |
| `models_idx`            | list | `[]`    | Model indices or names to use (e.g., `[1, 'xgb', 'lgb']`) |
| `max_timesteps`         | int  | `120`   | Maximum timesteps for recurrent models                    |
| `perform_hyperopt`      | bool | `True`  | Whether to perform hyperparameter optimization            |
| `number_of_trials`      | int  | `20`    | Number of hyperopt trials                                 |
| `perform_crossval`      | bool | `False` | Whether to use cross-validation during hyperopt           |
| `plot`                  | bool | `True`  | Whether to generate plots                                 |
| `preserve_model`        | bool | `True`  | Whether to save the best model                            |
| `target_clf_thresholds` | dict | `{}`    | Classification thresholds per target                      |

#### Example Context Configuration

```python
context = {
    # Required parameters
    "experiment_name": f"stock_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
    "date_column": "DATE",
    "group_column": "STOCK",

    # Feature selection
    "corr_threshold": 80,
    "max_features": 20,
    "percentile": 20,
    "max_p_value_categorical": 0.05,

    # Feature engineering
    "columns_drop": ["SECURITY", "ISIN", "ID"],
    "columns_boolean": [],
    "columns_date": ["DATE"],
    "columns_te_groupby": [["SECTOR", "DATE"]],
    "columns_te_target": ["RET", "VOLUME"],

    # Preprocessing
    "time_series": True,
    "val_size": 0.2,
    "test_size": 0.2,
    "pca_temporal": [
        {"name": "LAST_20_RET", "columns": [f"RET_-{i}" for i in range(1, 21)]},
    ],
    "pca_cross_sectional": [
        {
            "name": "MARKET_REGIME",
            "index": "DATE",
            "columns": "STOCK",
            "value": "RET",
        }
    ],
    "columns_onehot": ["BUY_SIGNAL"],
    "columns_binary": ["SECTOR", "LOCATION"],
    "columns_ordinal": ["STOCK"],

    # Model selection
    "target_numbers": [1, 2, 3],
    "target_clf": [1],
    "models_idx": ["xgb", "lgb", "catboost"],
    "max_timesteps": 120,
    "perform_hyperopt": True,
    "number_of_trials": 50,
    "perform_crossval": True,
    "plot": True,
    "preserve_model": True,
    "target_clf_thresholds": {1: {"precision": 0.80}},
}

# Create experiment
experiment = app.create_experiment(data=your_dataframe, **context)
```

#### Important Notes

1. **Context Persistence**: All context parameters are saved in the database when creating an experiment and automatically restored when loading it.

2. **Parameter Precedence**: When loading an existing experiment, the stored context takes precedence over any parameters passed to the constructor.

3. **PCA Time Series**: For time series data with `pca_cross_sectional` where the index equals `date_column`, the system automatically uses an expanding-window approach to prevent data leakage (a conceptual sketch follows this list).

4. **OpenAI Embeddings**: If using `columns_pca` with text columns, ensure `OPENAI_API_KEY` is set as an environment variable.

5. **Model Indices**: The `models_idx` parameter accepts both integer indices and string names (e.g., `'xgb'`, `'lgb'`, `'catboost'`).
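
The expanding-window behavior in note 3 can be pictured with a small standalone sketch (conceptual only, not LeCrapaud's internal code): for each date, the factor is fit on strictly earlier dates, so a date's score never depends on its own future.

```python
import pandas as pd
from sklearn.decomposition import PCA

def expanding_market_factor(df, index="DATE", columns="STOCK", value="RET",
                            min_history=30):
    """First-component score per date, fit only on prior dates (no leakage)."""
    wide = df.pivot(index=index, columns=columns, values=value).sort_index()
    wide = wide.fillna(0.0)  # naive gap handling, for the sketch only
    scores = pd.Series(index=wide.index, dtype=float, name="MARKET_REGIME")
    for i in range(min_history, len(wide)):
        pca = PCA(n_components=1).fit(wide.iloc[:i])       # past dates only
        scores.iloc[i] = pca.transform(wide.iloc[[i]])[0, 0]
    return scores
```
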

### Modular usage

You can also use each step independently:

```python
data_eng = experiment.feature_engineering(data)
train, val, test = experiment.preprocess_feature(data_eng)
features = experiment.feature_selection(train)
std_data, reshaped_data = experiment.preprocess_model(train, val, test)
experiment.model_selection(std_data, reshaped_data)
```

## ⚠️ Using Alembic in Your Project (Important for Integrators)

If you use Alembic for migrations in your own project and you share the same database with LeCrapaud, you must ensure that Alembic does **not** attempt to drop or modify LeCrapaud tables (those prefixed with `{LECRAPAUD_TABLE_PREFIX}_`).

By default, Alembic's autogenerate feature will propose to drop any table that exists in the database but is not present in your project's models. To prevent this, add the following filter to your `env.py`:

```python
def include_object(object, name, type_, reflected, compare_to):
    if type_ == "table" and name.startswith(f"{LECRAPAUD_TABLE_PREFIX}_"):
        return False  # Ignore LeCrapaud tables
    return True

context.configure(
    # ... other options ...
    include_object=include_object,
)
```

This will ensure that Alembic ignores all tables created by LeCrapaud when generating migrations for your own project.

---

## 🤝 Contributing

### Reminders for GitHub usage

1. Creating a GitHub repository

```sh
$ brew install gh
$ gh auth login
$ gh repo create
```

2. Initializing git and pushing the first commit to the remote repository

```sh
$ git init
$ git add .
$ git commit -m 'first commit'
$ git remote add origin <YOUR_REPO_URL>
$ git push -u origin master
```

3. Use conventional commits
   https://www.conventionalcommits.org/en/v1.0.0/#summary

4. Create a virtual environment

```sh
$ pip install virtualenv
$ python -m venv .venv
$ source .venv/bin/activate
```

5. Install dependencies

```sh
$ make install
```

6. Deactivate the virtualenv (if needed)

```sh
$ deactivate
```

---

Pierre Gallet © 2025
{lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 lecrapaud/__init__.py,sha256=oCxbtw_nk8rlOXbXbWo0RRMlsh6w-hTiZ6e5PRG_wp0,28
 lecrapaud/api.py,sha256=IQlH3wcSzxYgvlamfICNMwNsQGoaNxBJUPTlC9M0kBk,20321
-lecrapaud/config.py,sha256=
+lecrapaud/config.py,sha256=0NEg61QdLxQ97bVFDDXa6OwlWFEo_z8VIhX5KrD1ik0,1170
 lecrapaud/db/__init__.py,sha256=82o9fMfaqKXPh2_rt44EzNRVZV1R4LScEnQYvj_TjK0,34
 lecrapaud/db/alembic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38
 lecrapaud/db/alembic/env.py,sha256=RvTTBa3bDVBxmDtapAfzUoeWBgmVQU3s9U6HmQCAP84,2421
@@ -29,7 +29,7 @@ lecrapaud/db/models/target.py,sha256=DKnfeaLU8eT8J_oh_vuFo5-o1CaoXR13xBbswme6Bgk
 lecrapaud/db/models/utils.py,sha256=-a-nWWmpJ2XzidIxo2COVUTrGZIPYCfBzjhcszJj_bM,1109
 lecrapaud/db/session.py,sha256=u9NCwUoV5VbtScRb6HOSQr4oTEjIwj0waP5mGlc1qJg,3735
 lecrapaud/directories.py,sha256=0LrANuDgbuneSLker60c6q2hmGnQ3mKHIztTGzTx6Gw,826
-lecrapaud/experiment.py,sha256=
+lecrapaud/experiment.py,sha256=hhi6NdVKtxoyx_AGBB4iNEZZpd9b3rKs23qiLPf-mUk,2384
 lecrapaud/feature_engineering.py,sha256=UM-EIOsgYWedqsR9uA-09eaWSb9FofVxoE0rRcDelQ8,39173
 lecrapaud/feature_selection.py,sha256=Q9xWVmZsvRjX9mJHB_PY_KLXsEAYNLX7txSe0cniY4A,47529
 lecrapaud/integrations/openai_integration.py,sha256=hHLF3fk5Bps8KNbNrEL3NUFa945jwClE6LrLpuMZOd4,7459
@@ -43,8 +43,8 @@ lecrapaud/misc/test-gpu-resnet.ipynb,sha256=27Vu7nYwujYeh3fOxBNCnKJn3MXNPKZU-U8o
 lecrapaud/misc/test-gpu-transformers.ipynb,sha256=k6MBSs_Um1h4PykvE-LTBcdpbWLbIFST_xl_AFW2jgI,8444
 lecrapaud/model_selection.py,sha256=o4_hOEp91_33HtMatVHU7YPc71KZ2hK7wucN63xqWkA,88017
 lecrapaud/search_space.py,sha256=caCehJklD3-sgmlisJj_GmuB7LJiVvTF71gEjPGDvV4,36336
-lecrapaud/utils.py,sha256=
-lecrapaud-0.20.
-lecrapaud-0.20.
-lecrapaud-0.20.
-lecrapaud-0.20.
+lecrapaud/utils.py,sha256=0k76HFETO0_NgCYUv8b3RTBLgry6MsDBaHJfpAplxCY,8855
+lecrapaud-0.20.2.dist-info/METADATA,sha256=FUXEVYVCJAoat8HUtsupISlRbK56YVxezYwCH6j4kBE,14239
+lecrapaud-0.20.2.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+lecrapaud-0.20.2.dist-info/licenses/LICENSE,sha256=MImCryu0AnqhJE_uAZD-PIDKXDKb8sT7v0i1NOYeHTM,11350
+lecrapaud-0.20.2.dist-info/RECORD,,
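
Each RECORD row is `path,sha256=<digest>,size`, where the digest is an unpadded URL-safe base64 SHA-256 of the file, so the rewritten rows above track the three edited modules plus the dist-info files. A quick local check of one entry (sketch; the path assumes an unpacked wheel):

```python
import base64
import hashlib
from pathlib import Path

data = Path("lecrapaud/config.py").read_bytes()  # inside an unpacked wheel
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
print(f"lecrapaud/config.py,sha256={digest.decode()},{len(data)}")
```
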
lecrapaud-0.20.1.dist-info/METADATA
REMOVED
@@ -1,250 +0,0 @@
Metadata-Version: 2.4
Name: lecrapaud
Version: 0.20.1
Summary: Framework for machine and deep learning, with regression, classification and time series analysis
License: Apache License
License-File: LICENSE
Author: Pierre H. Gallet
Requires-Python: ==3.12.*
Classifier: License :: Other/Proprietary License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: catboost (>=1.2.8)
Requires-Dist: category-encoders (>=2.8.1)
Requires-Dist: celery (>=5.5.3)
Requires-Dist: ftfy (>=6.3.1)
Requires-Dist: joblib (>=1.5.1)
Requires-Dist: keras (>=3.10.0)
Requires-Dist: lightgbm (>=4.6.0)
Requires-Dist: matplotlib (>=3.10.3)
Requires-Dist: mlxtend (>=0.23.4)
Requires-Dist: numpy (>=2.1.3)
Requires-Dist: openai (>=1.88.0)
Requires-Dist: pandas (>=2.3.0)
Requires-Dist: pydantic (>=2.9.2)
Requires-Dist: python-dotenv (>=1.1.0)
Requires-Dist: scikit-learn (>=1.6.1)
Requires-Dist: scipy (<1.14.0)
Requires-Dist: seaborn (>=0.13.2)
Requires-Dist: sqlalchemy (>=2.0.41)
Requires-Dist: tensorboardx (>=2.6.4)
Requires-Dist: tensorflow (>=2.19.0)
Requires-Dist: tiktoken (>=0.9.0)
Requires-Dist: tqdm (>=4.67.1)
Requires-Dist: xgboost (>=3.0.2)
Description-Content-Type: text/markdown

<div align="center">

<img src="https://s3.amazonaws.com/pix.iemoji.com/images/emoji/apple/ios-12/256/frog-face.png" width=120 alt="crapaud"/>

## Welcome to LeCrapaud

**An all-in-one machine learning framework**

[](https://github.com/pierregallet/lecrapaud/stargazers)
[](https://badge.fury.io/py/lecrapaud)
[](https://pypi.org/project/lecrapaud)
[](https://github.com/pierregallet/lecrapaud/blob/main/LICENSE)
[](https://codecov.io/gh/pierregallet/lecrapaud)

</div>

## 🚀 Introduction

LeCrapaud is a high-level Python library for end-to-end machine learning workflows on tabular data, with a focus on financial and stock datasets. It provides a simple API to handle feature engineering, model selection, training, and prediction, all in a reproducible and modular way.

## ✨ Key Features

- 🧩 Modular pipeline: Feature engineering, preprocessing, selection, and modeling as independent steps
- 🤖 Automated model selection and hyperparameter optimization
- 📊 Easy integration with pandas DataFrames
- 🔬 Supports both regression and classification tasks
- 🛠️ Simple API for both full pipeline and step-by-step usage
- 📦 Ready for production and research workflows

## ⚡ Quick Start

### Install the package

```sh
pip install lecrapaud
```

### How it works

This package provides a high-level API to manage experiments for feature engineering, model selection, and prediction on tabular data (e.g. stock data).

### Typical workflow

```python
from lecrapaud import LeCrapaud

# 1. Create the main app
app = LeCrapaud(uri=uri)

# 2. Define your experiment context (see your notebook or api.py for all options)
context = {
    "data": your_dataframe,
    "columns_drop": [...],
    "columns_date": [...],
    # ... other config options
}

# 3. Create an experiment
experiment = app.create_experiment(**context)

# 4. Run the full training pipeline
experiment.train(your_dataframe)

# 5. Make predictions on new data
predictions = experiment.predict(new_data)
```

### Database Configuration (Required)

LeCrapaud requires access to a MySQL database to store experiments and results. You must either:

- Pass a valid MySQL URI to the `LeCrapaud` constructor:

  ```python
  app = LeCrapaud(uri="mysql+pymysql://user:password@host:port/dbname")
  ```

- **OR** set the following environment variables before using the package:
  - `DB_USER`, `DB_PASSWORD`, `DB_HOST`, `DB_PORT`, `DB_NAME`
  - Or set `DB_URI` directly with your full connection string.

If neither is provided, database operations will not work.

### Using OpenAI Embeddings (Optional)

If you want to use the `columns_pca` embedding feature (for advanced feature engineering), you must set the `OPENAI_API_KEY` environment variable with your OpenAI API key:

```sh
export OPENAI_API_KEY=sk-...
```

If this variable is not set, features relying on OpenAI embeddings will not be available.

### Experiment Context Arguments

Below are the main arguments you can pass to `create_experiment` (or the `Experiment` class):

| Argument             | Type      | Description                                                                               | Example/Default    |
|----------------------|-----------|-------------------------------------------------------------------------------------------|--------------------|
| `columns_binary`     | list      | Columns to treat as binary                                                                | `['flag']`         |
| `columns_boolean`    | list      | Columns to treat as boolean                                                               | `['is_active']`    |
| `columns_date`       | list      | Columns to treat as dates                                                                 | `['date']`         |
| `columns_drop`       | list      | Columns to drop during feature engineering                                                | `['col1', 'col2']` |
| `columns_frequency`  | list      | Columns to frequency encode                                                               | `['category']`     |
| `columns_onehot`     | list      | Columns to one-hot encode                                                                 | `['sector']`       |
| `columns_ordinal`    | list      | Columns to ordinal encode                                                                 | `['grade']`        |
| `columns_pca`        | list      | Columns to use for PCA/embeddings (requires `OPENAI_API_KEY` if using OpenAI embeddings)  | `['text_col']`     |
| `columns_te_groupby` | list      | Columns for target encoding groupby                                                       | `['sector']`       |
| `columns_te_target`  | list      | Columns for target encoding target                                                        | `['target']`       |
| `data`               | DataFrame | Your main dataset (required for new experiment)                                           | `your_dataframe`   |
| `date_column`        | str       | Name of the date column                                                                   | `'date'`           |
| `experiment_name`    | str       | Name for the training session                                                             | `'my_session'`     |
| `group_column`       | str       | Name of the group column                                                                  | `'stock_id'`       |
| `max_timesteps`      | int       | Max timesteps for time series models                                                      | `30`               |
| `models_idx`         | list      | Indices of models to use for model selection                                              | `[0, 1, 2]`        |
| `number_of_trials`   | int       | Number of trials for hyperparameter optimization                                          | `20`               |
| `perform_crossval`   | bool      | Whether to perform cross-validation                                                       | `True`/`False`     |
| `perform_hyperopt`   | bool      | Whether to perform hyperparameter optimization                                            | `True`/`False`     |
| `plot`               | bool      | Whether to plot results                                                                   | `True`/`False`     |
| `preserve_model`     | bool      | Whether to preserve the best model                                                        | `True`/`False`     |
| `target_clf`         | list      | List of classification target column indices/names                                        | `[1, 2, 3]`        |
| `target_mclf`        | list      | Multi-class classification targets (not yet implemented)                                  | `[11]`             |
| `target_numbers`     | list      | List of regression target column indices/names                                            | `[1, 2, 3]`        |
| `test_size`          | int/float | Test set size (count or fraction)                                                         | `0.2`              |
| `time_series`        | bool      | Whether the data is time series                                                           | `True`/`False`     |
| `val_size`           | int/float | Validation set size (count or fraction)                                                   | `0.2`              |

**Note:**

- Not all arguments are required; defaults may exist for some.
- For `columns_pca` with OpenAI embeddings, you must set the `OPENAI_API_KEY` environment variable.

### Modular usage

You can also use each step independently:

```python
data_eng = experiment.feature_engineering(data)
train, val, test = experiment.preprocess_feature(data_eng)
features = experiment.feature_selection(train)
std_data, reshaped_data = experiment.preprocess_model(train, val, test)
experiment.model_selection(std_data, reshaped_data)
```

## ⚠️ Using Alembic in Your Project (Important for Integrators)

If you use Alembic for migrations in your own project and you share the same database with LeCrapaud, you must ensure that Alembic does **not** attempt to drop or modify LeCrapaud tables (those prefixed with `{LECRAPAUD_TABLE_PREFIX}_`).

By default, Alembic's autogenerate feature will propose to drop any table that exists in the database but is not present in your project's models. To prevent this, add the following filter to your `env.py`:

```python
def include_object(object, name, type_, reflected, compare_to):
    if type_ == "table" and name.startswith(f"{LECRAPAUD_TABLE_PREFIX}_"):
        return False  # Ignore LeCrapaud tables
    return True

context.configure(
    # ... other options ...
    include_object=include_object,
)
```

This will ensure that Alembic ignores all tables created by LeCrapaud when generating migrations for your own project.

---

## 🤝 Contributing

### Reminders for GitHub usage

1. Creating a GitHub repository

```sh
$ brew install gh
$ gh auth login
$ gh repo create
```

2. Initializing git and pushing the first commit to the remote repository

```sh
$ git init
$ git add .
$ git commit -m 'first commit'
$ git remote add origin <YOUR_REPO_URL>
$ git push -u origin master
```

3. Use conventional commits
   https://www.conventionalcommits.org/en/v1.0.0/#summary

4. Create a virtual environment

```sh
$ pip install virtualenv
$ python -m venv .venv
$ source .venv/bin/activate
```

5. Install dependencies

```sh
$ make install
```

6. Deactivate the virtualenv (if needed)

```sh
$ deactivate
```

---

Pierre Gallet © 2025
{lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/WHEEL
File without changes

{lecrapaud-0.20.1.dist-info → lecrapaud-0.20.2.dist-info}/licenses/LICENSE
File without changes