lecrapaud 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (42)
  1. lecrapaud/__init__.py +1 -0
  2. lecrapaud/api.py +277 -0
  3. lecrapaud/config.py +10 -0
  4. lecrapaud/db/__init__.py +1 -0
  5. lecrapaud/db/alembic/env.py +2 -2
  6. lecrapaud/db/alembic/versions/2025_05_31_1834-52b809a34371_make_nullablee.py +24 -12
  7. lecrapaud/db/alembic/versions/2025_06_17_1652-c45f5e49fa2c_make_fields_nullable.py +89 -0
  8. lecrapaud/db/alembic.ini +116 -0
  9. lecrapaud/db/models/__init__.py +10 -10
  10. lecrapaud/db/models/base.py +176 -1
  11. lecrapaud/db/models/dataset.py +25 -20
  12. lecrapaud/db/models/feature.py +5 -6
  13. lecrapaud/db/models/feature_selection.py +3 -4
  14. lecrapaud/db/models/feature_selection_rank.py +3 -4
  15. lecrapaud/db/models/model.py +3 -4
  16. lecrapaud/db/models/model_selection.py +15 -8
  17. lecrapaud/db/models/model_training.py +15 -7
  18. lecrapaud/db/models/score.py +9 -6
  19. lecrapaud/db/models/target.py +16 -8
  20. lecrapaud/db/session.py +66 -0
  21. lecrapaud/experiment.py +64 -0
  22. lecrapaud/feature_engineering.py +747 -1022
  23. lecrapaud/feature_selection.py +915 -998
  24. lecrapaud/integrations/openai_integration.py +225 -0
  25. lecrapaud/jobs/__init__.py +2 -2
  26. lecrapaud/jobs/config.py +1 -1
  27. lecrapaud/jobs/scheduler.py +1 -1
  28. lecrapaud/jobs/tasks.py +6 -6
  29. lecrapaud/model_selection.py +1060 -960
  30. lecrapaud/search_space.py +4 -0
  31. lecrapaud/utils.py +2 -2
  32. lecrapaud-0.4.1.dist-info/METADATA +171 -0
  33. {lecrapaud-0.4.0.dist-info → lecrapaud-0.4.1.dist-info}/RECORD +36 -35
  34. {lecrapaud-0.4.0.dist-info → lecrapaud-0.4.1.dist-info}/WHEEL +1 -1
  35. lecrapaud/db/crud.py +0 -179
  36. lecrapaud/db/services.py +0 -0
  37. lecrapaud/db/setup.py +0 -58
  38. lecrapaud/predictions.py +0 -292
  39. lecrapaud/training.py +0 -151
  40. lecrapaud-0.4.0.dist-info/METADATA +0 -103
  41. /lecrapaud/{directory_management.py → directories.py} +0 -0
  42. {lecrapaud-0.4.0.dist-info → lecrapaud-0.4.1.dist-info}/LICENSE +0 -0
lecrapaud/__init__.py CHANGED
@@ -0,0 +1 @@
+ from lecrapaud.api import *
lecrapaud/api.py ADDED
@@ -0,0 +1,277 @@
+ """
+ Main API class
+
+ The way I want it to work:
+
+ app = LeCrapaud()
+
+ kwargs = {
+
+ }
+
+ experiment = app.create_experiment(**kwargs)  # returns an Experiment() instance
+ or
+ experiment = app.get_experiment(exp_id)
+
+ best_features, artifacts, best_model = experiment.train(get_data, get_data_params)
+
+ new_data + target_pred + target_proba (if classif) = experiment.predict(**new_data)
+
+ We also want to be able to call each step on its own:
+
+ experiment.feature_engineering(data): feature engineering, returns data
+
+ experiment.preprocess_feature(data): split, encoding, PCAs, returns train, val, test dataframes
+
+ experiment.feature_selection(train): returns features
+
+ experiment.preprocess_model(train, val, test): returns data = dict of dataframes
+
+ experiment.model_selection(data): returns best_model
+ """
+
+ import joblib
+ import pandas as pd
+ import logging
+ from lecrapaud.utils import logger
+ from lecrapaud.db.session import init_db
+ from lecrapaud.feature_selection import FeatureSelectionEngine, PreprocessModel
+ from lecrapaud.model_selection import ModelSelectionEngine, ModelEngine
+ from lecrapaud.feature_engineering import FeatureEngineeringEngine, PreprocessFeature
+ from lecrapaud.experiment import create_dataset
+ from lecrapaud.db import Dataset
+
+
+ class LeCrapaud:
+     def __init__(self, uri: str = None):
+         init_db(uri=uri)
+
+     def create_experiment(self, **kwargs):
+         return Experiment(**kwargs)
+
+     def get_experiment(self, id: int):
+         return Experiment(id)
+
+
+ class Experiment:
+     def __init__(self, id=None, **kwargs):
+         if id:
+             self.dataset = Dataset.get(id)
+         else:
+             self.dataset = create_dataset(**kwargs)
+
+         for key, value in kwargs.items():
+             setattr(self, key, value)
+
+         self.context = {
+             # generic
+             "dataset": self.dataset,
+             # for FeatureEngineering
+             "columns_drop": self.columns_drop,
+             "columns_boolean": self.columns_boolean,
+             "columns_date": self.columns_date,
+             "columns_te_groupby": self.columns_te_groupby,
+             "columns_te_target": self.columns_te_target,
+             # for PreprocessFeature
+             "time_series": self.time_series,
+             "date_column": self.date_column,
+             "group_column": self.group_column,
+             "val_size": self.val_size,
+             "test_size": self.test_size,
+             "columns_pca": self.columns_pca,
+             "columns_onehot": self.columns_onehot,
+             "columns_binary": self.columns_binary,
+             "columns_frequency": self.columns_frequency,
+             "columns_ordinal": self.columns_ordinal,
+             "target_numbers": self.target_numbers,
+             "target_clf": self.target_clf,
+             # for PreprocessModel
+             "models_idx": self.models_idx,
+             "max_timesteps": self.max_timesteps,
+             # for ModelSelection
+             "perform_hyperopt": self.perform_hyperopt,
+             "number_of_trials": self.number_of_trials,
+             "perform_crossval": self.perform_crossval,
+             "plot": self.plot,
+             "preserve_model": self.preserve_model,
+             # not yet
+             "target_mclf": self.target_mclf,
+         }
+
+     def train(self, data):
+         data_eng = self.feature_engineering(data)
+         train, val, test = self.preprocess_feature(data_eng)
+         all_features = self.feature_selection(train)
+         std_data, reshaped_data = self.preprocess_model(train, val, test)
+         self.model_selection(std_data, reshaped_data)
+
+     def predict(self, new_data, verbose: int = 0):
+         if verbose == 0:
+             logger.setLevel(logging.WARNING)
+
+         logger.warning("Running prediction...")
+
+         data = self.feature_engineering(
+             data=new_data,
+             for_training=False,
+         )
+         data = self.preprocess_feature(data, for_training=False)
+         data, scaled_data, reshaped_data = self.preprocess_model(
+             data, for_training=False
+         )
+
+         for target_number in self.target_numbers:
+
+             # loading model
+             training_target_dir = f"{self.dataset.path}/TARGET_{target_number}"
+             all_features = self.dataset.get_all_features(
+                 date_column=self.date_column, group_column=self.group_column
+             )
+             if self.dataset.name == "data_28_X_X":
+                 features = joblib.load(
+                     f"{self.dataset.path}/preprocessing/features_{target_number}.pkl"
+                 )  # we keep this for backward compatibility
+             else:
+                 features = self.dataset.get_features(target_number)
+             model = ModelEngine(path=training_target_dir)
+
+             # getting data
+             if model.recurrent:
+                 features_idx = [
+                     i for i, e in enumerate(all_features) if e in set(features)
+                 ]
+                 x_pred = reshaped_data[:, :, features_idx]
+             else:
+                 x_pred = scaled_data[features] if model.need_scaling else data[features]
+
+             # predicting
+             y_pred = model.predict(x_pred)
+
+             # fix for recurrent model because x_val has no index as it is a 3D np array
+             if model.recurrent:
+                 y_pred.index = (
+                     new_data.index
+                 )  # TODO: not sure this will work for old dataset not aligned with data_for_training for test use case (done, this is why we decode the test set)
+
+             # unscaling prediction
+             if (
+                 model.need_scaling
+                 and model.target_type == "regression"
+                 and model.scaler_y is not None
+             ):
+                 y_pred = pd.Series(
+                     model.scaler_y.inverse_transform(
+                         y_pred.values.reshape(-1, 1)
+                     ).flatten(),
+                     index=new_data.index,
+                 )
+
+             # renaming pred column and concatenating with initial data
+             if isinstance(y_pred, pd.DataFrame):
+                 y_pred.rename(
+                     columns={"PRED": f"TARGET_{target_number}_PRED"}, inplace=True
+                 )
+                 new_data = pd.concat(
+                     [new_data, y_pred[f"TARGET_{target_number}_PRED"]], axis=1
+                 )
+
+             else:
+                 y_pred.name = f"TARGET_{target_number}_PRED"
+                 new_data = pd.concat([new_data, y_pred], axis=1)
+
+         return new_data
+
+     def feature_engineering(self, data, for_training=True):
+         app = FeatureEngineeringEngine(
+             data=data,
+             columns_drop=self.columns_drop,
+             columns_boolean=self.columns_boolean,
+             columns_date=self.columns_date,
+             columns_te_groupby=self.columns_te_groupby,
+             columns_te_target=self.columns_te_target,
+             for_training=for_training,
+         )
+         data = app.run()
+         return data
+
+     def preprocess_feature(self, data, for_training=True):
+         app = PreprocessFeature(
+             data=data,
+             dataset=self.dataset,
+             time_series=self.time_series,
+             date_column=self.date_column,
+             group_column=self.group_column,
+             val_size=self.val_size,
+             test_size=self.test_size,
+             columns_pca=self.columns_pca,
+             columns_onehot=self.columns_onehot,
+             columns_binary=self.columns_binary,
+             columns_frequency=self.columns_frequency,
+             columns_ordinal=self.columns_ordinal,
+             target_numbers=self.target_numbers,
+             target_clf=self.target_clf,
+         )
+         if for_training:
+             train, val, test = app.run()
+             return train, val, test
+         else:
+             data = app.inference()
+             return data
+
+     def feature_selection(self, train):
+         for target_number in self.target_numbers:
+             app = FeatureSelectionEngine(
+                 train=train,
+                 target_number=target_number,
+                 dataset=self.dataset,
+                 target_clf=self.target_clf,
+             )
+             app.run()
+         self.dataset = Dataset.get(self.dataset.id)
+         all_features = self.dataset.get_all_features(
+             date_column=self.date_column, group_column=self.group_column
+         )
+         return all_features
+
+     def preprocess_model(self, train, val=None, test=None, for_training=True):
+         app = PreprocessModel(
+             train=train,
+             val=val,
+             test=test,
+             dataset=self.dataset,
+             target_numbers=self.target_numbers,
+             target_clf=self.target_clf,
+             models_idx=self.models_idx,
+             time_series=self.time_series,
+             max_timesteps=self.max_timesteps,
+             date_column=self.date_column,
+             group_column=self.group_column,
+         )
+         if for_training:
+             data, reshaped_data = app.run()
+             return data, reshaped_data
+         else:
+             data, scaled_data, reshaped_data = app.inference()
+             return data, scaled_data, reshaped_data
+
+     def model_selection(self, data, reshaped_data):
+         for target_number in self.target_numbers:
+             app = ModelSelectionEngine(
+                 data=data,
+                 reshaped_data=reshaped_data,
+                 target_number=target_number,
+                 dataset=self.dataset,
+                 target_clf=self.target_clf,
+                 models_idx=self.models_idx,
+                 time_series=self.time_series,
+                 date_column=self.date_column,
+                 group_column=self.group_column,
+             )
+             app.run(
+                 self.session_name,
+                 perform_hyperopt=self.perform_hyperopt,
+                 number_of_trials=self.number_of_trials,
+                 perform_crossval=self.perform_crossval,
+                 plot=self.plot,
+                 preserve_model=self.preserve_model,
+             )
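
Taken together with the docstring at the top of the file, this is the new public entry point of the package. A minimal usage sketch follows; the keyword names are the ones the Experiment constructor stores and later reads from self, but the values are illustrative placeholders, and create_dataset() may require additional dataset-level kwargs not shown in this diff:

    # Hypothetical end-to-end usage of the 0.4.1 API; values are examples only.
    import pandas as pd
    from lecrapaud import LeCrapaud

    app = LeCrapaud()  # calls init_db(), using DB_URI or the DB_* variables from config.py

    experiment = app.create_experiment(
        # FeatureEngineering
        columns_drop=[], columns_boolean=[], columns_date=[],
        columns_te_groupby=[], columns_te_target=[],
        # PreprocessFeature
        time_series=False, date_column=None, group_column=None,
        val_size=0.15, test_size=0.15,
        columns_pca=[], columns_onehot=[], columns_binary=[],
        columns_frequency=[], columns_ordinal=[],
        target_numbers=[1], target_clf=[1], target_mclf=[],
        # PreprocessModel / ModelSelection
        models_idx=[0], max_timesteps=None,
        perform_hyperopt=True, number_of_trials=20,
        perform_crossval=False, plot=False, preserve_model=True,
        session_name="demo",  # passed positionally to ModelSelectionEngine.run()
    )

    df = pd.read_csv("training_data.csv")  # hypothetical input
    experiment.train(df)                   # feature eng -> preprocessing -> selection -> models
    predictions = experiment.predict(df)   # returns df with TARGET_<n>_PRED columns appended

Note that although the docstring sketches experiment.predict(**new_data), the implemented signature is predict(self, new_data, verbose=0), i.e. the dataframe is passed as a single argument.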
lecrapaud/config.py CHANGED
@@ -14,3 +14,13 @@ FA2 = os.getenv("2FA")
  INT = os.getenv("INT")
  LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO")
  ALPHA_VENTAGE_API_KEY = os.getenv("ALPHA_VENTAGE_API_KEY")
+
+ DB_USER = os.getenv("TEST_DB_USER") if PYTHON_ENV == "Test" else os.getenv("DB_USER")
+ DB_PASSWORD = (
+     os.getenv("TEST_DB_PASSWORD") if PYTHON_ENV == "Test" else os.getenv("DB_PASSWORD")
+ )
+ DB_HOST = os.getenv("TEST_DB_HOST") if PYTHON_ENV == "Test" else os.getenv("DB_HOST")
+ DB_PORT = os.getenv("TEST_DB_PORT") if PYTHON_ENV == "Test" else os.getenv("DB_PORT")
+ DB_NAME = os.getenv("TEST_DB_NAME") if PYTHON_ENV == "Test" else os.getenv("DB_NAME")
+ DB_URI = os.getenv("DB_URI", None)
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
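
These values are resolved once at import time, and the TEST_-prefixed variants are used whenever PYTHON_ENV is "Test", so the environment must be set before lecrapaud is imported. A minimal sketch with placeholder credentials (the mysql+pymysql driver string is an assumption based on the MySQL dialect used in the migrations, not something config.py prescribes):

    # Hypothetical environment setup for a local run.
    import os

    os.environ["DB_USER"] = "lecrapaud"
    os.environ["DB_PASSWORD"] = "secret"
    os.environ["DB_HOST"] = "localhost"
    os.environ["DB_PORT"] = "3306"
    os.environ["DB_NAME"] = "lecrapaud"
    # ...or a single connection string, read as DB_URI:
    os.environ["DB_URI"] = "mysql+pymysql://lecrapaud:secret@localhost:3306/lecrapaud"

    from lecrapaud.config import DB_URI  # resolved from the variables above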
lecrapaud/db/__init__.py CHANGED
@@ -0,0 +1 @@
+ from lecrapaud.db.models import *
lecrapaud/db/alembic/env.py CHANGED
@@ -4,7 +4,7 @@ from sqlalchemy import engine_from_config
  from sqlalchemy import pool
 
  from alembic import context
- from src.db.setup import DATABASE_URL
+ from lecrapaud.db.session import DATABASE_URL
 
  # this is the Alembic Config object, which provides
  # access to the values within the .ini file in use.
@@ -18,7 +18,7 @@ if config.config_file_name is not None:
 
  # add your model's MetaData object here
  # for 'autogenerate' support
- from src.db.models.base import Base
+ from lecrapaud.db.models.base import Base
 
  target_metadata = Base.metadata
 
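The rest of env.py is not shown in this diff, but a common Alembic pattern (an assumption here, not something the hunks above confirm) is for env.py to inject the imported DATABASE_URL so that the %(DATABASE_URL)s placeholder in the alembic.ini added in this release resolves at runtime:

    # Hypothetical sketch of how env.py could wire the URL into the Alembic config.
    from alembic import context
    from lecrapaud.db.session import DATABASE_URL

    config = context.config
    config.set_main_option("sqlalchemy.url", DATABASE_URL)  # fills in %(DATABASE_URL)s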
lecrapaud/db/alembic/versions/2025_05_31_1834-52b809a34371_make_nullablee.py CHANGED
@@ -5,6 +5,7 @@ Revises: 339927587383
  Create Date: 2025-05-31 18:34:58.962966
 
  """
+
  from typing import Sequence, Union
 
  from alembic import op
@@ -12,27 +13,38 @@ import sqlalchemy as sa
  from sqlalchemy.dialects import mysql
 
  # revision identifiers, used by Alembic.
- revision: str = '52b809a34371'
- down_revision: Union[str, None] = '339927587383'
+ revision: str = "52b809a34371"
+ down_revision: Union[str, None] = "339927587383"
  branch_labels: Union[str, Sequence[str], None] = None
  depends_on: Union[str, Sequence[str], None] = None
 
 
  def upgrade() -> None:
      # ### commands auto generated by Alembic - please adjust! ###
-     op.alter_column('investment_runs', 'initial_portfolio',
-         existing_type=mysql.JSON(),
-         nullable=True)
-     op.create_index(op.f('ix_investment_runs_id'), 'investment_runs', ['id'], unique=False)
-     op.create_foreign_key(None, 'portfolios', 'investment_runs', ['investment_run_id'], ['id'], ondelete='CASCADE')
+     op.alter_column(
+         "investment_runs",
+         "initial_portfolio",
+         existing_type=mysql.JSON(),
+         nullable=True,
+     )
+     op.create_foreign_key(
+         None,
+         "portfolios",
+         "investment_runs",
+         ["investment_run_id"],
+         ["id"],
+         ondelete="CASCADE",
+     )
      # ### end Alembic commands ###
 
 
  def downgrade() -> None:
      # ### commands auto generated by Alembic - please adjust! ###
-     op.drop_constraint(None, 'portfolios', type_='foreignkey')
-     op.drop_index(op.f('ix_investment_runs_id'), table_name='investment_runs')
-     op.alter_column('investment_runs', 'initial_portfolio',
-         existing_type=mysql.JSON(),
-         nullable=False)
+     op.drop_constraint(None, "portfolios", type_="foreignkey")
+     op.alter_column(
+         "investment_runs",
+         "initial_portfolio",
+         existing_type=mysql.JSON(),
+         nullable=False,
+     )
      # ### end Alembic commands ###
lecrapaud/db/alembic/versions/2025_06_17_1652-c45f5e49fa2c_make_fields_nullable.py ADDED
@@ -0,0 +1,89 @@
+ """
+
+ Revision ID: c45f5e49fa2c
+ Revises: db8cdd83563a
+ Create Date: 2025-06-17 16:52:45.042045
+
+ """
+
+ from typing import Sequence, Union
+
+ from alembic import op
+ import sqlalchemy as sa
+ from sqlalchemy.dialects import mysql
+
+ # revision identifiers, used by Alembic.
+ revision: str = "c45f5e49fa2c"
+ down_revision: Union[str, None] = "db8cdd83563a"
+ branch_labels: Union[str, Sequence[str], None] = None
+ depends_on: Union[str, Sequence[str], None] = None
+
+
+ def upgrade() -> None:
+     # ### commands auto generated by Alembic - please adjust! ###
+     op.alter_column(
+         "datasets", "train_size", existing_type=mysql.INTEGER(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "test_size", existing_type=mysql.INTEGER(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "number_of_groups", existing_type=mysql.INTEGER(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "list_of_groups", existing_type=mysql.JSON(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "start_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "end_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "train_start_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "train_end_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "test_start_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     op.alter_column(
+         "datasets", "test_end_date", existing_type=mysql.DATETIME(), nullable=True
+     )
+     # ### end Alembic commands ###
+
+
+ def downgrade() -> None:
+     # ### commands auto generated by Alembic - please adjust! ###
+     op.alter_column(
+         "datasets", "test_end_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "test_start_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "train_end_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "train_start_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "end_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "start_date", existing_type=mysql.DATETIME(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "list_of_groups", existing_type=mysql.JSON(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "number_of_groups", existing_type=mysql.INTEGER(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "test_size", existing_type=mysql.INTEGER(), nullable=False
+     )
+     op.alter_column(
+         "datasets", "train_size", existing_type=mysql.INTEGER(), nullable=False
+     )
+     # ### end Alembic commands ###
lecrapaud/db/alembic.ini ADDED
@@ -0,0 +1,116 @@
+ # A generic, single database configuration.
+
+ [alembic]
+ # path to migration scripts
+ script_location = alembic
+
+ # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+ # Uncomment the line below if you want the files to be prepended with date and time
+ # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+ # for all available tokens
+ file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+ # sys.path path, will be prepended to sys.path if present.
+ # defaults to the current working directory.
+ prepend_sys_path = .
+
+ # timezone to use when rendering the date within the migration file
+ # as well as the filename.
+ # If specified, requires the python-dateutil library that can be
+ # installed by adding `alembic[tz]` to the pip requirements
+ # string value is passed to dateutil.tz.gettz()
+ # leave blank for localtime
+ # timezone =
+
+ # max length of characters to apply to the
+ # "slug" field
+ # truncate_slug_length = 40
+
+ # set to 'true' to run the environment during
+ # the 'revision' command, regardless of autogenerate
+ # revision_environment = false
+
+ # set to 'true' to allow .pyc and .pyo files without
+ # a source .py file to be detected as revisions in the
+ # versions/ directory
+ # sourceless = false
+
+ # version location specification; This defaults
+ # to src/db/alembic/versions. When using multiple version
+ # directories, initial revisions must be specified with --version-path.
+ # The path separator used here should be the separator specified by "version_path_separator" below.
+ # version_locations = %(here)s/bar:%(here)s/bat:src/db/alembic/versions
+
+ # version path separator; As mentioned above, this is the character used to split
+ # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+ # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+ # Valid values for version_path_separator are:
+ #
+ # version_path_separator = :
+ # version_path_separator = ;
+ # version_path_separator = space
+ version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
+
+ # set to 'true' to search source files recursively
+ # in each "version_locations" directory
+ # new in Alembic version 1.10
+ # recursive_version_locations = false
+
+ # the output encoding used when revision files
+ # are written from script.py.mako
+ # output_encoding = utf-8
+
+ sqlalchemy.url = %(DATABASE_URL)s
+
+
+ [post_write_hooks]
+ # post_write_hooks defines scripts or Python functions that are run
+ # on newly generated revision scripts. See the documentation for further
+ # detail and examples
+
+ # format using "black" - use the console_scripts runner, against the "black" entrypoint
+ # hooks = black
+ # black.type = console_scripts
+ # black.entrypoint = black
+ # black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+ # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+ # hooks = ruff
+ # ruff.type = exec
+ # ruff.executable = %(here)s/.venv/bin/ruff
+ # ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+ # Logging configuration
+ [loggers]
+ keys = root,sqlalchemy,alembic
+
+ [handlers]
+ keys = console
+
+ [formatters]
+ keys = generic
+
+ [logger_root]
+ level = WARN
+ handlers = console
+ qualname =
+
+ [logger_sqlalchemy]
+ level = WARN
+ handlers =
+ qualname = sqlalchemy.engine
+
+ [logger_alembic]
+ level = INFO
+ handlers =
+ qualname = alembic
+
+ [handler_console]
+ class = StreamHandler
+ args = (sys.stderr,)
+ level = NOTSET
+ formatter = generic
+
+ [formatter_generic]
+ format = %(levelname)-5.5s [%(name)s] %(message)s
+ datefmt = %H:%M:%S
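
With this file shipped in the wheel, the migrations above can in principle be applied programmatically through the standard Alembic API. A minimal sketch, assuming the working directory is lecrapaud/db so that script_location = alembic resolves (the path is an assumption, not something the diff confirms):

    # Hypothetical: apply the bundled migrations up to head.
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")   # assumes cwd is lecrapaud/db
    command.upgrade(cfg, "head")  # runs 52b809a34371, c45f5e49fa2c, ...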
lecrapaud/db/models/__init__.py CHANGED
@@ -1,11 +1,11 @@
- from src.db.models.base import Base
+ from lecrapaud.db.models.base import Base
 
- from src.db.models.dataset import Dataset
- from src.db.models.feature_selection_rank import FeatureSelectionRank
- from src.db.models.feature_selection import FeatureSelection
- from src.db.models.feature import Feature
- from src.db.models.model_selection import ModelSelection
- from src.db.models.model_training import ModelTraining
- from src.db.models.model import Model
- from src.db.models.score import Score
- from src.db.models.target import Target
+ from lecrapaud.db.models.dataset import Dataset
+ from lecrapaud.db.models.feature_selection_rank import FeatureSelectionRank
+ from lecrapaud.db.models.feature_selection import FeatureSelection
+ from lecrapaud.db.models.feature import Feature
+ from lecrapaud.db.models.model_selection import ModelSelection
+ from lecrapaud.db.models.model_training import ModelTraining
+ from lecrapaud.db.models.model import Model
+ from lecrapaud.db.models.score import Score
+ from lecrapaud.db.models.target import Target