ins_pricing-0.4.5-py3-none-any.whl → ins_pricing-0.5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. ins_pricing/README.md +48 -22
  2. ins_pricing/__init__.py +142 -90
  3. ins_pricing/cli/BayesOpt_entry.py +58 -46
  4. ins_pricing/cli/BayesOpt_incremental.py +77 -110
  5. ins_pricing/cli/Explain_Run.py +42 -23
  6. ins_pricing/cli/Explain_entry.py +551 -577
  7. ins_pricing/cli/Pricing_Run.py +42 -23
  8. ins_pricing/cli/bayesopt_entry_runner.py +51 -16
  9. ins_pricing/cli/utils/bootstrap.py +23 -0
  10. ins_pricing/cli/utils/cli_common.py +256 -256
  11. ins_pricing/cli/utils/cli_config.py +379 -360
  12. ins_pricing/cli/utils/import_resolver.py +375 -358
  13. ins_pricing/cli/utils/notebook_utils.py +256 -242
  14. ins_pricing/cli/watchdog_run.py +216 -198
  15. ins_pricing/frontend/__init__.py +10 -10
  16. ins_pricing/frontend/app.py +132 -61
  17. ins_pricing/frontend/config_builder.py +33 -0
  18. ins_pricing/frontend/example_config.json +11 -0
  19. ins_pricing/frontend/example_workflows.py +1 -1
  20. ins_pricing/frontend/runner.py +340 -388
  21. ins_pricing/governance/__init__.py +20 -20
  22. ins_pricing/governance/release.py +159 -159
  23. ins_pricing/modelling/README.md +1 -1
  24. ins_pricing/modelling/__init__.py +147 -92
  25. ins_pricing/modelling/{core/bayesopt → bayesopt}/README.md +31 -13
  26. ins_pricing/modelling/{core/bayesopt → bayesopt}/__init__.py +64 -102
  27. ins_pricing/modelling/{core/bayesopt → bayesopt}/config_components.py +12 -0
  28. ins_pricing/modelling/{core/bayesopt → bayesopt}/config_preprocess.py +589 -552
  29. ins_pricing/modelling/{core/bayesopt → bayesopt}/core.py +987 -958
  30. ins_pricing/modelling/{core/bayesopt → bayesopt}/model_explain_mixin.py +296 -296
  31. ins_pricing/modelling/{core/bayesopt → bayesopt}/model_plotting_mixin.py +488 -548
  32. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/__init__.py +27 -27
  33. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_ft_components.py +349 -342
  34. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_ft_trainer.py +921 -913
  35. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_gnn.py +794 -785
  36. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_resn.py +454 -446
  37. ins_pricing/modelling/bayesopt/trainers/__init__.py +19 -0
  38. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_base.py +1294 -1282
  39. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_ft.py +64 -56
  40. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_glm.py +203 -198
  41. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_gnn.py +333 -325
  42. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_resn.py +279 -267
  43. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_xgb.py +515 -313
  44. ins_pricing/modelling/bayesopt/utils/__init__.py +67 -0
  45. ins_pricing/modelling/bayesopt/utils/constants.py +21 -0
  46. ins_pricing/modelling/{core/bayesopt → bayesopt}/utils/distributed_utils.py +193 -186
  47. ins_pricing/modelling/bayesopt/utils/io_utils.py +7 -0
  48. ins_pricing/modelling/bayesopt/utils/losses.py +27 -0
  49. ins_pricing/modelling/bayesopt/utils/metrics_and_devices.py +17 -0
  50. ins_pricing/modelling/{core/bayesopt → bayesopt}/utils/torch_trainer_mixin.py +636 -623
  51. ins_pricing/modelling/{core/evaluation.py → evaluation.py} +113 -104
  52. ins_pricing/modelling/explain/__init__.py +55 -55
  53. ins_pricing/modelling/explain/metrics.py +27 -174
  54. ins_pricing/modelling/explain/permutation.py +237 -237
  55. ins_pricing/modelling/plotting/__init__.py +40 -36
  56. ins_pricing/modelling/plotting/compat.py +228 -0
  57. ins_pricing/modelling/plotting/curves.py +572 -572
  58. ins_pricing/modelling/plotting/diagnostics.py +163 -163
  59. ins_pricing/modelling/plotting/geo.py +362 -362
  60. ins_pricing/modelling/plotting/importance.py +121 -121
  61. ins_pricing/pricing/__init__.py +27 -27
  62. ins_pricing/pricing/factors.py +67 -56
  63. ins_pricing/production/__init__.py +35 -25
  64. ins_pricing/production/{predict.py → inference.py} +140 -57
  65. ins_pricing/production/monitoring.py +8 -21
  66. ins_pricing/reporting/__init__.py +11 -11
  67. ins_pricing/setup.py +1 -1
  68. ins_pricing/tests/production/test_inference.py +90 -0
  69. ins_pricing/utils/__init__.py +112 -78
  70. ins_pricing/utils/device.py +258 -237
  71. ins_pricing/utils/features.py +53 -0
  72. ins_pricing/utils/io.py +72 -0
  73. ins_pricing/utils/logging.py +34 -1
  74. ins_pricing/{modelling/core/bayesopt/utils → utils}/losses.py +125 -129
  75. ins_pricing/utils/metrics.py +158 -24
  76. ins_pricing/utils/numerics.py +76 -0
  77. ins_pricing/utils/paths.py +9 -1
  78. ins_pricing/utils/profiling.py +8 -4
  79. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/METADATA +1 -1
  80. ins_pricing-0.5.1.dist-info/RECORD +132 -0
  81. ins_pricing/modelling/core/BayesOpt.py +0 -146
  82. ins_pricing/modelling/core/__init__.py +0 -1
  83. ins_pricing/modelling/core/bayesopt/trainers/__init__.py +0 -19
  84. ins_pricing/modelling/core/bayesopt/utils/__init__.py +0 -86
  85. ins_pricing/modelling/core/bayesopt/utils/constants.py +0 -183
  86. ins_pricing/modelling/core/bayesopt/utils/io_utils.py +0 -126
  87. ins_pricing/modelling/core/bayesopt/utils/metrics_and_devices.py +0 -555
  88. ins_pricing/modelling/core/bayesopt/utils.py +0 -105
  89. ins_pricing/modelling/core/bayesopt/utils_backup.py +0 -1503
  90. ins_pricing/tests/production/test_predict.py +0 -233
  91. ins_pricing-0.4.5.dist-info/RECORD +0 -130
  92. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/WHEEL +0 -0
  93. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/top_level.txt +0 -0
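The listing above shows the main 0.5.1 restructuring: `modelling/core/bayesopt` is flattened to `modelling/bayesopt`, several helpers move up to `ins_pricing/utils`, and `production/predict.py` is renamed to `production/inference.py`. A minimal standard-library sketch for checking which layout an installed environment actually carries (the distribution name `ins-pricing` is taken from this page's header; verify locally):

# Inspect the installed distribution's RECORD via importlib.metadata and report
# whether the flattened 0.5.1 layout (ins_pricing/modelling/bayesopt/...) or the
# old 0.4.5 layout (ins_pricing/modelling/core/bayesopt/...) is present.
from importlib.metadata import files, version

print(version("ins-pricing"))
record = [str(p) for p in (files("ins-pricing") or [])]
print("new layout:", any(p.startswith("ins_pricing/modelling/bayesopt/") for p in record))
print("old layout:", any(p.startswith("ins_pricing/modelling/core/bayesopt/") for p in record))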
@@ -1,102 +1,64 @@
-"""BayesOpt subpackage (split from monolithic BayesOpt.py)."""
-
-from __future__ import annotations
-
-import torch
-
-from ins_pricing.utils.torch_compat import disable_torch_dynamo_if_requested
-
-disable_torch_dynamo_if_requested()
-
-from .config_preprocess import (
-    BayesOptConfig,
-    DatasetPreprocessor,
-    OutputManager,
-    VersionManager,
-)
-from .core import BayesOptModel
-from .models import (
-    FeatureTokenizer,
-    FTTransformerCore,
-    FTTransformerSklearn,
-    GraphNeuralNetSklearn,
-    MaskedTabularDataset,
-    ResBlock,
-    ResNetSequential,
-    ResNetSklearn,
-    ScaledTransformerEncoderLayer,
-    SimpleGraphLayer,
-    SimpleGNN,
-    TabularDataset,
-)
-from .trainers import (
-    FTTrainer,
-    GLMTrainer,
-    GNNTrainer,
-    ResNetTrainer,
-    TrainerBase,
-    XGBTrainer,
-    _xgb_cuda_available,
-)
-from .utils import (
-    EPS,
-    DistributedUtils,
-    IOUtils,
-    PlotUtils,
-    TorchTrainerMixin,
-    TrainingUtils,
-    compute_batch_size,
-    csv_to_dict,
-    ensure_parent_dir,
-    free_cuda,
-    infer_factor_and_cate_list,
-    plot_dlift_list,
-    plot_lift_list,
-    set_global_seed,
-    split_data,
-    tweedie_loss,
-)
-
-__all__ = [
-    "BayesOptConfig",
-    "DatasetPreprocessor",
-    "OutputManager",
-    "VersionManager",
-    "BayesOptModel",
-    "FeatureTokenizer",
-    "FTTransformerCore",
-    "FTTransformerSklearn",
-    "GraphNeuralNetSklearn",
-    "MaskedTabularDataset",
-    "ResBlock",
-    "ResNetSequential",
-    "ResNetSklearn",
-    "ScaledTransformerEncoderLayer",
-    "SimpleGraphLayer",
-    "SimpleGNN",
-    "TabularDataset",
-    "FTTrainer",
-    "GLMTrainer",
-    "GNNTrainer",
-    "ResNetTrainer",
-    "TrainerBase",
-    "XGBTrainer",
-    "_xgb_cuda_available",
-    "EPS",
-    "DistributedUtils",
-    "IOUtils",
-    "PlotUtils",
-    "TorchTrainerMixin",
-    "TrainingUtils",
-    "compute_batch_size",
-    "csv_to_dict",
-    "ensure_parent_dir",
-    "free_cuda",
-    "infer_factor_and_cate_list",
-    "plot_dlift_list",
-    "plot_lift_list",
-    "set_global_seed",
-    "split_data",
-    "tweedie_loss",
-    "torch",
-]
+"""BayesOpt subpackage (split from monolithic BayesOpt.py)."""
+
+from __future__ import annotations
+
+from ins_pricing.utils.torch_compat import disable_torch_dynamo_if_requested
+
+disable_torch_dynamo_if_requested()
+
+from ins_pricing.modelling.bayesopt.config_preprocess import (
+    BayesOptConfig,
+    DatasetPreprocessor,
+    OutputManager,
+    VersionManager,
+)
+from ins_pricing.modelling.bayesopt.core import BayesOptModel
+from ins_pricing.modelling.bayesopt.models import (
+    FeatureTokenizer,
+    FTTransformerCore,
+    FTTransformerSklearn,
+    GraphNeuralNetSklearn,
+    MaskedTabularDataset,
+    ResBlock,
+    ResNetSequential,
+    ResNetSklearn,
+    ScaledTransformerEncoderLayer,
+    SimpleGraphLayer,
+    SimpleGNN,
+    TabularDataset,
+)
+from ins_pricing.modelling.bayesopt.trainers import (
+    FTTrainer,
+    GLMTrainer,
+    GNNTrainer,
+    ResNetTrainer,
+    TrainerBase,
+    XGBTrainer,
+    _xgb_cuda_available,
+)
+__all__ = [
+    "BayesOptConfig",
+    "DatasetPreprocessor",
+    "OutputManager",
+    "VersionManager",
+    "BayesOptModel",
+    "FeatureTokenizer",
+    "FTTransformerCore",
+    "FTTransformerSklearn",
+    "GraphNeuralNetSklearn",
+    "MaskedTabularDataset",
+    "ResBlock",
+    "ResNetSequential",
+    "ResNetSklearn",
+    "ScaledTransformerEncoderLayer",
+    "SimpleGraphLayer",
+    "SimpleGNN",
+    "TabularDataset",
+    "FTTrainer",
+    "GLMTrainer",
+    "GNNTrainer",
+    "ResNetTrainer",
+    "TrainerBase",
+    "XGBTrainer",
+    "_xgb_cuda_available",
+]
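The hunk above corresponds to entry 26 in the listing (`modelling/{core/bayesopt → bayesopt}/__init__.py`, +64 -102): relative imports become absolute, `import torch` and the `.utils` re-exports are dropped, and `__all__` shrinks to the config, model, and trainer names. A hedged migration sketch for downstream code, assuming the moved helpers now resolve under `ins_pricing.utils` (entry 74 shows `losses.py` moving there; confirm against the installed package):

# 0.4.5 (old path and old re-exports; kept as a comment for reference):
#   from ins_pricing.modelling.core.bayesopt import BayesOptModel, tweedie_loss

# 0.5.1: the public config/model/trainer names are still re-exported from the
# flattened package path shown in the hunk above.
from ins_pricing.modelling.bayesopt import BayesOptConfig, BayesOptModel, XGBTrainer

# Helpers such as tweedie_loss are no longer re-exported here. Based on the file
# move "{modelling/core/bayesopt/utils -> utils}/losses.py" they are assumed to
# live under ins_pricing.utils (unverified assumption):
#   from ins_pricing.utils.losses import tweedie_loss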
@@ -178,10 +178,18 @@ class XGBoostConfig:
     Attributes:
         max_depth_max: Maximum tree depth for hyperparameter tuning
         n_estimators_max: Maximum number of estimators for tuning
+        gpu_id: GPU device id for XGBoost (None = default)
+        cleanup_per_fold: Whether to cleanup GPU memory after each fold
+        cleanup_synchronize: Whether to synchronize CUDA during cleanup
+        use_dmatrix: Whether to use xgb.train with DMatrix/QuantileDMatrix
     """
 
     max_depth_max: int = 25
     n_estimators_max: int = 500
+    gpu_id: Optional[int] = None
+    cleanup_per_fold: bool = False
+    cleanup_synchronize: bool = False
+    use_dmatrix: bool = True
 
     @classmethod
     def from_flat_dict(cls, d: Dict[str, Any]) -> "XGBoostConfig":
@@ -189,6 +197,10 @@ class XGBoostConfig:
         return cls(
             max_depth_max=int(d.get("xgb_max_depth_max", 25)),
             n_estimators_max=int(d.get("xgb_n_estimators_max", 500)),
+            gpu_id=d.get("xgb_gpu_id"),
+            cleanup_per_fold=bool(d.get("xgb_cleanup_per_fold", False)),
+            cleanup_synchronize=bool(d.get("xgb_cleanup_synchronize", False)),
+            use_dmatrix=bool(d.get("xgb_use_dmatrix", True)),
         )
 
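The two hunks above (likely `config_components.py`, which shows +12 -0 in the listing) add four GPU/IO-related fields to `XGBoostConfig` and wire them into `from_flat_dict`, which reads flat keys prefixed with `xgb_`. A minimal sketch of that mapping; the dataclass below is re-declared standalone for illustration, since the full class is not part of this diff, and the flat-dict values are hypothetical:

# Illustrative re-declaration of XGBoostConfig matching the hunks above, so the
# xgb_* flat-key mapping can be exercised standalone.
from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class XGBoostConfig:
    max_depth_max: int = 25
    n_estimators_max: int = 500
    gpu_id: Optional[int] = None        # new in 0.5.1: XGBoost GPU device id
    cleanup_per_fold: bool = False      # new: free GPU memory after each fold
    cleanup_synchronize: bool = False   # new: synchronize CUDA during cleanup
    use_dmatrix: bool = True            # new: use xgb.train with (Quantile)DMatrix

    @classmethod
    def from_flat_dict(cls, d: Dict[str, Any]) -> "XGBoostConfig":
        # Mapping as shown in the diff: each field reads an "xgb_"-prefixed key.
        return cls(
            max_depth_max=int(d.get("xgb_max_depth_max", 25)),
            n_estimators_max=int(d.get("xgb_n_estimators_max", 500)),
            gpu_id=d.get("xgb_gpu_id"),
            cleanup_per_fold=bool(d.get("xgb_cleanup_per_fold", False)),
            cleanup_synchronize=bool(d.get("xgb_cleanup_synchronize", False)),
            use_dmatrix=bool(d.get("xgb_use_dmatrix", True)),
        )


# Hypothetical flat config: new 0.5.1 keys sit alongside the existing tuning bounds.
cfg = XGBoostConfig.from_flat_dict({
    "xgb_max_depth_max": 12,
    "xgb_gpu_id": 0,
    "xgb_cleanup_per_fold": True,
})
print(cfg.gpu_id, cfg.cleanup_per_fold, cfg.use_dmatrix)  # 0 True True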