ins-pricing 0.4.5-py3-none-any.whl → 0.5.1-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to their registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (93)
  1. ins_pricing/README.md +48 -22
  2. ins_pricing/__init__.py +142 -90
  3. ins_pricing/cli/BayesOpt_entry.py +58 -46
  4. ins_pricing/cli/BayesOpt_incremental.py +77 -110
  5. ins_pricing/cli/Explain_Run.py +42 -23
  6. ins_pricing/cli/Explain_entry.py +551 -577
  7. ins_pricing/cli/Pricing_Run.py +42 -23
  8. ins_pricing/cli/bayesopt_entry_runner.py +51 -16
  9. ins_pricing/cli/utils/bootstrap.py +23 -0
  10. ins_pricing/cli/utils/cli_common.py +256 -256
  11. ins_pricing/cli/utils/cli_config.py +379 -360
  12. ins_pricing/cli/utils/import_resolver.py +375 -358
  13. ins_pricing/cli/utils/notebook_utils.py +256 -242
  14. ins_pricing/cli/watchdog_run.py +216 -198
  15. ins_pricing/frontend/__init__.py +10 -10
  16. ins_pricing/frontend/app.py +132 -61
  17. ins_pricing/frontend/config_builder.py +33 -0
  18. ins_pricing/frontend/example_config.json +11 -0
  19. ins_pricing/frontend/example_workflows.py +1 -1
  20. ins_pricing/frontend/runner.py +340 -388
  21. ins_pricing/governance/__init__.py +20 -20
  22. ins_pricing/governance/release.py +159 -159
  23. ins_pricing/modelling/README.md +1 -1
  24. ins_pricing/modelling/__init__.py +147 -92
  25. ins_pricing/modelling/{core/bayesopt → bayesopt}/README.md +31 -13
  26. ins_pricing/modelling/{core/bayesopt → bayesopt}/__init__.py +64 -102
  27. ins_pricing/modelling/{core/bayesopt → bayesopt}/config_components.py +12 -0
  28. ins_pricing/modelling/{core/bayesopt → bayesopt}/config_preprocess.py +589 -552
  29. ins_pricing/modelling/{core/bayesopt → bayesopt}/core.py +987 -958
  30. ins_pricing/modelling/{core/bayesopt → bayesopt}/model_explain_mixin.py +296 -296
  31. ins_pricing/modelling/{core/bayesopt → bayesopt}/model_plotting_mixin.py +488 -548
  32. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/__init__.py +27 -27
  33. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_ft_components.py +349 -342
  34. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_ft_trainer.py +921 -913
  35. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_gnn.py +794 -785
  36. ins_pricing/modelling/{core/bayesopt → bayesopt}/models/model_resn.py +454 -446
  37. ins_pricing/modelling/bayesopt/trainers/__init__.py +19 -0
  38. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_base.py +1294 -1282
  39. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_ft.py +64 -56
  40. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_glm.py +203 -198
  41. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_gnn.py +333 -325
  42. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_resn.py +279 -267
  43. ins_pricing/modelling/{core/bayesopt → bayesopt}/trainers/trainer_xgb.py +515 -313
  44. ins_pricing/modelling/bayesopt/utils/__init__.py +67 -0
  45. ins_pricing/modelling/bayesopt/utils/constants.py +21 -0
  46. ins_pricing/modelling/{core/bayesopt → bayesopt}/utils/distributed_utils.py +193 -186
  47. ins_pricing/modelling/bayesopt/utils/io_utils.py +7 -0
  48. ins_pricing/modelling/bayesopt/utils/losses.py +27 -0
  49. ins_pricing/modelling/bayesopt/utils/metrics_and_devices.py +17 -0
  50. ins_pricing/modelling/{core/bayesopt → bayesopt}/utils/torch_trainer_mixin.py +636 -623
  51. ins_pricing/modelling/{core/evaluation.py → evaluation.py} +113 -104
  52. ins_pricing/modelling/explain/__init__.py +55 -55
  53. ins_pricing/modelling/explain/metrics.py +27 -174
  54. ins_pricing/modelling/explain/permutation.py +237 -237
  55. ins_pricing/modelling/plotting/__init__.py +40 -36
  56. ins_pricing/modelling/plotting/compat.py +228 -0
  57. ins_pricing/modelling/plotting/curves.py +572 -572
  58. ins_pricing/modelling/plotting/diagnostics.py +163 -163
  59. ins_pricing/modelling/plotting/geo.py +362 -362
  60. ins_pricing/modelling/plotting/importance.py +121 -121
  61. ins_pricing/pricing/__init__.py +27 -27
  62. ins_pricing/pricing/factors.py +67 -56
  63. ins_pricing/production/__init__.py +35 -25
  64. ins_pricing/production/{predict.py → inference.py} +140 -57
  65. ins_pricing/production/monitoring.py +8 -21
  66. ins_pricing/reporting/__init__.py +11 -11
  67. ins_pricing/setup.py +1 -1
  68. ins_pricing/tests/production/test_inference.py +90 -0
  69. ins_pricing/utils/__init__.py +112 -78
  70. ins_pricing/utils/device.py +258 -237
  71. ins_pricing/utils/features.py +53 -0
  72. ins_pricing/utils/io.py +72 -0
  73. ins_pricing/utils/logging.py +34 -1
  74. ins_pricing/{modelling/core/bayesopt/utils → utils}/losses.py +125 -129
  75. ins_pricing/utils/metrics.py +158 -24
  76. ins_pricing/utils/numerics.py +76 -0
  77. ins_pricing/utils/paths.py +9 -1
  78. ins_pricing/utils/profiling.py +8 -4
  79. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/METADATA +1 -1
  80. ins_pricing-0.5.1.dist-info/RECORD +132 -0
  81. ins_pricing/modelling/core/BayesOpt.py +0 -146
  82. ins_pricing/modelling/core/__init__.py +0 -1
  83. ins_pricing/modelling/core/bayesopt/trainers/__init__.py +0 -19
  84. ins_pricing/modelling/core/bayesopt/utils/__init__.py +0 -86
  85. ins_pricing/modelling/core/bayesopt/utils/constants.py +0 -183
  86. ins_pricing/modelling/core/bayesopt/utils/io_utils.py +0 -126
  87. ins_pricing/modelling/core/bayesopt/utils/metrics_and_devices.py +0 -555
  88. ins_pricing/modelling/core/bayesopt/utils.py +0 -105
  89. ins_pricing/modelling/core/bayesopt/utils_backup.py +0 -1503
  90. ins_pricing/tests/production/test_predict.py +0 -233
  91. ins_pricing-0.4.5.dist-info/RECORD +0 -130
  92. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/WHEEL +0 -0
  93. {ins_pricing-0.4.5.dist-info → ins_pricing-0.5.1.dist-info}/top_level.txt +0 -0
ins_pricing/modelling/bayesopt/utils/__init__.py (new file)
@@ -0,0 +1,67 @@
+ """Backward compatibility re-exports for bayesopt utilities.
+
+ This module keeps legacy imports working while routing general helpers
+ through ins_pricing.utils and leaving bayesopt-specific utilities in place.
+ """
+
+ from __future__ import annotations
+
+ # Constants and simple utilities
+ from ins_pricing.modelling.bayesopt.utils.constants import (
+     EPS,
+     set_global_seed,
+     ensure_parent_dir,
+     compute_batch_size,
+     tweedie_loss,
+     infer_factor_and_cate_list,
+ )
+
+ # I/O utilities
+ from ins_pricing.modelling.bayesopt.utils.io_utils import (
+     IOUtils,
+     csv_to_dict,
+ )
+
+ # Distributed training
+ from ins_pricing.modelling.bayesopt.utils.distributed_utils import (
+     DistributedUtils,
+     TrainingUtils,
+     free_cuda,
+ )
+
+ # PyTorch training mixin
+ from ins_pricing.modelling.bayesopt.utils.torch_trainer_mixin import (
+     TorchTrainerMixin,
+ )
+
+ # Metrics and device helpers (shared utilities)
+ from ins_pricing.modelling.bayesopt.utils.metrics_and_devices import (
+     get_logger,
+     MetricFactory,
+     GPUMemoryManager,
+     DeviceManager,
+ )
+
+ __all__ = [
+     # Constants
+     'EPS',
+     'set_global_seed',
+     'ensure_parent_dir',
+     'compute_batch_size',
+     'tweedie_loss',
+     'infer_factor_and_cate_list',
+     # I/O
+     'IOUtils',
+     'csv_to_dict',
+     # Distributed
+     'DistributedUtils',
+     'TrainingUtils',
+     'free_cuda',
+     # PyTorch
+     'TorchTrainerMixin',
+     # Utilities
+     'get_logger',
+     'MetricFactory',
+     'GPUMemoryManager',
+     'DeviceManager',
+ ]
ins_pricing/modelling/bayesopt/utils/constants.py (new file)
@@ -0,0 +1,21 @@
+ """Backward-compatible re-exports for numerical utilities."""
+
+ from __future__ import annotations
+
+ from ins_pricing.utils.features import infer_factor_and_cate_list
+ from ins_pricing.utils.io import ensure_parent_dir
+ from ins_pricing.utils.numerics import (
+     EPS,
+     compute_batch_size,
+     set_global_seed,
+     tweedie_loss,
+ )
+
+ __all__ = [
+     "EPS",
+     "set_global_seed",
+     "ensure_parent_dir",
+     "compute_batch_size",
+     "tweedie_loss",
+     "infer_factor_and_cate_list",
+ ]
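
Taken together, the two shims above mean the legacy bayesopt import path and the new ins_pricing.utils locations resolve to the same objects. A minimal smoke-test sketch of that equivalence, assuming ins-pricing 0.5.1 is installed (the asserts are illustrative and not part of the package):

```python
# Hypothetical check: the legacy bayesopt path should re-export the same
# objects that now live under ins_pricing.utils (assumes ins-pricing 0.5.1).
from ins_pricing.modelling.bayesopt.utils import EPS, set_global_seed, tweedie_loss
from ins_pricing.utils.numerics import (
    EPS as new_eps,
    set_global_seed as new_set_global_seed,
    tweedie_loss as new_tweedie_loss,
)

# Because the old modules only re-export, both paths should bind the same objects.
assert EPS is new_eps
assert set_global_seed is new_set_global_seed
assert tweedie_loss is new_tweedie_loss
```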
ins_pricing/modelling/bayesopt/utils/distributed_utils.py
@@ -1,186 +1,193 @@
- """Distributed training utilities for PyTorch DDP.
-
- This module contains:
- - DistributedUtils for DDP setup and process coordination
- - TrainingUtils for CUDA memory management
- - free_cuda() for legacy compatibility
- """
-
- from __future__ import annotations
-
- import gc
- import os
- from datetime import timedelta
- from typing import Optional
-
- import torch
- import torch.distributed as dist
-
-
- def _select_ddp_backend() -> str:
-     """Select the appropriate DDP backend based on system capabilities.
-
-     Returns:
-         "nccl" if CUDA is available and NCCL is supported (non-Windows),
-         otherwise "gloo"
-     """
-     if not torch.cuda.is_available():
-         return "gloo"
-
-     if os.name == "nt":  # Windows doesn't support NCCL
-         return "gloo"
-
-     try:
-         nccl_available = getattr(dist, "is_nccl_available", lambda: False)()
-         return "nccl" if nccl_available else "gloo"
-     except Exception:
-         return "gloo"
-
-
- def _get_ddp_timeout() -> timedelta:
-     """Get the DDP timeout from environment variable.
-
-     Returns:
-         timedelta for DDP timeout (default: 1800 seconds)
-     """
-     timeout_seconds = int(os.environ.get("BAYESOPT_DDP_TIMEOUT_SECONDS", "1800"))
-     return timedelta(seconds=max(1, timeout_seconds))
-
-
- def _cache_ddp_state(local_rank: int, rank: int, world_size: int) -> tuple:
-     """Cache and return DDP state tuple."""
-     state = (True, local_rank, rank, world_size)
-     DistributedUtils._cached_state = state
-     return state
-
-
- class DistributedUtils:
-     """Utilities for distributed data parallel training.
-
-     This class provides methods for:
-     - Initializing DDP process groups
-     - Checking process rank and world size
-     - Cleanup after distributed training
-     """
-
-     _cached_state: Optional[tuple] = None
-
-     @staticmethod
-     def setup_ddp():
-         """Initialize the DDP process group for distributed training.
-
-         Returns:
-             Tuple of (success, local_rank, rank, world_size)
-         """
-         # Return cached state if already initialized
-         if dist.is_initialized():
-             if DistributedUtils._cached_state is None:
-                 DistributedUtils._cached_state = _cache_ddp_state(
-                     int(os.environ.get("LOCAL_RANK", 0)),
-                     dist.get_rank(),
-                     dist.get_world_size(),
-                 )
-             return DistributedUtils._cached_state
-
-         # Check for required environment variables
-         if 'RANK' not in os.environ or 'WORLD_SIZE' not in os.environ:
-             print(
-                 f">>> DDP Setup Failed: RANK or WORLD_SIZE not found in env. "
-                 f"Keys found: {list(os.environ.keys())}"
-             )
-             print(">>> Hint: launch with torchrun --nproc_per_node=<N> <script.py>")
-             return False, 0, 0, 1
-
-         rank = int(os.environ["RANK"])
-         world_size = int(os.environ["WORLD_SIZE"])
-         local_rank = int(os.environ.get("LOCAL_RANK", 0))
-
-         # Windows CUDA DDP is not supported
-         if os.name == "nt" and torch.cuda.is_available() and world_size > 1:
-             print(
-                 ">>> DDP Setup Disabled: Windows CUDA DDP is not supported. "
-                 "Falling back to single process."
-             )
-             return False, 0, 0, 1
-
-         # Set CUDA device for this process
-         if torch.cuda.is_available():
-             torch.cuda.set_device(local_rank)
-
-         # Initialize process group
-         backend = _select_ddp_backend()
-         timeout = _get_ddp_timeout()
-
-         dist.init_process_group(backend=backend, init_method="env://", timeout=timeout)
-         print(
-             f">>> DDP Initialized ({backend}, timeout={timeout.total_seconds():.0f}s): "
-             f"Rank {rank}/{world_size}, Local Rank {local_rank}"
-         )
-
-         return _cache_ddp_state(local_rank, rank, world_size)
-
-     @staticmethod
-     def cleanup_ddp():
-         """Destroy the DDP process group and clear cached state."""
-         if dist.is_initialized():
-             dist.destroy_process_group()
-         DistributedUtils._cached_state = None
-
-     @staticmethod
-     def is_main_process():
-         """Check if current process is rank 0 (main process).
-
-         Returns:
-             True if main process or DDP not initialized
-         """
-         return not dist.is_initialized() or dist.get_rank() == 0
-
-     @staticmethod
-     def world_size() -> int:
-         """Get the total number of processes in the distributed group.
-
-         Returns:
-             World size (1 if DDP not initialized)
-         """
-         return dist.get_world_size() if dist.is_initialized() else 1
-
-
- class TrainingUtils:
-     """General training utilities including CUDA management."""
-
-     @staticmethod
-     def free_cuda() -> None:
-         """Release CUDA memory and clear cache.
-
-         This performs aggressive cleanup:
-         1. Move all PyTorch models to CPU
-         2. Run garbage collection
-         3. Clear CUDA cache
-         """
-         print(">>> Moving all models to CPU...")
-         for obj in gc.get_objects():
-             try:
-                 if hasattr(obj, "to") and callable(obj.to):
-                     obj.to("cpu")
-             except Exception:
-                 pass
-
-         print(">>> Releasing tensor/optimizer/DataLoader references...")
-         gc.collect()
-
-         print(">>> Clearing CUDA cache...")
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             torch.cuda.synchronize()
-             print(">>> CUDA memory released.")
-         else:
-             print(">>> CUDA not available; cleanup skipped.")
-
-
- # Backward compatibility function wrapper
- def free_cuda():
-     """Legacy function wrapper for CUDA memory cleanup.
-
-     This function calls TrainingUtils.free_cuda() for backward compatibility.
-     """
-     TrainingUtils.free_cuda()
+ """Distributed training utilities for PyTorch DDP.
+
+ This module contains:
+ - DistributedUtils for DDP setup and process coordination
+ - TrainingUtils for CUDA memory management
+ - free_cuda() for legacy compatibility
+ """
+
+ from __future__ import annotations
+
+ import gc
+ import os
+ from datetime import timedelta
+ from typing import Optional
+
+ import torch
+ import torch.distributed as dist
+ from ins_pricing.utils import get_logger, log_print
+
+ _logger = get_logger("ins_pricing.modelling.bayesopt.utils.distributed_utils")
+
+
+ def _log(*args, **kwargs) -> None:
+     log_print(_logger, *args, **kwargs)
+
+
+ def _select_ddp_backend() -> str:
+     """Select the appropriate DDP backend based on system capabilities.
+
+     Returns:
+         "nccl" if CUDA is available and NCCL is supported (non-Windows),
+         otherwise "gloo"
+     """
+     if not torch.cuda.is_available():
+         return "gloo"
+
+     if os.name == "nt":  # Windows doesn't support NCCL
+         return "gloo"
+
+     try:
+         nccl_available = getattr(dist, "is_nccl_available", lambda: False)()
+         return "nccl" if nccl_available else "gloo"
+     except Exception:
+         return "gloo"
+
+
+ def _get_ddp_timeout() -> timedelta:
+     """Get the DDP timeout from environment variable.
+
+     Returns:
+         timedelta for DDP timeout (default: 1800 seconds)
+     """
+     timeout_seconds = int(os.environ.get("BAYESOPT_DDP_TIMEOUT_SECONDS", "1800"))
+     return timedelta(seconds=max(1, timeout_seconds))
+
+
+ def _cache_ddp_state(local_rank: int, rank: int, world_size: int) -> tuple:
+     """Cache and return DDP state tuple."""
+     state = (True, local_rank, rank, world_size)
+     DistributedUtils._cached_state = state
+     return state
+
+
+ class DistributedUtils:
+     """Utilities for distributed data parallel training.
+
+     This class provides methods for:
+     - Initializing DDP process groups
+     - Checking process rank and world size
+     - Cleanup after distributed training
+     """
+
+     _cached_state: Optional[tuple] = None
+
+     @staticmethod
+     def setup_ddp():
+         """Initialize the DDP process group for distributed training.
+
+         Returns:
+             Tuple of (success, local_rank, rank, world_size)
+         """
+         # Return cached state if already initialized
+         if dist.is_initialized():
+             if DistributedUtils._cached_state is None:
+                 DistributedUtils._cached_state = _cache_ddp_state(
+                     int(os.environ.get("LOCAL_RANK", 0)),
+                     dist.get_rank(),
+                     dist.get_world_size(),
+                 )
+             return DistributedUtils._cached_state
+
+         # Check for required environment variables
+         if 'RANK' not in os.environ or 'WORLD_SIZE' not in os.environ:
+             _log(
+                 f">>> DDP Setup Failed: RANK or WORLD_SIZE not found in env. "
+                 f"Keys found: {list(os.environ.keys())}"
+             )
+             _log(">>> Hint: launch with torchrun --nproc_per_node=<N> <script.py>")
+             return False, 0, 0, 1
+
+         rank = int(os.environ["RANK"])
+         world_size = int(os.environ["WORLD_SIZE"])
+         local_rank = int(os.environ.get("LOCAL_RANK", 0))
+
+         # Windows CUDA DDP is not supported
+         if os.name == "nt" and torch.cuda.is_available() and world_size > 1:
+             _log(
+                 ">>> DDP Setup Disabled: Windows CUDA DDP is not supported. "
+                 "Falling back to single process."
+             )
+             return False, 0, 0, 1
+
+         # Set CUDA device for this process
+         if torch.cuda.is_available():
+             torch.cuda.set_device(local_rank)
+
+         # Initialize process group
+         backend = _select_ddp_backend()
+         timeout = _get_ddp_timeout()
+
+         dist.init_process_group(backend=backend, init_method="env://", timeout=timeout)
+         _log(
+             f">>> DDP Initialized ({backend}, timeout={timeout.total_seconds():.0f}s): "
+             f"Rank {rank}/{world_size}, Local Rank {local_rank}"
+         )
+
+         return _cache_ddp_state(local_rank, rank, world_size)
+
+     @staticmethod
+     def cleanup_ddp():
+         """Destroy the DDP process group and clear cached state."""
+         if dist.is_initialized():
+             dist.destroy_process_group()
+         DistributedUtils._cached_state = None
+
+     @staticmethod
+     def is_main_process():
+         """Check if current process is rank 0 (main process).
+
+         Returns:
+             True if main process or DDP not initialized
+         """
+         return not dist.is_initialized() or dist.get_rank() == 0
+
+     @staticmethod
+     def world_size() -> int:
+         """Get the total number of processes in the distributed group.
+
+         Returns:
+             World size (1 if DDP not initialized)
+         """
+         return dist.get_world_size() if dist.is_initialized() else 1
+
+
+ class TrainingUtils:
+     """General training utilities including CUDA management."""
+
+     @staticmethod
+     def free_cuda() -> None:
+         """Release CUDA memory and clear cache.
+
+         This performs aggressive cleanup:
+         1. Move all PyTorch models to CPU
+         2. Run garbage collection
+         3. Clear CUDA cache
+         """
+         _log(">>> Moving all models to CPU...")
+         for obj in gc.get_objects():
+             try:
+                 if hasattr(obj, "to") and callable(obj.to):
+                     obj.to("cpu")
+             except Exception:
+                 pass
+
+         _log(">>> Releasing tensor/optimizer/DataLoader references...")
+         gc.collect()
+
+         _log(">>> Clearing CUDA cache...")
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+             torch.cuda.synchronize()
+             _log(">>> CUDA memory released.")
+         else:
+             _log(">>> CUDA not available; cleanup skipped.")
+
+
+ # Backward compatibility function wrapper
+ def free_cuda():
+     """Legacy function wrapper for CUDA memory cleanup.
+
+     This function calls TrainingUtils.free_cuda() for backward compatibility.
+     """
+     TrainingUtils.free_cuda()
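
The setup_ddp contract is unchanged in 0.5.1: it returns a (success, local_rank, rank, world_size) tuple and falls back to (False, 0, 0, 1) when the torchrun environment variables are absent; only the console output now goes through the package logger. A minimal caller sketch, assuming ins-pricing 0.5.1 and PyTorch are installed (the surrounding script is hypothetical, not part of the package):

```python
# Hypothetical caller exercising the setup_ddp()/cleanup_ddp() contract
# shown in the diff above (assumes ins-pricing 0.5.1 and torch).
from ins_pricing.modelling.bayesopt.utils.distributed_utils import DistributedUtils

ok, local_rank, rank, world_size = DistributedUtils.setup_ddp()
if not ok:
    # Without RANK/WORLD_SIZE in the environment (i.e. not launched via
    # `torchrun --nproc_per_node=<N> script.py`), setup_ddp returns
    # (False, 0, 0, 1) and the job proceeds single-process.
    print("Running single-process.")

try:
    if DistributedUtils.is_main_process():
        print(f"World size: {DistributedUtils.world_size()}")
    # ... training loop would go here ...
finally:
    DistributedUtils.cleanup_ddp()
```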
ins_pricing/modelling/bayesopt/utils/io_utils.py (new file)
@@ -0,0 +1,7 @@
+ """Backward-compatible re-exports for IO utilities."""
+
+ from __future__ import annotations
+
+ from ins_pricing.utils.io import IOUtils, csv_to_dict, ensure_parent_dir
+
+ __all__ = ["IOUtils", "csv_to_dict", "ensure_parent_dir"]
ins_pricing/modelling/bayesopt/utils/losses.py (new file)
@@ -0,0 +1,27 @@
+ """Backward-compatible re-exports for loss utilities."""
+
+ from __future__ import annotations
+
+ from ins_pricing.utils.losses import (
+     CLASSIFICATION_LOSSES,
+     LOSS_ALIASES,
+     REGRESSION_LOSSES,
+     infer_loss_name_from_model_name,
+     loss_requires_positive,
+     normalize_loss_name,
+     regression_loss,
+     resolve_tweedie_power,
+     resolve_xgb_objective,
+ )
+
+ __all__ = [
+     "LOSS_ALIASES",
+     "REGRESSION_LOSSES",
+     "CLASSIFICATION_LOSSES",
+     "normalize_loss_name",
+     "infer_loss_name_from_model_name",
+     "resolve_tweedie_power",
+     "resolve_xgb_objective",
+     "regression_loss",
+     "loss_requires_positive",
+ ]
ins_pricing/modelling/bayesopt/utils/metrics_and_devices.py (new file)
@@ -0,0 +1,17 @@
+ """Backward-compatible re-exports for metrics and device utilities."""
+
+ from __future__ import annotations
+
+ from ins_pricing.utils import (
+     DeviceManager,
+     GPUMemoryManager,
+     MetricFactory,
+     get_logger,
+ )
+
+ __all__ = [
+     "get_logger",
+     "MetricFactory",
+     "GPUMemoryManager",
+     "DeviceManager",
+ ]
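
The logging helpers used throughout these modules (get_logger, log_print) now live in ins_pricing.utils, with the old bayesopt path re-exporting get_logger. A minimal sketch of the same pattern the new distributed_utils uses, assuming ins-pricing 0.5.1 is installed (the logger name and messages here are illustrative, and log_print's exact output formatting is an internal detail of the package):

```python
# Hypothetical logging sketch mirroring the _logger/_log pattern from the
# distributed_utils diff above (assumes ins-pricing 0.5.1).
from ins_pricing.utils import get_logger, log_print

logger = get_logger("my_project.training")
# log_print takes a logger followed by print-style positional arguments.
log_print(logger, ">>> Starting run", "fold=1")
```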