ins-pricing 0.1.11-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff shows the changes between two publicly available package versions as released to a supported public registry. It is provided for informational purposes only.
- ins_pricing/README.md +9 -6
- ins_pricing/__init__.py +3 -11
- ins_pricing/cli/BayesOpt_entry.py +24 -0
- ins_pricing/{modelling → cli}/BayesOpt_incremental.py +197 -64
- ins_pricing/cli/Explain_Run.py +25 -0
- ins_pricing/{modelling → cli}/Explain_entry.py +169 -124
- ins_pricing/cli/Pricing_Run.py +25 -0
- ins_pricing/cli/__init__.py +1 -0
- ins_pricing/cli/bayesopt_entry_runner.py +1312 -0
- ins_pricing/cli/utils/__init__.py +1 -0
- ins_pricing/cli/utils/cli_common.py +320 -0
- ins_pricing/cli/utils/cli_config.py +375 -0
- ins_pricing/{modelling → cli/utils}/notebook_utils.py +74 -19
- {ins_pricing_gemini/modelling → ins_pricing/cli}/watchdog_run.py +2 -2
- ins_pricing/{modelling → docs/modelling}/BayesOpt_USAGE.md +69 -49
- ins_pricing/docs/modelling/README.md +34 -0
- ins_pricing/modelling/__init__.py +57 -6
- ins_pricing/modelling/core/__init__.py +1 -0
- ins_pricing/modelling/{bayesopt → core/bayesopt}/config_preprocess.py +64 -1
- ins_pricing/modelling/{bayesopt → core/bayesopt}/core.py +150 -810
- ins_pricing/modelling/core/bayesopt/model_explain_mixin.py +296 -0
- ins_pricing/modelling/core/bayesopt/model_plotting_mixin.py +548 -0
- ins_pricing/modelling/core/bayesopt/models/__init__.py +27 -0
- ins_pricing/modelling/core/bayesopt/models/model_ft_components.py +316 -0
- ins_pricing/modelling/core/bayesopt/models/model_ft_trainer.py +808 -0
- ins_pricing/modelling/core/bayesopt/models/model_gnn.py +675 -0
- ins_pricing/modelling/core/bayesopt/models/model_resn.py +435 -0
- ins_pricing/modelling/core/bayesopt/trainers/__init__.py +19 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_base.py +1020 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_ft.py +787 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_glm.py +195 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_gnn.py +312 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_resn.py +261 -0
- ins_pricing/modelling/core/bayesopt/trainers/trainer_xgb.py +348 -0
- ins_pricing/modelling/{bayesopt → core/bayesopt}/utils.py +2 -2
- ins_pricing/modelling/core/evaluation.py +115 -0
- ins_pricing/production/__init__.py +4 -0
- ins_pricing/production/preprocess.py +71 -0
- ins_pricing/setup.py +10 -5
- {ins_pricing_gemini/modelling/tests → ins_pricing/tests/modelling}/test_plotting.py +2 -2
- {ins_pricing-0.1.11.dist-info → ins_pricing-0.2.0.dist-info}/METADATA +4 -4
- ins_pricing-0.2.0.dist-info/RECORD +125 -0
- {ins_pricing-0.1.11.dist-info → ins_pricing-0.2.0.dist-info}/top_level.txt +0 -1
- ins_pricing/modelling/BayesOpt_entry.py +0 -633
- ins_pricing/modelling/Explain_Run.py +0 -36
- ins_pricing/modelling/Pricing_Run.py +0 -36
- ins_pricing/modelling/README.md +0 -33
- ins_pricing/modelling/bayesopt/models.py +0 -2196
- ins_pricing/modelling/bayesopt/trainers.py +0 -2446
- ins_pricing/modelling/cli_common.py +0 -136
- ins_pricing/modelling/tests/test_plotting.py +0 -63
- ins_pricing/modelling/watchdog_run.py +0 -211
- ins_pricing-0.1.11.dist-info/RECORD +0 -169
- ins_pricing_gemini/__init__.py +0 -23
- ins_pricing_gemini/governance/__init__.py +0 -20
- ins_pricing_gemini/governance/approval.py +0 -93
- ins_pricing_gemini/governance/audit.py +0 -37
- ins_pricing_gemini/governance/registry.py +0 -99
- ins_pricing_gemini/governance/release.py +0 -159
- ins_pricing_gemini/modelling/Explain_Run.py +0 -36
- ins_pricing_gemini/modelling/Pricing_Run.py +0 -36
- ins_pricing_gemini/modelling/__init__.py +0 -151
- ins_pricing_gemini/modelling/cli_common.py +0 -141
- ins_pricing_gemini/modelling/config.py +0 -249
- ins_pricing_gemini/modelling/config_preprocess.py +0 -254
- ins_pricing_gemini/modelling/core.py +0 -741
- ins_pricing_gemini/modelling/data_container.py +0 -42
- ins_pricing_gemini/modelling/explain/__init__.py +0 -55
- ins_pricing_gemini/modelling/explain/gradients.py +0 -334
- ins_pricing_gemini/modelling/explain/metrics.py +0 -176
- ins_pricing_gemini/modelling/explain/permutation.py +0 -155
- ins_pricing_gemini/modelling/explain/shap_utils.py +0 -146
- ins_pricing_gemini/modelling/features.py +0 -215
- ins_pricing_gemini/modelling/model_manager.py +0 -148
- ins_pricing_gemini/modelling/model_plotting.py +0 -463
- ins_pricing_gemini/modelling/models.py +0 -2203
- ins_pricing_gemini/modelling/notebook_utils.py +0 -294
- ins_pricing_gemini/modelling/plotting/__init__.py +0 -45
- ins_pricing_gemini/modelling/plotting/common.py +0 -63
- ins_pricing_gemini/modelling/plotting/curves.py +0 -572
- ins_pricing_gemini/modelling/plotting/diagnostics.py +0 -139
- ins_pricing_gemini/modelling/plotting/geo.py +0 -362
- ins_pricing_gemini/modelling/plotting/importance.py +0 -121
- ins_pricing_gemini/modelling/run_logging.py +0 -133
- ins_pricing_gemini/modelling/tests/conftest.py +0 -8
- ins_pricing_gemini/modelling/tests/test_cross_val_generic.py +0 -66
- ins_pricing_gemini/modelling/tests/test_distributed_utils.py +0 -18
- ins_pricing_gemini/modelling/tests/test_explain.py +0 -56
- ins_pricing_gemini/modelling/tests/test_geo_tokens_split.py +0 -49
- ins_pricing_gemini/modelling/tests/test_graph_cache.py +0 -33
- ins_pricing_gemini/modelling/tests/test_plotting_library.py +0 -150
- ins_pricing_gemini/modelling/tests/test_preprocessor.py +0 -48
- ins_pricing_gemini/modelling/trainers.py +0 -2447
- ins_pricing_gemini/modelling/utils.py +0 -1020
- ins_pricing_gemini/pricing/__init__.py +0 -27
- ins_pricing_gemini/pricing/calibration.py +0 -39
- ins_pricing_gemini/pricing/data_quality.py +0 -117
- ins_pricing_gemini/pricing/exposure.py +0 -85
- ins_pricing_gemini/pricing/factors.py +0 -91
- ins_pricing_gemini/pricing/monitoring.py +0 -99
- ins_pricing_gemini/pricing/rate_table.py +0 -78
- ins_pricing_gemini/production/__init__.py +0 -21
- ins_pricing_gemini/production/drift.py +0 -30
- ins_pricing_gemini/production/monitoring.py +0 -143
- ins_pricing_gemini/production/scoring.py +0 -40
- ins_pricing_gemini/reporting/__init__.py +0 -11
- ins_pricing_gemini/reporting/report_builder.py +0 -72
- ins_pricing_gemini/reporting/scheduler.py +0 -45
- ins_pricing_gemini/scripts/BayesOpt_incremental.py +0 -722
- ins_pricing_gemini/scripts/Explain_entry.py +0 -545
- ins_pricing_gemini/scripts/__init__.py +0 -1
- ins_pricing_gemini/scripts/train.py +0 -568
- ins_pricing_gemini/setup.py +0 -55
- ins_pricing_gemini/smoke_test.py +0 -28
- /ins_pricing/{modelling → cli/utils}/run_logging.py +0 -0
- /ins_pricing/modelling/{BayesOpt.py → core/BayesOpt.py} +0 -0
- /ins_pricing/modelling/{bayesopt → core/bayesopt}/__init__.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/conftest.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_cross_val_generic.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_distributed_utils.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_explain.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_geo_tokens_split.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_graph_cache.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_plotting_library.py +0 -0
- /ins_pricing/{modelling/tests → tests/modelling}/test_preprocessor.py +0 -0
- {ins_pricing-0.1.11.dist-info → ins_pricing-0.2.0.dist-info}/WHEEL +0 -0
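Taken together, the listing shows a relayout of the main package plus the removal of the experimental ins_pricing_gemini tree: CLI entry points move from ins_pricing/modelling/ into ins_pricing/cli/, the BayesOpt internals move under ins_pricing/modelling/core/bayesopt/ with models and trainers split into per-model files, tests move to ins_pricing/tests/modelling/, and usage docs to ins_pricing/docs/modelling/. A minimal before/after import sketch, assuming importable module paths simply mirror the file moves above (the 0.2.0 paths are inferred, not shown in this diff):

```python
# 0.1.11 layout (modelling hosted both the core and the CLI wrappers):
# from ins_pricing.modelling.bayesopt import core
# from ins_pricing.modelling import Pricing_Run

# 0.2.0 layout inferred from the file moves above (assumption, not part of the diff):
from ins_pricing.modelling.core.bayesopt import core      # modelling/{bayesopt -> core/bayesopt}/core.py
from ins_pricing.cli import Pricing_Run, Explain_Run      # new CLI package
from ins_pricing.cli.utils import cli_common, cli_config  # shared CLI helpers
```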
@@ -1,93 +0,0 @@
-from __future__ import annotations
-
-import json
-from dataclasses import asdict, dataclass, field
-from datetime import datetime
-from pathlib import Path
-from typing import List, Optional
-
-
-@dataclass
-class ApprovalAction:
-    actor: str
-    decision: str
-    timestamp: str
-    comment: Optional[str] = None
-
-
-@dataclass
-class ApprovalRequest:
-    model_name: str
-    model_version: str
-    requested_by: str
-    requested_at: str
-    status: str = "pending"
-    actions: List[ApprovalAction] = field(default_factory=list)
-
-
-class ApprovalStore:
-    """Simple approval workflow stored as JSON."""
-
-    def __init__(self, store_path: str | Path):
-        self.store_path = Path(store_path)
-        self.store_path.parent.mkdir(parents=True, exist_ok=True)
-
-    def _load(self) -> List[dict]:
-        if not self.store_path.exists():
-            return []
-        with self.store_path.open("r", encoding="utf-8") as fh:
-            return json.load(fh)
-
-    def _save(self, payload: List[dict]) -> None:
-        with self.store_path.open("w", encoding="utf-8") as fh:
-            json.dump(payload, fh, indent=2, ensure_ascii=True)
-
-    def request(self, model_name: str, model_version: str, requested_by: str) -> ApprovalRequest:
-        payload = self._load()
-        req = ApprovalRequest(
-            model_name=model_name,
-            model_version=model_version,
-            requested_by=requested_by,
-            requested_at=datetime.utcnow().isoformat(),
-        )
-        payload.append(asdict(req))
-        self._save(payload)
-        return req
-
-    def list_requests(self, model_name: Optional[str] = None) -> List[ApprovalRequest]:
-        payload = self._load()
-        requests = [ApprovalRequest(**entry) for entry in payload]
-        if model_name is None:
-            return requests
-        return [req for req in requests if req.model_name == model_name]
-
-    def act(
-        self,
-        model_name: str,
-        model_version: str,
-        *,
-        actor: str,
-        decision: str,
-        comment: Optional[str] = None,
-    ) -> ApprovalRequest:
-        payload = self._load()
-        found = None
-        for entry in payload:
-            if entry["model_name"] == model_name and entry["model_version"] == model_version:
-                found = entry
-                break
-        if found is None:
-            raise ValueError("Approval request not found.")
-        action = ApprovalAction(
-            actor=actor,
-            decision=decision,
-            timestamp=datetime.utcnow().isoformat(),
-            comment=comment,
-        )
-        found["actions"].append(asdict(action))
-        if decision.lower() in {"approve", "approved"}:
-            found["status"] = "approved"
-        elif decision.lower() in {"reject", "rejected"}:
-            found["status"] = "rejected"
-        self._save(payload)
-        return ApprovalRequest(**found)
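The 93-line hunk above matches ins_pricing_gemini/governance/approval.py in the listing (same line count). For reference, a minimal sketch of how the removed ApprovalStore API was driven; the store path and model names here are invented:

```python
from ins_pricing_gemini.governance.approval import ApprovalStore  # removed in 0.2.0

store = ApprovalStore("artifacts/approvals.json")
store.request("freq_model", "1.4.0", requested_by="analyst")
store.act("freq_model", "1.4.0", actor="lead", decision="approve", comment="metrics reviewed")
print([req.status for req in store.list_requests("freq_model")])  # ['approved']
```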
@@ -1,37 +0,0 @@
-from __future__ import annotations
-
-import json
-from dataclasses import asdict, dataclass
-from datetime import datetime
-from pathlib import Path
-from typing import Any, Dict, Optional
-
-
-@dataclass
-class AuditEvent:
-    action: str
-    actor: str
-    timestamp: str
-    metadata: Dict[str, Any]
-    note: Optional[str] = None
-
-
-class AuditLogger:
-    """Append-only JSONL audit log."""
-
-    def __init__(self, log_path: str | Path):
-        self.log_path = Path(log_path)
-        self.log_path.parent.mkdir(parents=True, exist_ok=True)
-
-    def log(self, action: str, actor: str, *, metadata: Optional[Dict[str, Any]] = None,
-            note: Optional[str] = None) -> AuditEvent:
-        event = AuditEvent(
-            action=action,
-            actor=actor,
-            timestamp=datetime.utcnow().isoformat(),
-            metadata=metadata or {},
-            note=note,
-        )
-        with self.log_path.open("a", encoding="utf-8") as fh:
-            fh.write(json.dumps(asdict(event), ensure_ascii=True) + "\n")
-        return event
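The 37-line hunk above matches ins_pricing_gemini/governance/audit.py. A one-call sketch (path and metadata invented); each log() call appends a single JSON object per line to the JSONL file:

```python
from ins_pricing_gemini.governance.audit import AuditLogger  # removed in 0.2.0

audit = AuditLogger("artifacts/audit.jsonl")
audit.log("deploy", "ci-bot", metadata={"env": "prod", "version": "1.4.0"}, note="scheduled release")
```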
@@ -1,99 +0,0 @@
-from __future__ import annotations
-
-import json
-from dataclasses import asdict, dataclass, field
-from datetime import datetime
-from pathlib import Path
-from typing import Dict, List, Optional
-
-
-@dataclass
-class ModelArtifact:
-    path: str
-    description: Optional[str] = None
-
-
-@dataclass
-class ModelVersion:
-    name: str
-    version: str
-    created_at: str
-    metrics: Dict[str, float] = field(default_factory=dict)
-    tags: Dict[str, str] = field(default_factory=dict)
-    artifacts: List[ModelArtifact] = field(default_factory=list)
-    status: str = "candidate"
-    notes: Optional[str] = None
-
-
-class ModelRegistry:
-    """Lightweight JSON-based model registry."""
-
-    def __init__(self, registry_path: str | Path):
-        self.registry_path = Path(registry_path)
-        self.registry_path.parent.mkdir(parents=True, exist_ok=True)
-
-    def _load(self) -> Dict[str, List[dict]]:
-        if not self.registry_path.exists():
-            return {}
-        with self.registry_path.open("r", encoding="utf-8") as fh:
-            return json.load(fh)
-
-    def _save(self, payload: Dict[str, List[dict]]) -> None:
-        with self.registry_path.open("w", encoding="utf-8") as fh:
-            json.dump(payload, fh, indent=2, ensure_ascii=True)
-
-    def register(
-        self,
-        name: str,
-        version: str,
-        *,
-        metrics: Optional[Dict[str, float]] = None,
-        tags: Optional[Dict[str, str]] = None,
-        artifacts: Optional[List[ModelArtifact]] = None,
-        status: str = "candidate",
-        notes: Optional[str] = None,
-    ) -> ModelVersion:
-        payload = self._load()
-        created_at = datetime.utcnow().isoformat()
-        entry = ModelVersion(
-            name=name,
-            version=version,
-            created_at=created_at,
-            metrics=metrics or {},
-            tags=tags or {},
-            artifacts=artifacts or [],
-            status=status,
-            notes=notes,
-        )
-        payload.setdefault(name, []).append(asdict(entry))
-        self._save(payload)
-        return entry
-
-    def list_versions(self, name: str) -> List[ModelVersion]:
-        payload = self._load()
-        versions = payload.get(name, [])
-        return [ModelVersion(**v) for v in versions]
-
-    def get_version(self, name: str, version: str) -> Optional[ModelVersion]:
-        for entry in self.list_versions(name):
-            if entry.version == version:
-                return entry
-        return None
-
-    def promote(
-        self, name: str, version: str, *, new_status: str = "production"
-    ) -> None:
-        payload = self._load()
-        if name not in payload:
-            raise ValueError("Model not found in registry.")
-        updated = False
-        for entry in payload[name]:
-            if entry["version"] == version:
-                entry["status"] = new_status
-                updated = True
-            elif new_status == "production":
-                if entry.get("status") == "production":
-                    entry["status"] = "archived"
-        if not updated:
-            raise ValueError("Version not found in registry.")
-        self._save(payload)
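The 99-line hunk above matches ins_pricing_gemini/governance/registry.py. A sketch of its register/promote semantics (names and metrics invented); promoting a version to production archives whichever version previously held that status:

```python
from ins_pricing_gemini.governance.registry import ModelRegistry  # removed in 0.2.0

registry = ModelRegistry("artifacts/registry.json")
registry.register("freq_model", "1.3.0", metrics={"gini": 0.41}, status="production")
registry.register("freq_model", "1.4.0", metrics={"gini": 0.43})
registry.promote("freq_model", "1.4.0")                    # 1.4.0 -> production, 1.3.0 -> archived
print(registry.get_version("freq_model", "1.3.0").status)  # 'archived'
```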
@@ -1,159 +0,0 @@
-from __future__ import annotations
-
-import json
-from dataclasses import asdict, dataclass, field
-from datetime import datetime
-from pathlib import Path
-from typing import List, Optional
-
-from .audit import AuditLogger
-from .registry import ModelRegistry
-
-
-@dataclass
-class ModelRef:
-    name: str
-    version: str
-    activated_at: str
-    actor: Optional[str] = None
-    note: Optional[str] = None
-
-
-@dataclass
-class DeploymentState:
-    env: str
-    active: Optional[ModelRef] = None
-    history: List[ModelRef] = field(default_factory=list)
-    updated_at: Optional[str] = None
-
-
-class ReleaseManager:
-    """Environment release manager with rollback support."""
-
-    def __init__(
-        self,
-        state_dir: str | Path,
-        *,
-        registry: Optional[ModelRegistry] = None,
-        audit_logger: Optional[AuditLogger] = None,
-    ):
-        self.state_dir = Path(state_dir)
-        self.state_dir.mkdir(parents=True, exist_ok=True)
-        self.registry = registry
-        self.audit_logger = audit_logger
-
-    def _state_path(self, env: str) -> Path:
-        return self.state_dir / f"{env}.json"
-
-    def _load(self, env: str) -> DeploymentState:
-        path = self._state_path(env)
-        if not path.exists():
-            return DeploymentState(env=env)
-        with path.open("r", encoding="utf-8") as fh:
-            payload = json.load(fh)
-        active = payload.get("active")
-        history = payload.get("history", [])
-        return DeploymentState(
-            env=payload.get("env", env),
-            active=ModelRef(**active) if active else None,
-            history=[ModelRef(**item) for item in history],
-            updated_at=payload.get("updated_at"),
-        )
-
-    def _save(self, state: DeploymentState) -> None:
-        payload = {
-            "env": state.env,
-            "active": asdict(state.active) if state.active else None,
-            "history": [asdict(item) for item in state.history],
-            "updated_at": state.updated_at,
-        }
-        path = self._state_path(state.env)
-        with path.open("w", encoding="utf-8") as fh:
-            json.dump(payload, fh, indent=2, ensure_ascii=True)
-
-    def get_active(self, env: str) -> Optional[ModelRef]:
-        state = self._load(env)
-        return state.active
-
-    def list_history(self, env: str) -> List[ModelRef]:
-        return self._load(env).history
-
-    def deploy(
-        self,
-        env: str,
-        name: str,
-        version: str,
-        *,
-        actor: Optional[str] = None,
-        note: Optional[str] = None,
-        update_registry_status: bool = True,
-        registry_status: str = "production",
-    ) -> DeploymentState:
-        state = self._load(env)
-        if state.active and state.active.name == name and state.active.version == version:
-            return state
-
-        if state.active is not None:
-            state.history.append(state.active)
-
-        now = datetime.utcnow().isoformat()
-        state.active = ModelRef(
-            name=name,
-            version=version,
-            activated_at=now,
-            actor=actor,
-            note=note,
-        )
-        state.updated_at = now
-        self._save(state)
-
-        if self.registry and update_registry_status:
-            self.registry.promote(name, version, new_status=registry_status)
-
-        if self.audit_logger:
-            self.audit_logger.log(
-                "deploy",
-                actor or "unknown",
-                metadata={"env": env, "name": name, "version": version},
-                note=note,
-            )
-
-        return state
-
-    def rollback(
-        self,
-        env: str,
-        *,
-        actor: Optional[str] = None,
-        note: Optional[str] = None,
-        update_registry_status: bool = False,
-        registry_status: str = "production",
-    ) -> DeploymentState:
-        state = self._load(env)
-        if not state.history:
-            raise ValueError("No history available to rollback.")
-
-        previous = state.history.pop()
-        now = datetime.utcnow().isoformat()
-        state.active = ModelRef(
-            name=previous.name,
-            version=previous.version,
-            activated_at=now,
-            actor=actor or previous.actor,
-            note=note or previous.note,
-        )
-        state.updated_at = now
-        self._save(state)
-
-        if self.registry and update_registry_status:
-            self.registry.promote(previous.name, previous.version, new_status=registry_status)
-
-        if self.audit_logger:
-            self.audit_logger.log(
-                "rollback",
-                actor or "unknown",
-                metadata={"env": env, "name": previous.name, "version": previous.version},
-                note=note,
-            )
-
-        return state
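The 159-line hunk above matches ins_pricing_gemini/governance/release.py. A sketch of the deploy/rollback flow with no registry or audit logger attached (both are optional); the environment and model names are invented:

```python
from ins_pricing_gemini.governance.release import ReleaseManager  # removed in 0.2.0

releases = ReleaseManager("artifacts/releases")               # one <env>.json state file per environment
releases.deploy("prod", "freq_model", "1.3.0", actor="lead")
releases.deploy("prod", "freq_model", "1.4.0", actor="lead")  # 1.3.0 is pushed onto history
releases.rollback("prod", actor="lead", note="latency regression")
print(releases.get_active("prod").version)                    # '1.3.0'
```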
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-import argparse
-from pathlib import Path
-from typing import Optional
-
-try:
-    from .notebook_utils import run_from_config  # type: ignore
-except Exception:  # pragma: no cover
-    from notebook_utils import run_from_config  # type: ignore
-
-
-def run(config_json: str | Path) -> None:
-    """Run explain by config.json (runner.mode=explain)."""
-    run_from_config(config_json)
-
-
-def _build_parser() -> argparse.ArgumentParser:
-    parser = argparse.ArgumentParser(
-        description="Explain_Run: run explain by config.json (runner.mode=explain)."
-    )
-    parser.add_argument(
-        "--config-json",
-        required=True,
-        help="Path to config.json (relative paths are resolved from ins_pricing/modelling/ when possible).",
-    )
-    return parser
-
-
-def main(argv: Optional[list[str]] = None) -> None:
-    args = _build_parser().parse_args(argv)
-    run(args.config_json)
-
-
-if __name__ == "__main__":
-    main()
@@ -1,36 +0,0 @@
-from __future__ import annotations
-
-import argparse
-from pathlib import Path
-from typing import Optional
-
-try:
-    from .notebook_utils import run_from_config  # type: ignore
-except Exception:  # pragma: no cover
-    from notebook_utils import run_from_config  # type: ignore
-
-
-def run(config_json: str | Path) -> None:
-    """Unified entry point: run entry/incremental/watchdog/DDP based on config.json runner."""
-    run_from_config(config_json)
-
-
-def _build_parser() -> argparse.ArgumentParser:
-    parser = argparse.ArgumentParser(
-        description="Pricing_Run: run BayesOpt by config.json (entry/incremental/watchdog/DDP)."
-    )
-    parser.add_argument(
-        "--config-json",
-        required=True,
-        help="Path to config.json (relative paths are resolved from ins_pricing/modelling/ when possible).",
-    )
-    return parser
-
-
-def main(argv: Optional[list[str]] = None) -> None:
-    args = _build_parser().parse_args(argv)
-    run(args.config_json)
-
-
-if __name__ == "__main__":
-    main()
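The two 36-line hunks above correspond to the removed Explain_Run.py and Pricing_Run.py wrappers; both ins_pricing/modelling/ and ins_pricing_gemini/modelling/ carried near-identical 36-line copies, and 0.2.0 reintroduces the wrappers under ins_pricing/cli/. Each wrapper only parses --config-json and delegates to run_from_config, so it can be driven programmatically as well as from the command line. A sketch against the 0.1.11 location (the config path is invented; the 0.2.0 cli interface is not shown in this diff):

```python
from ins_pricing.modelling.Pricing_Run import main  # 0.1.11 location; moved to ins_pricing/cli/ in 0.2.0

# Equivalent to: python -m ins_pricing.modelling.Pricing_Run --config-json configs/config.json
main(["--config-json", "configs/config.json"])       # dispatches via run_from_config(...)
```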
@@ -1,151 +0,0 @@
-"""Modelling subpackage for ins_pricing."""
-
-from __future__ import annotations
-import types
-import sys
-from importlib import import_module
-from pathlib import Path
-
-# Exports
-from .config import BayesOptConfig
-from .config_preprocess import (
-    DatasetPreprocessor,
-    OutputManager,
-    VersionManager,
-)
-from .core import BayesOptModel
-from .models import (
-    FeatureTokenizer,
-    FTTransformerCore,
-    FTTransformerSklearn,
-    GraphNeuralNetSklearn,
-    MaskedTabularDataset,
-    ResBlock,
-    ResNetSequential,
-    ResNetSklearn,
-    ScaledTransformerEncoderLayer,
-    SimpleGraphLayer,
-    SimpleGNN,
-    TabularDataset,
-)
-from .trainers import (
-    FTTrainer,
-    GLMTrainer,
-    GNNTrainer,
-    ResNetTrainer,
-    TrainerBase,
-    XGBTrainer,
-    _xgb_cuda_available,
-)
-from .utils import (
-    EPS,
-    DistributedUtils,
-    IOUtils,
-    PlotUtils,
-    TorchTrainerMixin,
-    TrainingUtils,
-    compute_batch_size,
-    csv_to_dict,
-    ensure_parent_dir,
-    free_cuda,
-    infer_factor_and_cate_list,
-    plot_dlift_list,
-    plot_lift_list,
-    set_global_seed,
-    split_data,
-    tweedie_loss,
-)
-try:
-    import torch
-except ImportError:
-    torch = None
-
-# Lazy submodules
-_LAZY_SUBMODULES = {
-    "plotting": "ins_pricing.modelling.plotting",
-    "explain": "ins_pricing.modelling.explain",
-    "cli_common": "ins_pricing.modelling.cli_common",
-    "notebook_utils": "ins_pricing.modelling.notebook_utils",
-}
-
-_PACKAGE_PATHS = {
-    "plotting": Path(__file__).resolve().parent / "plotting",
-    "explain": Path(__file__).resolve().parent / "explain",
-}
-
-__all__ = [
-    "BayesOptConfig",
-    "DatasetPreprocessor",
-    "OutputManager",
-    "VersionManager",
-    "BayesOptModel",
-    "FeatureTokenizer",
-    "FTTransformerCore",
-    "FTTransformerSklearn",
-    "GraphNeuralNetSklearn",
-    "MaskedTabularDataset",
-    "ResBlock",
-    "ResNetSequential",
-    "ResNetSklearn",
-    "ScaledTransformerEncoderLayer",
-    "SimpleGraphLayer",
-    "SimpleGNN",
-    "TabularDataset",
-    "FTTrainer",
-    "GLMTrainer",
-    "GNNTrainer",
-    "ResNetTrainer",
-    "TrainerBase",
-    "XGBTrainer",
-    "_xgb_cuda_available",
-    "EPS",
-    "DistributedUtils",
-    "IOUtils",
-    "PlotUtils",
-    "TorchTrainerMixin",
-    "TrainingUtils",
-    "compute_batch_size",
-    "csv_to_dict",
-    "ensure_parent_dir",
-    "free_cuda",
-    "infer_factor_and_cate_list",
-    "plot_dlift_list",
-    "plot_lift_list",
-    "set_global_seed",
-    "split_data",
-    "tweedie_loss",
-    "torch",
-] + sorted(list(_LAZY_SUBMODULES.keys()))
-
-def _lazy_module(name: str, target: str, package_path: Path | None = None) -> types.ModuleType:
-    proxy = types.ModuleType(name)
-    if package_path is not None:
-        proxy.__path__ = [str(package_path)]
-
-    def _load():
-        module = import_module(target)
-        sys.modules[name] = module
-        return module
-
-    def __getattr__(attr: str):
-        module = _load()
-        return getattr(module, attr)
-
-    def __dir__() -> list[str]:
-        module = _load()
-        return sorted(set(dir(module)))
-
-    proxy.__getattr__ = __getattr__  # type: ignore[attr-defined]
-    proxy.__dir__ = __dir__  # type: ignore[attr-defined]
-    return proxy
-
-def _install_proxy(alias: str, target: str) -> None:
-    module_name = f"{__name__}.{alias}"
-    if module_name in sys.modules:
-        return
-    proxy = _lazy_module(module_name, target, _PACKAGE_PATHS.get(alias))
-    sys.modules[module_name] = proxy
-    globals()[alias] = proxy
-
-for _alias, _target in _LAZY_SUBMODULES.items():
-    _install_proxy(_alias, _target)