policyengine-3.0.0-py3-none-any.whl → policyengine-3.1.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- policyengine/__pycache__/__init__.cpython-313.pyc +0 -0
- policyengine/core/__init__.py +22 -0
- policyengine/core/dataset.py +260 -0
- policyengine/core/dataset_version.py +16 -0
- policyengine/core/dynamic.py +43 -0
- policyengine/core/output.py +26 -0
- policyengine/{models → core}/parameter.py +4 -2
- policyengine/{models → core}/parameter_value.py +1 -1
- policyengine/core/policy.py +43 -0
- policyengine/{models → core}/simulation.py +10 -14
- policyengine/core/tax_benefit_model.py +11 -0
- policyengine/core/tax_benefit_model_version.py +34 -0
- policyengine/core/variable.py +15 -0
- policyengine/outputs/__init__.py +21 -0
- policyengine/outputs/aggregate.py +124 -0
- policyengine/outputs/change_aggregate.py +184 -0
- policyengine/outputs/decile_impact.py +140 -0
- policyengine/tax_benefit_models/uk/__init__.py +26 -0
- policyengine/tax_benefit_models/uk/analysis.py +97 -0
- policyengine/tax_benefit_models/uk/datasets.py +176 -0
- policyengine/tax_benefit_models/uk/model.py +268 -0
- policyengine/tax_benefit_models/uk/outputs.py +108 -0
- policyengine/tax_benefit_models/uk.py +33 -0
- policyengine/tax_benefit_models/us/__init__.py +36 -0
- policyengine/tax_benefit_models/us/analysis.py +99 -0
- policyengine/tax_benefit_models/us/datasets.py +307 -0
- policyengine/tax_benefit_models/us/model.py +447 -0
- policyengine/tax_benefit_models/us/outputs.py +108 -0
- policyengine/tax_benefit_models/us.py +32 -0
- policyengine/utils/__init__.py +3 -0
- policyengine/utils/dates.py +40 -0
- policyengine/utils/parametric_reforms.py +39 -0
- policyengine/utils/plotting.py +179 -0
- {policyengine-3.0.0.dist-info → policyengine-3.1.1.dist-info}/METADATA +185 -20
- policyengine-3.1.1.dist-info/RECORD +39 -0
- policyengine/database/__init__.py +0 -56
- policyengine/database/aggregate.py +0 -33
- policyengine/database/baseline_parameter_value_table.py +0 -66
- policyengine/database/baseline_variable_table.py +0 -40
- policyengine/database/database.py +0 -251
- policyengine/database/dataset_table.py +0 -41
- policyengine/database/dynamic_table.py +0 -34
- policyengine/database/link.py +0 -82
- policyengine/database/model_table.py +0 -27
- policyengine/database/model_version_table.py +0 -28
- policyengine/database/parameter_table.py +0 -31
- policyengine/database/parameter_value_table.py +0 -62
- policyengine/database/policy_table.py +0 -34
- policyengine/database/report_element_table.py +0 -48
- policyengine/database/report_table.py +0 -24
- policyengine/database/simulation_table.py +0 -50
- policyengine/database/user_table.py +0 -28
- policyengine/database/versioned_dataset_table.py +0 -28
- policyengine/models/__init__.py +0 -30
- policyengine/models/aggregate.py +0 -92
- policyengine/models/baseline_parameter_value.py +0 -14
- policyengine/models/baseline_variable.py +0 -12
- policyengine/models/dataset.py +0 -18
- policyengine/models/dynamic.py +0 -15
- policyengine/models/model.py +0 -124
- policyengine/models/model_version.py +0 -14
- policyengine/models/policy.py +0 -17
- policyengine/models/policyengine_uk.py +0 -114
- policyengine/models/policyengine_us.py +0 -115
- policyengine/models/report.py +0 -10
- policyengine/models/report_element.py +0 -36
- policyengine/models/user.py +0 -14
- policyengine/models/versioned_dataset.py +0 -12
- policyengine/utils/charts.py +0 -286
- policyengine/utils/compress.py +0 -20
- policyengine/utils/datasets.py +0 -71
- policyengine-3.0.0.dist-info/RECORD +0 -47
- policyengine-3.0.0.dist-info/entry_points.txt +0 -2
- {policyengine-3.0.0.dist-info → policyengine-3.1.1.dist-info}/WHEEL +0 -0
- {policyengine-3.0.0.dist-info → policyengine-3.1.1.dist-info}/licenses/LICENSE +0 -0
- {policyengine-3.0.0.dist-info → policyengine-3.1.1.dist-info}/top_level.txt +0 -0
policyengine/__pycache__/__init__.cpython-313.pyc
Binary file
policyengine/core/__init__.py
@@ -0,0 +1,22 @@
+from .dataset import Dataset
+from .dataset import map_to_entity as map_to_entity
+from .dataset_version import DatasetVersion as DatasetVersion
+from .dynamic import Dynamic as Dynamic
+from .output import Output as Output
+from .output import OutputCollection as OutputCollection
+from .parameter import Parameter as Parameter
+from .parameter_value import ParameterValue as ParameterValue
+from .policy import Policy as Policy
+from .simulation import Simulation as Simulation
+from .tax_benefit_model import TaxBenefitModel as TaxBenefitModel
+from .tax_benefit_model_version import (
+    TaxBenefitModelVersion as TaxBenefitModelVersion,
+)
+from .variable import Variable as Variable
+
+# Rebuild models to resolve forward references
+Dataset.model_rebuild()
+TaxBenefitModelVersion.model_rebuild()
+Variable.model_rebuild()
+Parameter.model_rebuild()
+ParameterValue.model_rebuild()
policyengine/core/dataset.py
@@ -0,0 +1,260 @@
+from uuid import uuid4
+
+import pandas as pd
+from microdf import MicroDataFrame
+from pydantic import BaseModel, ConfigDict, Field
+
+from .dataset_version import DatasetVersion
+from .tax_benefit_model import TaxBenefitModel
+
+
+class Dataset(BaseModel):
+    """Base class for datasets.
+
+    The data field contains entity-level data as a BaseModel with DataFrame fields.
+
+    Example:
+        class YearData(BaseModel):
+            model_config = ConfigDict(arbitrary_types_allowed=True)
+            person: pd.DataFrame
+            household: pd.DataFrame
+
+        class MyDataset(Dataset):
+            data: YearData | None = None
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    name: str
+    description: str
+    dataset_version: DatasetVersion | None = None
+    filepath: str
+    is_output_dataset: bool = False
+    tax_benefit_model: TaxBenefitModel | None = None
+    year: int
+
+    data: BaseModel | None = None
+
+
+def map_to_entity(
+    entity_data: dict[str, MicroDataFrame],
+    source_entity: str,
+    target_entity: str,
+    person_entity: str = "person",
+    columns: list[str] | None = None,
+) -> MicroDataFrame:
+    """Map data from source entity to target entity using join keys.
+
+    This is a generic entity mapping utility that handles:
+    - Same entity mapping (returns as is)
+    - Person to group entity mapping (aggregates values)
+    - Group to person entity mapping (expands values)
+    - Group to group entity mapping (aggregates through person entity)
+
+    Args:
+        entity_data: Dictionary mapping entity names to their MicroDataFrame data
+        source_entity: The source entity name
+        target_entity: The target entity name
+        person_entity: The name of the person entity (default "person")
+        columns: List of column names to map. If None, maps all columns
+
+    Returns:
+        MicroDataFrame: The mapped data at the target entity level
+
+    Raises:
+        ValueError: If source or target entity is invalid
+    """
+    valid_entities = set(entity_data.keys())
+
+    if source_entity not in valid_entities:
+        raise ValueError(
+            f"Invalid source entity '{source_entity}'. Must be one of {valid_entities}"
+        )
+    if target_entity not in valid_entities:
+        raise ValueError(
+            f"Invalid target entity '{target_entity}'. Must be one of {valid_entities}"
+        )
+
+    # Get source data (convert to plain DataFrame to avoid weighted operations during mapping)
+    source_df = pd.DataFrame(entity_data[source_entity])
+
+    if columns:
+        # Select only requested columns (keep all ID columns for joins)
+        id_cols = {col for col in source_df.columns if col.endswith("_id")}
+        cols_to_keep = list(set(columns) | id_cols)
+        source_df = source_df[cols_to_keep]
+
+    # Determine weight column for target entity
+    target_weight = f"{target_entity}_weight"
+
+    # Same entity - return as is
+    if source_entity == target_entity:
+        return MicroDataFrame(source_df, weights=target_weight)
+
+    # Get target data and key
+    target_df = entity_data[target_entity]
+    target_key = f"{target_entity}_id"
+
+    # Person to group entity: aggregate person-level data to group level
+    if source_entity == person_entity and target_entity != person_entity:
+        # Check for both naming patterns: "entity_id" and "person_entity_id"
+        person_target_key = f"{person_entity}_{target_entity}_id"
+        join_key = (
+            person_target_key
+            if person_target_key in source_df.columns
+            else target_key
+        )
+
+        if join_key in source_df.columns:
+            # Get columns to aggregate (exclude ID and weight columns)
+            id_cols = {col for col in source_df.columns if col.endswith("_id")}
+            weight_cols = {
+                col for col in source_df.columns if col.endswith("_weight")
+            }
+            agg_cols = [
+                c
+                for c in source_df.columns
+                if c not in id_cols and c not in weight_cols
+            ]
+
+            # Group by join key and sum
+            aggregated = source_df.groupby(join_key, as_index=False)[
+                agg_cols
+            ].sum()
+
+            # Rename join key to target key if needed
+            if join_key != target_key:
+                aggregated = aggregated.rename(columns={join_key: target_key})
+
+            # Merge with target, preserving original order
+            target_pd = pd.DataFrame(target_df)[[target_key, target_weight]]
+            target_pd = target_pd.reset_index(drop=False)
+            result = target_pd.merge(aggregated, on=target_key, how="left")
+
+            # Sort back to original order
+            result = (
+                result.sort_values("index")
+                .drop("index", axis=1)
+                .reset_index(drop=True)
+            )
+
+            # Fill NaN with 0 for groups with no members in source entity
+            result[agg_cols] = result[agg_cols].fillna(0)
+
+            return MicroDataFrame(result, weights=target_weight)
+
+    # Group entity to person: expand group-level data to person level
+    if source_entity != person_entity and target_entity == person_entity:
+        source_key = f"{source_entity}_id"
+        # Check for both naming patterns
+        person_source_key = f"{person_entity}_{source_entity}_id"
+
+        target_pd = pd.DataFrame(target_df)
+        join_key = (
+            person_source_key
+            if person_source_key in target_pd.columns
+            else source_key
+        )
+
+        if join_key in target_pd.columns:
+            # Rename source key to match join key if needed
+            if join_key != source_key and source_key in source_df.columns:
+                source_df = source_df.rename(columns={source_key: join_key})
+
+            result = target_pd.merge(source_df, on=join_key, how="left")
+            return MicroDataFrame(result, weights=target_weight)
+
+    # Group to group: go through person table
+    if source_entity != person_entity and target_entity != person_entity:
+        # Get person link table with both entity IDs
+        person_df = pd.DataFrame(entity_data[person_entity])
+        source_key = f"{source_entity}_id"
+
+        # Check for both naming patterns for person-level links
+        person_source_key = f"{person_entity}_{source_entity}_id"
+        person_target_key = f"{person_entity}_{target_entity}_id"
+
+        # Determine which keys exist in person table
+        source_link_key = (
+            person_source_key
+            if person_source_key in person_df.columns
+            else source_key
+        )
+        target_link_key = (
+            person_target_key
+            if person_target_key in person_df.columns
+            else target_key
+        )
+
+        # Link source -> person -> target
+        if (
+            source_link_key in person_df.columns
+            and target_link_key in person_df.columns
+        ):
+            person_link = person_df[
+                [source_link_key, target_link_key]
+            ].drop_duplicates()
+
+            # Rename source key to match link key if needed
+            source_df_copy = source_df.copy()
+            if (
+                source_link_key != source_key
+                and source_key in source_df_copy.columns
+            ):
+                source_df_copy = source_df_copy.rename(
+                    columns={source_key: source_link_key}
+                )
+
+            # Join source data with target key
+            source_with_target = source_df_copy.merge(
+                person_link, on=source_link_key, how="left"
+            )
+
+            # Aggregate to target level
+            id_cols = {
+                col
+                for col in source_with_target.columns
+                if col.endswith("_id")
+            }
+            weight_cols = {
+                col
+                for col in source_with_target.columns
+                if col.endswith("_weight")
+            }
+            agg_cols = [
+                c
+                for c in source_with_target.columns
+                if c not in id_cols and c not in weight_cols
+            ]
+
+            aggregated = source_with_target.groupby(
+                target_link_key, as_index=False
+            )[agg_cols].sum()
+
+            # Rename target link key to target key if needed
+            if target_link_key != target_key:
+                aggregated = aggregated.rename(
+                    columns={target_link_key: target_key}
+                )
+
+            # Merge with target, preserving original order
+            target_pd = pd.DataFrame(target_df)[[target_key, target_weight]]
+            target_pd = target_pd.reset_index(drop=False)
+            result = target_pd.merge(aggregated, on=target_key, how="left")
+
+            # Sort back to original order
+            result = (
+                result.sort_values("index")
+                .drop("index", axis=1)
+                .reset_index(drop=True)
+            )
+
+            # Fill NaN with 0
+            result[agg_cols] = result[agg_cols].fillna(0)
+
+            return MicroDataFrame(result, weights=target_weight)
+
+    raise ValueError(
+        f"Unsupported mapping from {source_entity} to {target_entity}"
+    )
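To illustrate the new `map_to_entity` utility, here is a minimal sketch of aggregating a person-level column up to households. The entity names, link keys, and values are illustrative rather than taken from a real PolicyEngine dataset; the `person_household_id` and `*_weight` columns follow the naming conventions the function looks for.

```python
import pandas as pd
from microdf import MicroDataFrame

from policyengine.core import map_to_entity

# Hypothetical person- and household-level data.
person = MicroDataFrame(
    pd.DataFrame(
        {
            "person_id": [1, 2, 3],
            "person_household_id": [10, 10, 11],  # link key to the household entity
            "person_weight": [1.0, 1.0, 1.0],
            "employment_income": [30_000.0, 20_000.0, 45_000.0],
        }
    ),
    weights="person_weight",
)
household = MicroDataFrame(
    pd.DataFrame({"household_id": [10, 11], "household_weight": [1.5, 2.0]}),
    weights="household_weight",
)

# Person -> household: values are summed within each household.
household_income = map_to_entity(
    entity_data={"person": person, "household": household},
    source_entity="person",
    target_entity="household",
    columns=["employment_income"],
)
print(household_income["employment_income"].values)  # [50000., 45000.]
```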
policyengine/core/dataset_version.py
@@ -0,0 +1,16 @@
+from typing import TYPE_CHECKING
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+from .tax_benefit_model import TaxBenefitModel
+
+if TYPE_CHECKING:
+    from .dataset import Dataset
+
+
+class DatasetVersion(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    dataset: "Dataset"
+    description: str
+    tax_benefit_model: TaxBenefitModel = None
policyengine/core/dynamic.py
@@ -0,0 +1,43 @@
+from collections.abc import Callable
+from datetime import datetime
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+from .parameter_value import ParameterValue
+
+
+class Dynamic(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    name: str
+    description: str | None = None
+    parameter_values: list[ParameterValue] = []
+    simulation_modifier: Callable | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+    updated_at: datetime = Field(default_factory=datetime.now)
+
+    def __add__(self, other: "Dynamic") -> "Dynamic":
+        """Combine two dynamics by appending parameter values and chaining simulation modifiers."""
+        if not isinstance(other, Dynamic):
+            return NotImplemented
+
+        # Combine simulation modifiers
+        combined_modifier = None
+        if self.simulation_modifier is not None and other.simulation_modifier is not None:
+
+            def combined_modifier(sim):
+                sim = self.simulation_modifier(sim)
+                sim = other.simulation_modifier(sim)
+                return sim
+
+        elif self.simulation_modifier is not None:
+            combined_modifier = self.simulation_modifier
+        elif other.simulation_modifier is not None:
+            combined_modifier = other.simulation_modifier
+
+        return Dynamic(
+            name=f"{self.name} + {other.name}",
+            description=f"Combined dynamic: {self.name} and {other.name}",
+            parameter_values=self.parameter_values + other.parameter_values,
+            simulation_modifier=combined_modifier,
+        )
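A minimal sketch of the `__add__` overload in action; both dynamics and the modifier function are hypothetical:

```python
from policyengine.core import Dynamic

def add_behavioural_response(sim):
    # Hypothetical modifier: adjust or replace the simulation, then return it.
    return sim

lsr = Dynamic(name="Labour supply responses", simulation_modifier=add_behavioural_response)
take_up = Dynamic(name="Partial benefit take-up")

combined = lsr + take_up
print(combined.name)  # "Labour supply responses + Partial benefit take-up"
# Only one side has a modifier, so it is reused directly rather than chained.
print(combined.simulation_modifier is add_behavioural_response)  # True
```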
policyengine/core/output.py
@@ -0,0 +1,26 @@
+from typing import TypeVar
+
+import pandas as pd
+from pydantic import BaseModel, ConfigDict
+
+T = TypeVar("T", bound="Output")
+
+
+class Output(BaseModel):
+    """Base class for all output templates."""
+
+    def run(self):
+        """Calculate and populate the output fields.
+
+        Must be implemented by subclasses.
+        """
+        raise NotImplementedError("Subclasses must implement run()")
+
+
+class OutputCollection[T: "Output"](BaseModel):
+    """Container for a collection of outputs with their DataFrame representation."""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    outputs: list[T]
+    dataframe: pd.DataFrame
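Since `Output.run()` is abstract, subclasses declare their result fields and fill them in `run()`; the package's `Aggregate`, `ChangeAggregate`, and `DecileImpact` outputs follow this pattern. A minimal sketch with a hypothetical `PovertyRate` output (not part of the package):

```python
from policyengine.core import Output

class PovertyRate(Output):
    value: float | None = None

    def run(self):
        # A real output would compute this from a simulation's results;
        # a constant stands in here.
        self.value = 0.12

output = PovertyRate()
output.run()
print(output.value)  # 0.12
```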
policyengine/{models → core}/parameter.py
@@ -2,11 +2,13 @@ from uuid import uuid4
 
 from pydantic import BaseModel, Field
 
-from .
+from .tax_benefit_model_version import TaxBenefitModelVersion
 
 
 class Parameter(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid4()))
+    name: str
     description: str | None = None
     data_type: type | None = None
-
+    tax_benefit_model_version: TaxBenefitModelVersion
+    unit: str | None = None
policyengine/{models → core}/parameter_value.py
@@ -8,7 +8,7 @@ from .parameter import Parameter
 
 class ParameterValue(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid4()))
-    parameter: Parameter
+    parameter: Parameter | None = None
     value: float | int | str | bool | list | None = None
     start_date: datetime
     end_date: datetime | None = None
policyengine/core/policy.py
@@ -0,0 +1,43 @@
+from collections.abc import Callable
+from datetime import datetime
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+from .parameter_value import ParameterValue
+
+
+class Policy(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    name: str
+    description: str | None = None
+    parameter_values: list[ParameterValue] = []
+    simulation_modifier: Callable | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
+    updated_at: datetime = Field(default_factory=datetime.now)
+
+    def __add__(self, other: "Policy") -> "Policy":
+        """Combine two policies by appending parameter values and chaining simulation modifiers."""
+        if not isinstance(other, Policy):
+            return NotImplemented
+
+        # Combine simulation modifiers
+        combined_modifier = None
+        if self.simulation_modifier is not None and other.simulation_modifier is not None:
+
+            def combined_modifier(sim):
+                sim = self.simulation_modifier(sim)
+                sim = other.simulation_modifier(sim)
+                return sim
+
+        elif self.simulation_modifier is not None:
+            combined_modifier = self.simulation_modifier
+        elif other.simulation_modifier is not None:
+            combined_modifier = other.simulation_modifier
+
+        return Policy(
+            name=f"{self.name} + {other.name}",
+            description=f"Combined policy: {self.name} and {other.name}",
+            parameter_values=self.parameter_values + other.parameter_values,
+            simulation_modifier=combined_modifier,
+        )
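Policies compose the same way as dynamics. A minimal sketch with hypothetical reform names and values; note that `ParameterValue.parameter` is now optional, per the change above, so bare values can be constructed for illustration:

```python
from datetime import datetime

from policyengine.core import ParameterValue, Policy

raise_rate = Policy(
    name="Raise basic rate",
    parameter_values=[ParameterValue(value=0.22, start_date=datetime(2026, 1, 1))],
)
expand_benefit = Policy(
    name="Expand child benefit",
    parameter_values=[ParameterValue(value=30.0, start_date=datetime(2026, 1, 1))],
)

reform = raise_rate + expand_benefit
print(reform.name)                   # "Raise basic rate + Expand child benefit"
print(len(reform.parameter_values))  # 2
```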
policyengine/{models → core}/simulation.py
@@ -1,14 +1,12 @@
 from datetime import datetime
-from typing import Any
 from uuid import uuid4
 
 from pydantic import BaseModel, Field
 
 from .dataset import Dataset
 from .dynamic import Dynamic
-from .model import Model
-from .model_version import ModelVersion
 from .policy import Policy
+from .tax_benefit_model_version import TaxBenefitModelVersion
 
 
 class Simulation(BaseModel):
@@ -18,17 +16,15 @@ class Simulation(BaseModel):
 
     policy: Policy | None = None
     dynamic: Dynamic | None = None
-    dataset: Dataset
+    dataset: Dataset = None
 
-
-
-
+    tax_benefit_model_version: TaxBenefitModelVersion = None
+    output_dataset: Dataset | None = None
+
+    variables: dict[str, list[str]] | None = Field(
+        default=None,
+        description="Optional dictionary mapping entity names to lists of variable names to calculate. If None, uses model defaults.",
+    )
 
     def run(self):
-        self.
-            dataset=self.dataset,
-            policy=self.policy,
-            dynamic=self.dynamic,
-        )
-        self.updated_at = datetime.now()
-        return self.result
+        self.tax_benefit_model_version.run(self)
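The new flow inverts control: `Simulation.run()` now delegates to the attached model version rather than orchestrating the run itself. A minimal sketch of constructing a simulation, assuming the `Simulation` fields not shown in this hunk all take defaults; the dataset fields follow the `Dataset` model added above and the values are hypothetical:

```python
from policyengine.core import Dataset, Simulation

dataset = Dataset(
    name="toy",
    description="Tiny illustrative dataset",
    filepath="/tmp/toy.h5",
    year=2025,
)
sim = Simulation(
    dataset=dataset,
    variables={"household": ["household_net_income"]},
)
# sim.run() simply calls self.tax_benefit_model_version.run(self), so a
# concrete TaxBenefitModelVersion must be attached before running.
```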
policyengine/core/tax_benefit_model_version.py
@@ -0,0 +1,34 @@
+from datetime import datetime
+from typing import TYPE_CHECKING
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+
+from .tax_benefit_model import TaxBenefitModel
+
+if TYPE_CHECKING:
+    from .parameter import Parameter
+    from .parameter_value import ParameterValue
+    from .simulation import Simulation
+    from .variable import Variable
+
+
+class TaxBenefitModelVersion(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    model: TaxBenefitModel
+    version: str
+    description: str | None = None
+    created_at: datetime | None = Field(default_factory=datetime.utcnow)
+
+    variables: list["Variable"] = Field(default_factory=list)
+    parameters: list["Parameter"] = Field(default_factory=list)
+    parameter_values: list["ParameterValue"] = Field(default_factory=list)
+
+    def run(self, simulation: "Simulation") -> "Simulation":
+        raise NotImplementedError(
+            "The TaxBenefitModel class must define a method to execute simulations."
+        )
+
+    def __repr__(self) -> str:
+        # Give the id and version, and the number of variables, parameters, parameter values
+        return f"<TaxBenefitModelVersion id={self.id} variables={len(self.variables)} parameters={len(self.parameters)} parameter_values={len(self.parameter_values)}>"
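Concrete model versions override `run()`; the UK and US models under `policyengine/tax_benefit_models/` presumably supply such subclasses. A minimal sketch of the contract with a hypothetical subclass (instantiation is omitted because `TaxBenefitModel`'s fields are not shown in this diff):

```python
from policyengine.core import Simulation, TaxBenefitModelVersion

class ToyModelVersion(TaxBenefitModelVersion):
    def run(self, simulation: Simulation) -> Simulation:
        # A real engine would calculate the requested variables for
        # simulation.dataset; echoing the input dataset stands in here.
        simulation.output_dataset = simulation.dataset
        return simulation
```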
policyengine/core/variable.py
@@ -0,0 +1,15 @@
+from typing import Any
+
+from pydantic import BaseModel
+
+from .tax_benefit_model_version import TaxBenefitModelVersion
+
+
+class Variable(BaseModel):
+    id: str
+    name: str
+    tax_benefit_model_version: TaxBenefitModelVersion
+    entity: str
+    description: str | None = None
+    data_type: type = None
+    possible_values: list[Any] | None = None
policyengine/outputs/__init__.py
@@ -0,0 +1,21 @@
+from policyengine.core import Output, OutputCollection
+from policyengine.outputs.aggregate import Aggregate, AggregateType
+from policyengine.outputs.change_aggregate import (
+    ChangeAggregate,
+    ChangeAggregateType,
+)
+from policyengine.outputs.decile_impact import (
+    DecileImpact,
+    calculate_decile_impacts,
+)
+
+__all__ = [
+    "Output",
+    "OutputCollection",
+    "Aggregate",
+    "AggregateType",
+    "ChangeAggregate",
+    "ChangeAggregateType",
+    "DecileImpact",
+    "calculate_decile_impacts",
+]