mergeron 2024.739139.0.tar.gz → 2024.739145.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mergeron has been flagged as possibly problematic.
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/PKG-INFO +3 -3
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/README.rst +2 -2
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/pyproject.toml +4 -4
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/__init__.py +4 -7
- mergeron-2024.739145.1/src/mergeron/core/__init__.py +8 -0
- mergeron-2024.739139.0/src/mergeron/core/damodaran_margin_data.py → mergeron-2024.739145.1/src/mergeron/core/empirical_margin_distribution.py +6 -3
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/ftc_merger_investigations_data.py +23 -20
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/guidelines_boundaries.py +69 -69
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/guidelines_boundary_functions.py +203 -61
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/guidelines_boundary_functions_extra.py +19 -25
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/pseudorandom_numbers.py +3 -3
- {mergeron-2024.739139.0/src/mergeron/core → mergeron-2024.739145.1/src/mergeron/data}/__init__.py +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/demo/visualize_empirical_margin_distribution.py +2 -3
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/gen/__init__.py +52 -33
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/gen/data_generation.py +13 -13
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/gen/data_generation_functions.py +63 -53
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/gen/upp_tests.py +6 -9
- mergeron-2024.739139.0/src/mergeron/demo/__init__.py +0 -3
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/License.txt +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/data/damodaran_margin_data.xls +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/data/damodaran_margin_data_dict.msgpack +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/data/ftc_invdata.msgpack +0 -0
- {mergeron-2024.739139.0/src/mergeron/data → mergeron-2024.739145.1/src/mergeron/demo}/__init__.py +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/gen/enforcement_stats.py +0 -0
- {mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/py.typed +0 -0
{mergeron-2024.739139.0 → mergeron-2024.739145.1}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mergeron
-Version: 2024.739139.0
+Version: 2024.739145.1
 Summary: Merger Policy Analysis using Python
 License: MIT
 Keywords: merger policy analysis,merger guidelines,merger screening,policy presumptions,concentration standards,upward pricing pressure,GUPPI
@@ -101,7 +101,7 @@ FTC Premerger Notification Office. “To File or Not to File: When You Must File

 .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
     :alt: Ruff
-    :target: https://github.com/astral-sh/ruff
+    :target: https://github.com/astral-sh/ruff/

 .. image:: https://www.mypy-lang.org/static/mypy_badge.svg
     :alt: Checked with mypy
@@ -109,6 +109,6 @@ FTC Premerger Notification Office. “To File or Not to File: When You Must File

 .. image:: https://img.shields.io/badge/License-MIT-yellow.svg
     :alt: License: MIT
-    :target: https://opensource.org/licenses/MIT
+    :target: https://opensource.org/licenses/MIT/


{mergeron-2024.739139.0 → mergeron-2024.739145.1}/README.rst
RENAMED
@@ -61,7 +61,7 @@ FTC Premerger Notification Office. “To File or Not to File: When You Must File

 .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
     :alt: Ruff
-    :target: https://github.com/astral-sh/ruff
+    :target: https://github.com/astral-sh/ruff/

 .. image:: https://www.mypy-lang.org/static/mypy_badge.svg
     :alt: Checked with mypy
@@ -69,5 +69,5 @@ FTC Premerger Notification Office. “To File or Not to File: When You Must File

 .. image:: https://img.shields.io/badge/License-MIT-yellow.svg
     :alt: License: MIT
-    :target: https://opensource.org/licenses/MIT
+    :target: https://opensource.org/licenses/MIT/

{mergeron-2024.739139.0 → mergeron-2024.739145.1}/pyproject.toml
RENAMED
@@ -13,7 +13,7 @@ keywords = [
     "upward pricing pressure",
     "GUPPI",
 ]
-version = "2024.739139.0"
+version = "2024.739145.1"

 # Classifiers list: https://pypi.org/classifiers/
 classifiers = [
@@ -74,6 +74,7 @@ sphinx-autoapi = ">=3.0"
 sphinx-immaterial = ">=0.11"
 pipdeptree = ">=2.15.1"
 types-openpyxl = ">=3.0.0"
+pyright = "^1.1.380"

 [tool.ruff]

@@ -168,6 +169,7 @@ preview = true
 python_version = "3.12"
 ignore_missing_imports = false
 strict = true
+enable_incomplete_feature = ["NewGenericSyntax", "PreciseTupleTypes"]

 show_column_numbers = true
 show_error_codes = true
@@ -180,8 +182,6 @@ allow_redefinition = true

 plugins = "numpy.typing.mypy_plugin"

-enable_incomplete_feature = "NewGenericSyntax"
-
 [tool.pytest.ini_options]
 log_auto_indent = 4
 minversion = "8.0"
@@ -193,7 +193,7 @@ filterwarnings = [
     "ignore::DeprecationWarning:jinja2.lexer",
     "ignore::DeprecationWarning:joblib._utils",
     "ignore::DeprecationWarning:openpyxl.packaging.core",
-    "ignore::RuntimeWarning:mergeron.gen.enforcement_stats",
     "ignore::RuntimeWarning:mergeron.core.proportions_tests",
+    "ignore::RuntimeWarning:mergeron.gen.enforcement_stats",
 ]
 tmp_path_retention_policy = "failed"
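Note on the mypy changes above: the NewGenericSyntax and PreciseTupleTypes opt-ins refer to mypy's incomplete-feature support for PEP 695 syntax. As a point of reference only, here is a minimal, self-contained sketch (not taken from mergeron; all names are illustrative) of the syntax that the NewGenericSyntax flag gates.

# Minimal sketch (not from mergeron): PEP 695 syntax gated by mypy's
# "NewGenericSyntax" incomplete-feature flag in the diff above.

type Pair[T] = tuple[T, T]  # new-style generic type alias


def first[T](items: list[T]) -> T:
    """Return the first element; T is declared inline (PEP 695)."""
    return items[0]


class Box[T]:
    """A trivial generic container using the new class syntax."""

    def __init__(self, value: T) -> None:
        self.value = value


if __name__ == "__main__":
    p: Pair[int] = (1, 2)
    print(first([p]), Box("x").value)

Under Python 3.12, the python_version pinned above, this runs as-is; older interpreters reject the `type` statement and bracketed type parameters.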
{mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/__init__.py
RENAMED
@@ -8,7 +8,7 @@ from numpy.typing import NDArray

 _PKG_NAME: str = Path(__file__).parent.stem

-VERSION = "2024.739139.0"
+VERSION = "2024.739145.1"

 __version__ = VERSION

@@ -21,14 +21,11 @@ If the subdirectory doesn't exist, it is created on package invocation.
 if not DATA_DIR.is_dir():
     DATA_DIR.mkdir(parents=False)

-np.set_printoptions(precision=
-
-
-type ArrayINT = NDArray[np.intp]
-type ArrayFloat = NDArray[np.half | np.single | np.double]
-
+np.set_printoptions(precision=24, floatmode="fixed")

 type ArrayBoolean = NDArray[np.bool_]
+type ArrayFloat = NDArray[np.half | np.single | np.double]
+type ArrayINT = NDArray[np.intp]

 type ArrayDouble = NDArray[np.double]
 type ArrayBIGINT = NDArray[np.int64]
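For orientation, a minimal usage sketch of NDArray-based aliases in the style of the reordered block above; the helper function and its data are illustrative, not part of the package.

# Illustrative only: PEP 695 aliases over numpy.typing.NDArray, in the style of
# mergeron.__init__, used to annotate a small stand-in helper.
import numpy as np
from numpy.typing import NDArray

type ArrayBoolean = NDArray[np.bool_]
type ArrayDouble = NDArray[np.double]


def positive_mask(values: ArrayDouble) -> ArrayBoolean:
    """Return an elementwise mask of strictly positive entries."""
    return values > 0.0


if __name__ == "__main__":
    print(positive_mask(np.array([0.25, -0.1, 0.65])))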
mergeron-2024.739139.0/src/mergeron/core/damodaran_margin_data.py → mergeron-2024.739145.1/src/mergeron/core/empirical_margin_distribution.py
RENAMED
@@ -2,6 +2,9 @@
 Functions to parse margin data compiled by
 Prof. Aswath Damodaran, Stern School of Business, NYU.

+Provides :func:`mgn_data_resampler` for generating margin data
+from an estimated Gaussian KDE from the source (margin) data.
+
 Data are downloaded or reused from a local copy, on demand.

 For terms of use of Prof. Damodaran's data, please see:
@@ -103,7 +106,7 @@ def mgn_data_getter( # noqa: PLR0912
     if not _mgn_path.is_file():
         with resources.as_file(
             resources.files(f"{_PKG_NAME}.data").joinpath(
-                "
+                "empirical_margin_distribution.xls"
             )
         ) as _mgn_data_archive_path:
             shutil.copy2(_mgn_data_archive_path, _mgn_path)
@@ -129,7 +132,7 @@ def mgn_data_getter( # noqa: PLR0912
         _xl_row[1] = int(_xl_row[1])
         _mgn_dict[_xl_row[0]] = dict(zip(_mgn_row_keys[1:], _xl_row[1:], strict=True))

-    _ = _data_archive_path.write_bytes(msgpack.packb(_mgn_dict))
+    _ = _data_archive_path.write_bytes(msgpack.packb(_mgn_dict))  # pyright: ignore

     return MappingProxyType(_mgn_dict)

@@ -218,7 +221,7 @@ def mgn_data_resampler(
     _x, _w, _ = mgn_data_builder(mgn_data_getter())

     _mgn_kde = stats.gaussian_kde(_x, weights=_w, bw_method="silverman")
-    _mgn_kde.set_bandwidth(bw_method=_mgn_kde.factor / 3.0)
+    _mgn_kde.set_bandwidth(bw_method=_mgn_kde.factor / 3.0)  # pyright: ignore

     if isinstance(_sample_size, int):
         return np.array(
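Per the docstring and hunks above, the renamed module's resampler fits a weighted Gaussian KDE with Silverman bandwidth and then narrows the bandwidth by a factor of 3 before drawing samples. A minimal sketch of that sequence with synthetic data (the real code reads the Damodaran margin workbook instead; the arrays here are made up):

# Minimal sketch (synthetic data, not the Damodaran margins): fit a weighted
# Gaussian KDE with Silverman bandwidth, narrow the bandwidth by a factor of 3
# as in the diff above, then resample from the estimated distribution.
import numpy as np
from scipy import stats

rng = np.random.default_rng(739145)
observed = rng.beta(2.0, 5.0, size=500)               # stand-in for observed margins
weights = rng.uniform(0.5, 1.5, size=observed.size)   # stand-in for observation weights

kde = stats.gaussian_kde(observed, weights=weights, bw_method="silverman")
kde.set_bandwidth(bw_method=kde.factor / 3.0)          # tighter fit to the data

resampled = kde.resample(size=10_000, seed=rng)        # draws from the estimated KDE
print(resampled.shape, float(resampled.mean()))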
{mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/ftc_merger_investigations_data.py
RENAMED
@@ -96,10 +96,11 @@ class INVTableData(NamedTuple):


 type INVData = Mapping[str, Mapping[str, Mapping[str, INVTableData]]]
+type _INVData_in = dict[str, dict[str, dict[str, INVTableData]]]


 def construct_data(
-    _archive_path: Path
+    _archive_path: Path = INVDATA_ARCHIVE_PATH,
     *,
     flag_backward_compatibility: bool = True,
     flag_pharma_for_exclusion: bool = True,
@@ -134,11 +135,11 @@ def construct_data(
     A dictionary of merger investigations data keyed to reporting periods

     """
-
+
     if _archive_path.is_file() and not rebuild_data:
         _archived_data = msgpack.unpackb(_archive_path.read_bytes(), use_list=False)

-        _invdata:
+        _invdata: _INVData_in = {}
         for _period in _archived_data:
             _invdata[_period] = {}
             for _table_type in _archived_data[_period]:
@@ -149,7 +150,7 @@ def construct_data(
             )
         return MappingProxyType(_invdata)

-    _invdata = dict(_parse_invdata())  # Convert immutable to mutable
+    _invdata = dict(_parse_invdata())  # type: ignore # Convert immutable to mutable

     # Add some data periods (
     # only periods ending in 2011, others have few observations and
@@ -197,12 +198,12 @@ def construct_data(
         )
     }

-    _ = INVDATA_ARCHIVE_PATH.write_bytes(msgpack.packb(_invdata))
+    _ = INVDATA_ARCHIVE_PATH.write_bytes(msgpack.packb(_invdata))  # pyright: ignore

     return MappingProxyType(_invdata)


-def _construct_no_evidence_data(_invdata: INVData, _data_period: str, /) -> None:
+def _construct_no_evidence_data(_invdata: _INVData_in, _data_period: str, /) -> None:
     _invdata_ind_grp = "All Markets"
     _table_nos_map = dict(
         zip(
@@ -231,18 +232,20 @@ def _construct_no_evidence_data(_invdata: INVData, _data_period: str, /) -> None
         _stn0 = "Table 4.1" if _stats_grp == "ByFirmCount" else "Table 3.1"
         _stn1, _stn2 = (_dtn.replace(".X", f".{_i}") for _i in ("1", "2"))

-        _invdata_sub_evid_cond_conc
-
-
-
-
-
-
-
-
-
-
-
+        _invdata_sub_evid_cond_conc |= {
+            _dtn: INVTableData(
+                _invdata_ind_grp,
+                _invdata_evid_cond,
+                np.column_stack((
+                    _invdata_sub_evid_cond_conc[_stn0].data_array[:, :2],
+                    (
+                        _invdata_sub_evid_cond_conc[_stn0].data_array[:, 2:]
+                        - _invdata_sub_evid_cond_conc[_stn1].data_array[:, 2:]
+                        - _invdata_sub_evid_cond_conc[_stn2].data_array[:, 2:]
+                    ),
+                )),
+            )
+        }


 def _construct_new_period_data(
@@ -494,7 +497,7 @@ def _parse_invdata() -> INVData:


 def _parse_page_blocks(
-    _invdata:
+    _invdata: _INVData_in, _data_period: str, _doc_pg_blocks: Sequence[Sequence[Any]], /
 ) -> None:
     if _data_period != "1996-2011":
         _parse_table_blocks(_invdata, _data_period, _doc_pg_blocks)
@@ -521,7 +524,7 @@ def _parse_page_blocks(


 def _parse_table_blocks(
-    _invdata:
+    _invdata: _INVData_in, _data_period: str, _table_blocks: Sequence[Sequence[str]], /
 ) -> None:
     _invdata_evid_cond = "Unrestricted on additional evidence"
     _table_num, _table_ser, _table_type = _identify_table_type(
{mergeron-2024.739139.0 → mergeron-2024.739145.1}/src/mergeron/core/guidelines_boundaries.py
RENAMED
@@ -20,12 +20,13 @@ from .. import ( # noqa: TID252
     RECForm,
     UPPAggrSelector,
 )
+from . import MPFloat
 from . import guidelines_boundary_functions as gbfn

 __version__ = VERSION


-mp.
+mp.dps = 32
 mp.trap_complex = True

 type HMGPubYear = Literal[1992, 2004, 2010, 2023]
@@ -162,38 +163,40 @@ class GuidelinesThresholds:
     )


-def _concentration_threshold_validator(
-    _instance: ConcentrationBoundary, _attribute: Attribute[float], _value: float, /
-) -> None:
-    if not 0 <= _value <= 1:
-        raise ValueError("Concentration threshold must lie between 0 and 1.")
-
-
-def _concentration_measure_name_validator(
-    _instance: ConcentrationBoundary, _attribute: Attribute[str], _value: str, /
-) -> None:
-    if _value not in ("ΔHHI", "Combined share", "Pre-merger HHI", "Post-merger HHI"):
-        raise ValueError(f"Invalid name for a concentration measure, {_value!r}.")
-
-
 @frozen
 class ConcentrationBoundary:
     """Concentration parameters, boundary coordinates, and area under concentration boundary."""

-    threshold: float = field(
-        kw_only=False,
-        default=0.01,
-        validator=(validators.instance_of(float), _concentration_threshold_validator),
-    )
-    precision: int = field(
-        kw_only=False, default=5, validator=validators.instance_of(int)
-    )
     measure_name: Literal[
         "ΔHHI", "Combined share", "Pre-merger HHI", "Post-merger HHI"
     ] = field(
         kw_only=False,
-        default="ΔHHI",
-
+        default="ΔHHI",  # pyright: ignore
+    )
+
+    @measure_name.validator  # pyright: ignore
+    def __mnv(
+        _instance: ConcentrationBoundary, _attribute: Attribute[str], _value: str, /
+    ) -> None:
+        if _value not in (
+            "ΔHHI",
+            "Combined share",
+            "Pre-merger HHI",
+            "Post-merger HHI",
+        ):
+            raise ValueError(f"Invalid name for a concentration measure, {_value!r}.")
+
+    threshold: float = field(kw_only=False, default=0.01)
+
+    @threshold.validator  # pyright: ignore
+    def __tv(
+        _instance: ConcentrationBoundary, _attribute: Attribute[float], _value: float, /
+    ) -> None:
+        if not 0 <= _value <= 1:
+            raise ValueError("Concentration threshold must lie between 0 and 1.")  # pyright: ignore
+
+    precision: int = field(
+        kw_only=False, default=5, validator=validators.instance_of(int)
     )

     coordinates: ArrayDouble = field(init=False, kw_only=True)
@@ -213,39 +216,11 @@ class ConcentrationBoundary:
             case "Post-merger HHI":
                 _conc_fn = gbfn.hhi_post_contrib_boundary

-        _boundary = _conc_fn(self.threshold,
+        _boundary = _conc_fn(self.threshold, dps=self.precision)
         object.__setattr__(self, "coordinates", _boundary.coordinates)
         object.__setattr__(self, "area", _boundary.area)


-def _divr_value_validator(
-    _instance: DiversionRatioBoundary, _attribute: Attribute[float], _value: float, /
-) -> None:
-    if not 0 <= _value <= 1:
-        raise ValueError(
-            "Margin-adjusted benchmark share ratio must lie between 0 and 1."
-        )
-
-
-def _rec_spec_validator(
-    _instance: DiversionRatioBoundary,
-    _attribute: Attribute[RECForm],
-    _value: RECForm,
-    /,
-) -> None:
-    if _value == RECForm.OUTIN and _instance.recapture_ratio:
-        raise ValueError(
-            f"Invalid recapture specification, {_value!r}. "
-            "You may consider specifying `mergeron.RECForm.INOUT` here, and "
-            'assigning the default recapture ratio as attribute, "recapture_ratio" of '
-            "this `DiversionRatioBoundarySpec` object."
-        )
-    if _value is None and _instance.agg_method != UPPAggrSelector.MAX:
-        raise ValueError(
-            f"Specified aggregation method, {_instance.agg_method} requires a recapture specification."
-        )
-
-
 @frozen
 class DiversionRatioBoundary:
     """
@@ -261,21 +236,25 @@ class DiversionRatioBoundary:

     """

-    diversion_ratio: float = field(
-
-
-
-
+    diversion_ratio: float = field(kw_only=False, default=0.065)
+
+    @diversion_ratio.validator  # pyright: ignore
+    def __dvv(
+        _instance: DiversionRatioBoundary,
+        _attribute: Attribute[float],
+        _value: float,
+        /,
+    ) -> None:
+        if not 0 <= _value <= 1:
+            raise ValueError(
+                "Margin-adjusted benchmark share ratio must lie between 0 and 1."
+            )

     recapture_ratio: float = field(
         kw_only=False, default=DEFAULT_REC_RATE, validator=validators.instance_of(float)
     )

-    recapture_form: RECForm | None = field(
-        kw_only=True,
-        default=RECForm.INOUT,
-        validator=(validators.instance_of((type(None), RECForm)), _rec_spec_validator),
-    )
+    recapture_form: RECForm | None = field(kw_only=True, default=RECForm.INOUT)
     """
     The form of the recapture ratio.

@@ -296,6 +275,27 @@ class DiversionRatioBoundary:

     """

+    @recapture_form.validator  # pyright: ignore
+    def __rsv(
+        _instance: DiversionRatioBoundary,
+        _attribute: Attribute[RECForm],
+        _value: RECForm,
+        /,
+    ) -> None:
+        if _value and not (isinstance(_value, RECForm)):
+            raise ValueError(f"Invalid recapture specification, {_value!r}.")
+        if _value == RECForm.OUTIN and _instance.recapture_ratio:
+            raise ValueError(
+                f"Invalid recapture specification, {_value!r}. "
+                "You may consider specifying `mergeron.RECForm.INOUT` here, and "
+                'assigning the default recapture ratio as attribute, "recapture_ratio" of '
+                "this `DiversionRatioBoundarySpec` object."
+            )
+        if _value is None and _instance.agg_method != UPPAggrSelector.MAX:
+            raise ValueError(
+                f"Specified aggregation method, {_instance.agg_method} requires a recapture specification."
+            )
+
     agg_method: UPPAggrSelector = field(
         kw_only=True,
         default=UPPAggrSelector.MAX,
@@ -335,7 +335,7 @@ class DiversionRatioBoundary:
         )
         _upp_agg_kwargs: gbfn.ShareRatioBoundaryKeywords = {
             "recapture_form": getattr(self.recapture_form, "value", "inside-out"),
-            "
+            "dps": self.precision,
         }
         match self.agg_method:
             case UPPAggrSelector.DIS:
@@ -345,10 +345,10 @@ class DiversionRatioBoundary:
                 _upp_agg_fn = gbfn.shrratio_boundary_xact_avg  # type: ignore
             case UPPAggrSelector.MAX:
                 _upp_agg_fn = gbfn.shrratio_boundary_max  # type: ignore
-                _upp_agg_kwargs = {"
+                _upp_agg_kwargs = {"dps": 10}  # replace here
             case UPPAggrSelector.MIN:
                 _upp_agg_fn = gbfn.shrratio_boundary_min  # type: ignore
-                _upp_agg_kwargs |= {"
+                _upp_agg_kwargs |= {"dps": 10}  # update here
             case _:
                 _upp_agg_fn = gbfn.shrratio_boundary_wtd_avg

@@ -370,7 +370,7 @@ class DiversionRatioBoundary:

         _upp_agg_kwargs |= {"agg_method": _aggregator, "weighting": _wgt_type}

-        _boundary = _upp_agg_fn(_share_ratio, self.recapture_ratio, **_upp_agg_kwargs)
+        _boundary = _upp_agg_fn(_share_ratio, self.recapture_ratio, **_upp_agg_kwargs)  # pyright: ignore # TypedDict redefinition
         object.__setattr__(self, "coordinates", _boundary.coordinates)
         object.__setattr__(self, "area", _boundary.area)

@@ -413,7 +413,7 @@ def critical_share_ratio(
     m_star: float = 1.00,
     r_bar: float = 1.00,
     frac: float = 1e-16,
-) ->
+) -> MPFloat:
     """
     Corollary to GUPPI bound.

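The validator changes in this file replace module-level functions passed through field(validator=...) with attrs' decorator-based validators attached to each field. A minimal, self-contained sketch of that pattern (class and field names are illustrative, not mergeron's):

# Minimal sketch of the attrs decorator-based validator pattern used in the
# refactor above. Class and field names are illustrative, not mergeron's.
from attrs import field, frozen


@frozen
class Threshold:
    value: float = field(kw_only=False, default=0.01)

    @value.validator
    def _check_value(self, attribute, value) -> None:
        # Runs on instantiation, just as a validator= callable would.
        if not 0 <= value <= 1:
            raise ValueError("Threshold must lie between 0 and 1.")


if __name__ == "__main__":
    print(Threshold(0.02))   # accepted
    try:
        Threshold(1.5)
    except ValueError as err:
        print("rejected:", err)

Keeping each validator next to its field, as the new code does, removes the free-standing helper functions without changing when or how the checks run.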