mergeron 2025.739290.6.tar.gz → 2025.739290.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mergeron has been flagged as possibly problematic.

Files changed (25)
  1. mergeron-2025.739290.9/PKG-INFO +178 -0
  2. mergeron-2025.739290.9/README.rst +136 -0
  3. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/pyproject.toml +1 -1
  4. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/__init__.py +1 -1
  5. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/__init__.py +30 -32
  6. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/empirical_margin_distribution.py +6 -8
  7. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/ftc_merger_investigations_data.py +9 -5
  8. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/guidelines_boundaries.py +11 -11
  9. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/guidelines_boundary_functions.py +115 -115
  10. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/guidelines_boundary_functions_extra.py +208 -1
  11. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/data/__init__.py +4 -7
  12. mergeron-2025.739290.9/src/mergeron/data/ftc_merger_investigations_data.zip +0 -0
  13. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/gen/__init__.py +29 -35
  14. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/gen/data_generation.py +3 -14
  15. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/gen/data_generation_functions.py +1 -1
  16. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/gen/enforcement_stats.py +22 -11
  17. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/gen/upp_tests.py +50 -144
  18. mergeron-2025.739290.6/PKG-INFO +0 -115
  19. mergeron-2025.739290.6/README.rst +0 -73
  20. mergeron-2025.739290.6/src/mergeron/data/ftc_merger_investigations_data.zip +0 -0
  21. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/core/pseudorandom_numbers.py +0 -0
  22. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/data/damodaran_margin_data.xls +0 -0
  23. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/demo/__init__.py +0 -0
  24. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/demo/visualize_empirical_margin_distribution.py +0 -0
  25. {mergeron-2025.739290.6 → mergeron-2025.739290.9}/src/mergeron/py.typed +0 -0
@@ -0,0 +1,178 @@
+ Metadata-Version: 2.3
+ Name: mergeron
+ Version: 2025.739290.9
+ Summary: Analyze merger enforcement policy using Python
+ License: MIT
+ Keywords: merger policy analysis,merger guidelines,merger screening,policy presumptions,concentration standards,upward pricing pressure,GUPPI
+ Author: Murthy Kambhampaty
+ Author-email: smk@capeconomics.com
+ Requires-Python: >=3.12,<4.0
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Environment :: Console
+ Classifier: Intended Audience :: End Users/Desktop
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Requires-Dist: aenum (>=3.1.15,<4.0.0)
+ Requires-Dist: attrs (>=23.2)
+ Requires-Dist: bs4 (>=0.0.1)
+ Requires-Dist: certifi (>=2023.11.17)
+ Requires-Dist: h5py (>=3.13.0,<4.0.0)
+ Requires-Dist: jinja2 (>=3.1)
+ Requires-Dist: joblib (>=1.3)
+ Requires-Dist: matplotlib (>=3.8)
+ Requires-Dist: mpmath (>=1.3)
+ Requires-Dist: msgpack (>=1.0)
+ Requires-Dist: msgpack-numpy (>=0.4)
+ Requires-Dist: ruamel-yaml (>=0.18.10,<0.19.0)
+ Requires-Dist: scipy (>=1.12)
+ Requires-Dist: sympy (>=1.12)
+ Requires-Dist: types-beautifulsoup4 (>=4.11.2)
+ Requires-Dist: urllib3 (>=2.2.2,<3.0.0)
+ Requires-Dist: xlrd (>=2.0.1,<3.0.0)
+ Requires-Dist: xlsxwriter (>=3.1)
+ Description-Content-Type: text/x-rst
+
+ mergeron: Merger Policy Analysis with Python
+ ============================================
+
+ Usage
+ -----
+
+ *Visualizing Guidelines boundaries*
+
+ .. code:: python
+
+     %matplotlib inline
+     from mergeron.core import guidelines_boundaries as gbl
+     from mergeron.core import guidelines_boundary_functions as gbf
+     from math import sqrt
+
+     delta_bound = 0.01
+     conc_boundary = gbl.ConcentrationBoundary(delta_bound, "ΔHHI")
+     share_boundary = gbl.ConcentrationBoundary(2 * sqrt(delta_bound / 2), "Combined share")
+
+     divr_boundary_a = gbl.DiversionRatioBoundary(
+         gbl.guppi_from_delta(delta_bound, m_star=1.0, r_bar=0.85),
+         agg_method=gbl.UPPAggrSelector.AVG
+     )
+
+     divr_boundary_i = gbl.DiversionRatioBoundary(
+         gbl.guppi_from_delta(delta_bound, m_star=1.0, r_bar=0.85),
+         agg_method=gbl.UPPAggrSelector.MIN
+     )
+
+     divr_boundary_x = gbl.DiversionRatioBoundary(
+         gbl.guppi_from_delta(delta_bound, m_star=1.0, r_bar=0.85),
+         agg_method=gbl.UPPAggrSelector.MAX
+     )
+
+
+ Plots are written to PDF, typically, with ``backend="pgf"`` as the
+ default backend in the function, ``gbf.boundary_plot``. Here, we set the
+ backend to ``None`` to skip fine-tuning plots for PDF generation.
+
+ .. code:: python
+
+     plt, fig, ax, layout_axis = gbf.boundary_plot(backend=None)
+
+     ax.set_title("Concentration and Diversion Ratio Boundaries")
+
+     ax.plot(conc_boundary.coordinates[:, 0], conc_boundary.coordinates[:, 1], color="black", linestyle="-", label="ΔHHI")
+     ax.plot(share_boundary.coordinates[:, 0], share_boundary.coordinates[:, 1], color="black", linestyle=":", label="Combined share")
+     ax.plot(divr_boundary_a.coordinates[:, 0], divr_boundary_a.coordinates[:, 1], "b-", label="Average Diversion Ratio")
+     ax.plot(divr_boundary_i.coordinates[:, 0], divr_boundary_i.coordinates[:, 1], "r-", label="Minimum Diversion Ratio")
+     ax.plot(divr_boundary_x.coordinates[:, 0], divr_boundary_x.coordinates[:, 1], "g-", label="Maximum Diversion Ratio")
+
+     _ = fig.legend(loc=(0.4, 0.7), frameon=False)
+
+
+
+ .. image:: ./docs/readme_content/output_5_0.png
+
+
+ *Analyzing FTC Merger Investigations Data*
+
+ .. code:: python
+
+     from mergeron.core import ftc_merger_investigations_data as fid
+     import tabulate
+
+     inv_data = fid.construct_data(fid.INVDATA_ARCHIVE_PATH)
+
+
+ We can now analyze counts of markets reported in the source data, by
+ table number. Note that odd-numbered tables report FTC investigations
+ data organized by HHI and ΔHHI, while even-numbered tables report by
+ firm-count.
+
+ .. code:: python
+
+     from mergeron.gen import enforcement_stats as esl
+
+     print("Enforcement Rates in Markets with Entry Barriers, 1996-2003 vs 2004-2011")
+     print()
+     counts_by_delta_1 = esl.enf_cnts_bydelta(
+         inv_data["1996-2003"]["ByHHIandDelta"]["Table 9.2"].data_array
+     )
+     counts_by_delta_2 = esl.enf_cnts_bydelta(
+         inv_data["2004-2011"]["ByHHIandDelta"]["Table 9.2"].data_array
+     )
+     observed_enforcement_rates = list(zip(
+         (
+             {_v: _k for _k, _v in fid.CONC_DELTA_DICT.items()}[i]
+             for i in counts_by_delta_1[:, 0]
+         ),
+         (
+             f"{_a[1] / _a[-1]: <12.2%}" if _a[-1] else "--"
+             for _a in counts_by_delta_1
+         ),
+         (
+             f"{_e[1] / _e[-1]: <12.2%}" if _e[-1] else "--"
+             for _e in counts_by_delta_2
+         ),
+     ))
+
+     observed_enforcement_rates.append([
+         "Total",
+         f"{counts_by_delta_1[:, 1].sum() / counts_by_delta_1[:, -1].sum(): <12.2%}",
+         f"{counts_by_delta_2[:, 1].sum() / counts_by_delta_2[:, -1].sum(): <12.2%}",
+     ])
+
+     print(tabulate.tabulate(
+         observed_enforcement_rates,
+         tablefmt="simple",
+         headers=("ΔHHI", "1996-2003", "2004-2011"),
+         stralign="center",
+         maxcolwidths=36,
+         maxheadercolwidths=36,
+     ))
+
+
+ .. parsed-literal::
+
+     Enforcement Rates in Markets with Entry Barriers, 1996-2003 vs 2004-2011
+
+          ΔHHI        1996-2003    2004-2011
+     -------------   -----------  -----------
+        0 - 100          --         100.00%
+       100 - 200        33.33%       50.00%
+       200 - 300        33.33%       50.00%
+       300 - 500        75.00%       77.78%
+       500 - 800        59.09%       54.55%
+      800 - 1,200       93.33%       81.82%
+     1,200 - 2,500      90.91%       84.38%
+        2,500 +         96.00%      100.00%
+         Total          81.65%       82.86%
+
+
+ Generating synthetic market data and analyzing enforcement rates
+
+
+
@@ -0,0 +1,136 @@
(The 136 lines added as README.rst are identical to the package description embedded in PKG-INFO above, i.e. the "mergeron: Merger Policy Analysis with Python" usage document, and are not repeated here.)
@@ -13,7 +13,7 @@ keywords = [
  "upward pricing pressure",
  "GUPPI",
  ]
- version = "2025.739290.6"
+ version = "2025.739290.9"

  # Classifiers list: https://pypi.org/classifiers/
  classifiers = [
@@ -12,7 +12,7 @@ from ruamel import yaml

  _PKG_NAME: str = Path(__file__).parent.stem

- VERSION = "2025.739290.6"
+ VERSION = "2025.739290.9"

  __version__ = VERSION

@@ -9,7 +9,14 @@ import mpmath # type: ignore
  import numpy as np
  from attrs import cmp_using, field, frozen

- from .. import VERSION, ArrayBIGINT, this_yaml, yaml_rt_mapper # noqa: TID252
+ from .. import ( # noqa: TID252
+     VERSION,
+     ArrayBIGINT,
+     ArrayDouble,
+     this_yaml,
+     yamelize_attrs,
+     yaml_rt_mapper,
+ )

  __version__ = VERSION

@@ -17,6 +24,17 @@ type MPFloat = mpmath.ctx_mp_python.mpf
  type MPMatrix = mpmath.matrix # type: ignore


+ @frozen
+ class GuidelinesBoundary:
+     """Output of a Guidelines boundary function."""
+
+     coordinates: ArrayDouble
+     """Market-share pairs as Cartesian coordinates of points on the boundary."""
+
+     area: float
+     """Area under the boundary."""
+
+
  @frozen
  class INVTableData:
      industry_group: str
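
The new ``GuidelinesBoundary`` record is a frozen ``attrs`` class bundling the boundary coordinates with the area under the boundary. A minimal standalone sketch of how such a frozen record behaves (illustrative class and values, not package output):

.. code:: python

    # Frozen attrs record analogous to GuidelinesBoundary; values are made up.
    import numpy as np
    from attrs import frozen

    @frozen
    class BoundaryRecord:
        coordinates: np.ndarray   # market-share pairs on the boundary
        area: float               # area under the boundary

    rec = BoundaryRecord(np.array([[0.0, 0.1], [0.1, 0.0]]), 0.005)
    print(rec.area)               # 0.005
    # rec.area = 0.1              # would raise attrs.exceptions.FrozenInstanceError
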
@@ -29,6 +47,7 @@ type INVData = MappingProxyType[
  ]
  type INVData_in = Mapping[str, Mapping[str, Mapping[str, INVTableData]]]

+ yamelize_attrs(INVTableData)

  (_, _) = (
      this_yaml.representer.add_representer(
@@ -59,6 +78,16 @@ type INVData_in = Mapping[str, Mapping[str, Mapping[str, INVTableData]]]
      ),
  )

+ _, _ = (
+     this_yaml.representer.add_representer(
+         MappingProxyType,
+         lambda _r, _d: _r.represent_mapping("!mappingproxy", dict(_d.items())),
+     ),
+     this_yaml.constructor.add_constructor(
+         "!mappingproxy", lambda _c, _n: MappingProxyType(dict(**yaml_rt_mapper(_c, _n)))
+     ),
+ )
+

  def _dict_from_mapping(_p: Mapping[Any, Any], /) -> dict[Any, Any]:
      retval: dict[Any, Any] = {}
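
This registration lets the module's YAML instance round-trip ``MappingProxyType`` objects under a ``!mappingproxy`` tag. A self-contained sketch of the same pattern using plain ``ruamel.yaml`` (no mergeron helpers; ``construct_mapping`` stands in for the package's ``yaml_rt_mapper``):

.. code:: python

    # Round-trip a read-only mapping through YAML with a custom tag.
    import io
    from types import MappingProxyType
    from ruamel.yaml import YAML

    yaml = YAML(typ="safe")
    yaml.representer.add_representer(
        MappingProxyType,
        lambda r, d: r.represent_mapping("!mappingproxy", dict(d.items())),
    )
    yaml.constructor.add_constructor(
        "!mappingproxy",
        lambda c, n: MappingProxyType(c.construct_mapping(n, deep=True)),
    )

    buf = io.StringIO()
    yaml.dump(MappingProxyType({"a": 1}), buf)
    print(buf.getvalue())                    # a '!mappingproxy'-tagged mapping
    print(type(yaml.load(buf.getvalue())))   # <class 'mappingproxy'>
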
@@ -76,34 +105,3 @@ def _mappingproxy_from_mapping(_p: Mapping[Any, Any], /) -> MappingProxyType[Any
          else {_k: _v}
      )
      return MappingProxyType(retval)
-
-
- _, _ = (
-     this_yaml.representer.add_representer(
-         MappingProxyType,
-         lambda _r, _d: _r.represent_mapping("!mappingproxy", dict(_d.items())),
-     ),
-     this_yaml.constructor.add_constructor(
-         "!mappingproxy", lambda _c, _n: MappingProxyType(yaml_rt_mapper(_c, _n))
-     ),
- )
-
-
- for _typ in (INVTableData,):
-     _, _ = (
-         this_yaml.representer.add_representer(
-             _typ,
-             lambda _r, _d: _r.represent_mapping(
-                 f"!{_d.__class__.__name__}",
-                 {
-                     _a.name: getattr(_d, _a.name)
-                     for _a in _d.__attrs_attrs__
-                     if _a.name not in {"coordinates", "area"}
-                 },
-             ),
-         ),
-         this_yaml.constructor.add_constructor(
-             f"!{_typ.__name__}",
-             lambda _c, _n: globals()[_n.tag.lstrip("!")](**yaml_rt_mapper(_c, _n)),
-         ),
-     )
@@ -238,7 +238,7 @@ def margin_data_resampler(

      """

-     seed_sequence_ = seed_sequence or SeedSequence(pool_size=8)
+     _seed = seed_sequence or SeedSequence(pool_size=8)

      _x, _w, _ = margin_data_builder(margin_data_getter())

@@ -247,16 +247,14 @@ def margin_data_resampler(

      if isinstance(_sample_size, int):
          return np.array(
-             margin_kde.resample(
-                 _sample_size, seed=Generator(PCG64DXSM(seed_sequence_))
-             )[0]
+             margin_kde.resample(_sample_size, seed=Generator(PCG64DXSM(_seed)))[0]
          )
      elif isinstance(_sample_size, tuple) and len(_sample_size) == 2:
-         ssz, num_cols = _sample_size
-         ret_array = np.empty(_sample_size, np.float64)
-         for idx, seed_seq in enumerate(seed_sequence_.spawn(num_cols)):
+         _ssz, _ncol = _sample_size
+         ret_array = np.empty(_sample_size, float)
+         for idx, _col_seed in enumerate(_seed.spawn(_ncol)):
              ret_array[:, idx] = margin_kde.resample(
-                 ssz, seed=Generator(PCG64DXSM(seed_seq))
+                 _ssz, seed=Generator(PCG64DXSM(_col_seed))
              )[0]
          return ret_array
      else:
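
The refactor keeps the per-column seeding scheme: one parent ``SeedSequence`` spawns an independent child seed for each column, so each column is reproducible on its own. An illustrative NumPy-only sketch of that pattern (the normal draw is a stand-in for ``margin_kde.resample``):

.. code:: python

    import numpy as np
    from numpy.random import Generator, PCG64DXSM, SeedSequence

    parent = SeedSequence(12345, pool_size=8)
    ssz, ncol = 1_000, 3
    out = np.empty((ssz, ncol), float)
    for idx, child in enumerate(parent.spawn(ncol)):
        rng = Generator(PCG64DXSM(child))           # independent stream per column
        out[:, idx] = rng.normal(size=ssz)          # stand-in for margin_kde.resample(...)
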
@@ -12,7 +12,7 @@ from __future__ import annotations

  import re
  import shutil
- from collections.abc import Sequence
+ from collections.abc import Mapping, Sequence
  from operator import itemgetter
  from pathlib import Path
  from types import MappingProxyType
@@ -94,6 +94,10 @@ CNT_FCOUNT_DICT = {
  }


+ def reverse_map(_dict: Mapping[Any, Any]) -> Mapping[Any, Any]:
+     return {_v: _k for _k, _v in _dict.items()}
+
+
  def construct_data(
      _archive_path: Path = INVDATA_ARCHIVE_PATH,
      *,
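
``reverse_map`` factors out the dictionary inversion that the README example writes inline as ``{_v: _k for _k, _v in fid.CONC_DELTA_DICT.items()}``; it assumes the mapping's values are unique and hashable. A trivial self-contained illustration (hypothetical mapping, not the package's ``CONC_DELTA_DICT``):

.. code:: python

    def reverse_map(d):
        # lossless only when values are unique and hashable
        return {v: k for k, v in d.items()}

    labels_to_codes = {"0 - 100": 0, "100 - 200": 1}
    print(reverse_map(labels_to_codes))   # {0: '0 - 100', 1: '100 - 200'}
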
@@ -239,7 +243,7 @@ def _construct_no_evidence_data(_invdata: INVData_in, _data_period: str, /) -> N
          dtn: INVTableData(
              invdata_ind_grp,
              invdata_evid_cond,
-             np.column_stack((
+             np.hstack((
                  invdata_sub_evid_cond_conc[stn0].data_array[:, :2],
                  (
                      invdata_sub_evid_cond_conc[stn0].data_array[:, 2:]
@@ -373,10 +377,10 @@ def _construct_new_period_data(
          np.zeros_like(invdata_array_bld_enfcls),
      )).max(axis=0)

-     invdata_array_bld = np.column_stack((
+     invdata_array_bld = np.hstack((
          invdata_cuml_array[:, :-3],
          invdata_array_bld_enfcls,
-         np.einsum("ij->i", invdata_array_bld_enfcls),
+         np.einsum("ij->i", invdata_array_bld_enfcls)[:, None],
      ))

      data_typesubdict[table_no] = INVTableData(
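
The switch from ``np.column_stack`` to ``np.hstack`` explains the added ``[:, None]``: ``column_stack`` promotes 1-D inputs to columns automatically, while ``hstack`` concatenates along the last axis and needs every block to be 2-D. A minimal NumPy illustration (toy array, unrelated to the investigations data):

.. code:: python

    import numpy as np

    block = np.arange(6).reshape(3, 2)
    row_sums = np.einsum("ij->i", block)             # shape (3,), same as block.sum(axis=1)
    stacked = np.hstack((block, row_sums[:, None]))  # (3, 2) + (3, 1) -> (3, 3)
    print(stacked.shape)                             # (3, 3)
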
@@ -397,7 +401,7 @@ def invdata_build_aggregate_table(
      return INVTableData(
          "Industries in Common",
          "Unrestricted on additional evidence",
-         np.column_stack((
+         np.hstack((
              _data_typesub[hdr_table_no].data_array[:, :-3],
              np.einsum(
                  "ijk->jk",
@@ -146,6 +146,15 @@ class GuidelinesThresholds:
  class ConcentrationBoundary:
      """Concentration parameters, boundary coordinates, and area under concentration boundary."""

+     threshold: float = field(kw_only=False, default=0.01)
+
+     @threshold.validator
+     def _tv(
+         _instance: ConcentrationBoundary, _attribute: Attribute[float], _value: float, /
+     ) -> None:
+         if not 0 <= _value <= 1:
+             raise ValueError("Concentration threshold must lie between 0 and 1.")
+
      measure_name: Literal[
          "ΔHHI",
          "Combined share",
@@ -165,17 +174,8 @@ class ConcentrationBoundary:
          }:
              raise ValueError(f"Invalid name for a concentration measure, {_value!r}.")

-     threshold: float = field(kw_only=False, default=0.01)
-
-     @threshold.validator
-     def _tv(
-         _instance: ConcentrationBoundary, _attribute: Attribute[float], _value: float, /
-     ) -> None:
-         if not 0 <= _value <= 1:
-             raise ValueError("Concentration threshold must lie between 0 and 1.")
-
      precision: int = field(
-         kw_only=False, default=5, validator=validators.instance_of(int)
+         kw_only=True, default=5, validator=validators.instance_of(int)
      )

      area: float = field(init=False, kw_only=True)
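
The reordering moves the ``threshold`` field (with its 0-to-1 range check) ahead of ``measure_name`` and makes ``precision`` keyword-only. A standalone ``attrs`` sketch of that validator pattern (hypothetical class, not the package's ``ConcentrationBoundary``):

.. code:: python

    from attrs import field, frozen, validators

    @frozen
    class BoundarySpec:
        threshold: float = field(default=0.01)

        @threshold.validator
        def _check_threshold(self, attribute, value):
            if not 0 <= value <= 1:
                raise ValueError("Concentration threshold must lie between 0 and 1.")

        precision: int = field(kw_only=True, default=5, validator=validators.instance_of(int))

    BoundarySpec(0.02, precision=4)   # ok: threshold is positional, precision keyword-only
    # BoundarySpec(1.5)               # raises ValueError at construction time
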
@@ -284,7 +284,7 @@ class DiversionRatioBoundary:

      agg_method: UPPAggrSelector = field(
          kw_only=True,
-         default=UPPAggrSelector.MAX,
+         default=UPPAggrSelector.MIN,
          validator=validators.instance_of(UPPAggrSelector),
      )
      """