mergeron 2024.739125.2.tar.gz → 2024.739127.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mergeron might be problematic.
Files changed (33)
  1. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/PKG-INFO +1 -1
  2. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/pyproject.toml +1 -1
  3. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/__init__.py +1 -1
  4. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/gen/data_generation.py +21 -21
  5. mergeron-2024.739127.0/src/mergeron/gen/enforcement_stats.py +355 -0
  6. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/gen/upp_tests.py +16 -17
  7. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/clrrate_cis_summary_table_template.tex.jinja2 +0 -121
  8. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/ftcinvdata_byhhianddelta_table_template.tex.jinja2 +0 -82
  9. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/ftcinvdata_summary_table_template.tex.jinja2 +0 -57
  10. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/ftcinvdata_summarypaired_table_template_tabularray.tex.jinja2 +0 -81
  11. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/ftcinvdata_summarypaired_table_template_tikz.tex.jinja2 +0 -142
  12. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/mergeron.cls +0 -155
  13. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/mergeron_table_collection_template.tex.jinja2 +0 -93
  14. mergeron-2024.739125.2/src/mergeron/data/jinja2_LaTeX_templates/setup_tikz_tables.sty +0 -129
  15. mergeron-2024.739125.2/src/mergeron/gen/enforcement_stats.py +0 -905
  16. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/README.rst +0 -0
  17. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/License.txt +0 -0
  18. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/__init__.py +0 -0
  19. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/damodaran_margin_data.py +0 -0
  20. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/ftc_merger_investigations_data.py +0 -0
  21. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/guidelines_boundaries.py +0 -0
  22. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/guidelines_boundary_functions.py +0 -0
  23. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/guidelines_boundary_functions_extra.py +0 -0
  24. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/core/pseudorandom_numbers.py +0 -0
  25. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/data/__init__.py +0 -0
  26. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/data/damodaran_margin_data.xls +0 -0
  27. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/data/damodaran_margin_data_dict.msgpack +0 -0
  28. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/data/ftc_invdata.msgpack +0 -0
  29. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/demo/__init__.py +0 -0
  30. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/demo/visualize_empirical_margin_distribution.py +0 -0
  31. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/gen/__init__.py +0 -0
  32. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/gen/data_generation_functions.py +0 -0
  33. {mergeron-2024.739125.2 → mergeron-2024.739127.0}/src/mergeron/py.typed +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mergeron
- Version: 2024.739125.2
+ Version: 2024.739127.0
  Summary: Merger Policy Analysis using Python
  License: MIT
  Keywords: merger policy analysis,merger guidelines,merger screening,policy presumptions,concentration standards,upward pricing pressure,GUPPI
@@ -13,7 +13,7 @@ keywords = [
      "upward pricing pressure",
      "GUPPI",
  ]
- version = "2024.739125.2"
+ version = "2024.739127.0"

  # Classifiers list: https://pypi.org/classifiers/
  classifiers = [
@@ -9,7 +9,7 @@ from numpy.typing import NDArray

  _PKG_NAME: str = Path(__file__).parent.stem

- VERSION = "2024.739125.2"
+ VERSION = "2024.739127.0"

  __version__ = VERSION

@@ -34,7 +34,7 @@ from .data_generation_functions import (
      gen_share_data,
      parse_seed_seq_list,
  )
- from .upp_tests import SaveData, enf_cnts, save_data_to_hdf5
+ from .upp_tests import SaveData, compute_upp_test_counts, save_data_to_hdf5

  __version__ = VERSION

@@ -113,15 +113,13 @@ class MarketSample:

      enf_counts: UPPTestsCounts = field(default=None)

-     def gen_market_sample(
+     def _gen_market_sample(
          self,
          /,
          *,
-         sample_size: int = 10**6,
-         seed_seq_list: Sequence[SeedSequence] | None = None,
-         nthreads: int = 16,
-         save_data_to_file: SaveData = False,
-         saved_array_name_suffix: str = "",
+         sample_size: int,
+         seed_seq_list: Sequence[SeedSequence] | None,
+         nthreads: int,
      ) -> MarketDataSample:
          """
          Generate share, diversion ratio, price, and margin data for MarketSpec.
@@ -242,11 +240,11 @@ class MarketSample:
          self,
          /,
          *,
-         sample_size: int,
-         seed_seq_list: Sequence[SeedSequence],
-         nthreads: int,
-         save_data_to_file: SaveData,
-         saved_array_name_suffix: str,
+         sample_size: int = 10**6,
+         seed_seq_list: Sequence[SeedSequence] | None = None,
+         nthreads: int = 16,
+         save_data_to_file: SaveData = False,
+         saved_array_name_suffix: str = "",
      ) -> None:
          """Populate :attr:`data` with generated data

@@ -258,7 +256,7 @@ class MarketSample:


          """
-         self.data = self.gen_market_sample(
+         self.data = self._gen_market_sample(
              sample_size=sample_size, seed_seq_list=seed_seq_list, nthreads=nthreads
          )

@@ -275,7 +273,7 @@ class MarketSample:
              save_data_to_file=save_data_to_file,
          )

-     def sim_enf_cnts(
+     def _sim_enf_cnts(
          self,
          _upp_test_parms: gbl.HMGThresholds,
          _sim_test_regime: UPPTestRegime,
@@ -322,7 +320,7 @@ class MarketSample:

          """

-         _market_data_sample = self.gen_market_sample(
+         _market_data_sample = self._gen_market_sample(
              sample_size=sample_size, seed_seq_list=seed_seq_list, nthreads=nthreads
          )

@@ -339,7 +337,7 @@ class MarketSample:
              save_data_to_file=save_data_to_file,
          )

-         _upp_test_arrays = enf_cnts(
+         _upp_test_arrays = compute_upp_test_counts(
              _market_data_sample, _upp_test_parms, _sim_test_regime
          )

@@ -351,7 +349,7 @@ class MarketSample:

          return _upp_test_arrays

-     def sim_enf_cnts_ll(
+     def _sim_enf_cnts_ll(
          self,
          _enf_parm_vec: gbl.HMGThresholds,
          _sim_test_regime: UPPTestRegime,
@@ -435,7 +433,7 @@ class MarketSample:
          })

          _res_list = Parallel(n_jobs=_thread_count, prefer="threads")(
-             delayed(self.sim_enf_cnts)(
+             delayed(self._sim_enf_cnts)(
                  _enf_parm_vec,
                  _sim_test_regime,
                  **_sim_enf_cnts_kwargs,
@@ -474,7 +472,7 @@ class MarketSample:
          save_data_to_file: SaveData = False,
          saved_array_name_suffix: str = "",
      ) -> None:
-         """Populate :attr:`enf_counts` etimated test counts.
+         """Populate :attr:`enf_counts` with estimated UPP test counts.

          Parameters
          ----------
@@ -510,7 +508,7 @@ class MarketSample:
          """

          if self.data is None:
-             self.enf_counts = self.sim_enf_cnts_ll(
+             self.enf_counts = self._sim_enf_cnts_ll(
                  _enf_parm_vec,
                  _upp_test_regime,
                  sample_size=sample_size,
@@ -520,7 +518,9 @@ class MarketSample:
                  saved_array_name_suffix=saved_array_name_suffix,
              )
          else:
-             self.enf_counts = enf_cnts(self.data, _enf_parm_vec, _upp_test_regime)
+             self.enf_counts = compute_upp_test_counts(
+                 self.data, _enf_parm_vec, _upp_test_regime
+             )
          if save_data_to_file:
              save_data_to_hdf5(
                  self.enf_counts,
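Across the data_generation.py hunks above the pattern is consistent: the worker methods become private (_gen_market_sample, _sim_enf_cnts, _sim_enf_cnts_ll) and the keyword defaults (sample_size=10**6, nthreads=16, and so on) move onto the public wrappers that populate :attr:`data` and :attr:`enf_counts`. A minimal sketch of that pattern, using hypothetical names (Sample, generate, _generate) rather than mergeron's actual classes:

    from dataclasses import dataclass, field

    @dataclass
    class Sample:
        data: tuple | None = field(default=None)

        def _generate(self, /, *, sample_size: int, nthreads: int) -> tuple:
            # Private worker: every argument must now be passed explicitly.
            return (sample_size, nthreads)

        def generate(self, /, *, sample_size: int = 10**6, nthreads: int = 16) -> None:
            # Public wrapper: owns the defaults and populates the instance attribute.
            self.data = self._generate(sample_size=sample_size, nthreads=nthreads)

    s = Sample()
    s.generate()            # defaults now live on the public method
    s.generate(nthreads=4)  # overrides are passed through to the private worker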
@@ -0,0 +1,355 @@
+ """
+ Methods to format and print summary statistics on merger enforcement patterns.
+
+ """
+
+ import enum
+ from collections.abc import Mapping
+
+ import numpy as np
+ from scipy.interpolate import interp1d  # type: ignore
+
+ from .. import VERSION, ArrayBIGINT  # noqa: TID252
+ from ..core import ftc_merger_investigations_data as fid  # noqa: TID252
+ from . import INVResolution
+
+ __version__ = VERSION
+
+
+ @enum.unique
+ class IndustryGroup(enum.StrEnum):
+     ALL = "All Markets"
+     GRO = "Grocery Markets"
+     OIL = "Oil Markets"
+     CHM = "Chemical Markets"
+     PHM = "Pharmaceuticals Markets"
+     HOS = "Hospital Markets"
+     EDS = "Electronically-Controlled Devices and Systems Markets"
+     BRD = "Branded Consumer Goods Markets"
+     OTH = '"Other" Markets'
+     IIC = "Industries in Common"
+
+
+ @enum.unique
+ class OtherEvidence(enum.StrEnum):
+     UR = "Unrestricted on additional evidence"
+     HD = "Hot Documents Identified"
+     HN = "No Hot Documents Identified"
+     HU = "No Evidence on Hot Documents"
+     CN = "No Strong Customer Complaints"
+     CS = "Strong Customer Complaints"
+     CU = "No Evidence on Customer Complaints"
+     ED = "Entry Difficult"
+     EE = "Entry Easy"
+     NE = "No Entry Evidence"
+
+
+ @enum.unique
+ class StatsGrpSelector(enum.StrEnum):
+     FC = "ByFirmCount"
+     HD = "ByHHIandDelta"
+     DL = "ByDelta"
+     ZN = "ByConcZone"
+
+
+ @enum.unique
+ class StatsReturnSelector(enum.StrEnum):
+     CNT = "count"
+     RPT = "rate, point"
+     RIN = "rate, interval"
+
+
+ @enum.unique
+ class SortSelector(enum.StrEnum):
+     UCH = "unchanged"
+     REV = "reversed"
+
+
+ # Parameters and functions to interpolate selected HHI and ΔHHI values
+ # recorded in fractions to ranges of values in points on the HHI scale
+ HHI_DELTA_KNOTS = np.array(
+     [0, 100, 200, 300, 500, 800, 1200, 2500, 5001], dtype=np.int64
+ )
+ HHI_POST_ZONE_KNOTS = np.array([0, 1800, 2400, 10001], dtype=np.int64)
+ hhi_delta_ranger, hhi_zone_post_ranger = (
+     interp1d(_f / 1e4, _f, kind="previous", assume_sorted=True)
+     for _f in (HHI_DELTA_KNOTS, HHI_POST_ZONE_KNOTS)
+ )
+
+
+ HMG_PRESUMPTION_ZONE_MAP = {
+     HHI_POST_ZONE_KNOTS[0]: {
+         HHI_DELTA_KNOTS[0]: (0, 0, 0),
+         HHI_DELTA_KNOTS[1]: (0, 0, 0),
+         HHI_DELTA_KNOTS[2]: (0, 0, 0),
+     },
+     HHI_POST_ZONE_KNOTS[1]: {
+         HHI_DELTA_KNOTS[0]: (0, 1, 1),
+         HHI_DELTA_KNOTS[1]: (1, 1, 2),
+         HHI_DELTA_KNOTS[2]: (1, 1, 2),
+     },
+     HHI_POST_ZONE_KNOTS[2]: {
+         HHI_DELTA_KNOTS[0]: (0, 2, 1),
+         HHI_DELTA_KNOTS[1]: (1, 2, 3),
+         HHI_DELTA_KNOTS[2]: (2, 2, 4),
+     },
+ }
+
+ ZONE_VALS = np.unique(
+     np.vstack([
+         tuple(HMG_PRESUMPTION_ZONE_MAP[_k].values()) for _k in HMG_PRESUMPTION_ZONE_MAP
+     ]),
+     axis=0,
+ )
+
+ ZONE_STRINGS = {
+     0: R"Green Zone (Safeharbor)",
+     1: R"Yellow Zone",
+     2: R"Red Zone (SLC Presumption)",
+     fid.TTL_KEY: "TOTAL",
+ }
+ ZONE_DETAIL_STRINGS_HHI = {
+     0: Rf"HHI < {HHI_POST_ZONE_KNOTS[1]} pts.",
+     1: R"HHI ∈ [{}, {}) pts. and ".format(*HHI_POST_ZONE_KNOTS[1:3]),
+     2: Rf"HHI ⩾ {HHI_POST_ZONE_KNOTS[2]} pts. and ",
+ }
+ ZONE_DETAIL_STRINGS_DELTA = {
+     0: "",
+     1: Rf"ΔHHI < {HHI_DELTA_KNOTS[1]} pts.",
+     2: Rf"ΔHHI ⩾ {HHI_DELTA_KNOTS[1]} pts.}}",
+     3: R"ΔHHI ∈ [{}, {}) pts.".format(*HHI_DELTA_KNOTS[1:3]),
+     4: Rf"ΔHHI ⩾ {HHI_DELTA_KNOTS[2]} pts.",
+ }
+
+
+ def enf_cnts_obs_by_group(
+     _invdata_array_dict: Mapping[str, Mapping[str, Mapping[str, fid.INVTableData]]],
+     _study_period: str,
+     _table_ind_grp: IndustryGroup,
+     _table_evid_cond: OtherEvidence,
+     _stats_group: StatsGrpSelector,
+     _enf_spec: INVResolution,
+     /,
+ ) -> ArrayBIGINT:
+     if _stats_group == StatsGrpSelector.HD:
+         raise ValueError(
+             f"Clearance/enforcement statistics, '{_stats_group}' not valid here."
+         )
+
+     match _stats_group:
+         case StatsGrpSelector.FC:
+             _cnts_func = enf_cnts_byfirmcount
+             _cnts_listing_func = enf_cnts_obs_byfirmcount
+         case StatsGrpSelector.DL:
+             _cnts_func = enf_cnts_bydelta
+             _cnts_listing_func = enf_cnts_obs_byhhianddelta
+         case StatsGrpSelector.ZN:
+             _cnts_func = enf_cnts_byconczone
+             _cnts_listing_func = enf_cnts_obs_byhhianddelta
+
+     return _cnts_func(
+         _cnts_listing_func(
+             _invdata_array_dict,
+             _study_period,
+             _table_ind_grp,
+             _table_evid_cond,
+             _enf_spec,
+         )
+     )
+
+
+ def enf_cnts_obs_byfirmcount(
+     _data_array_dict: Mapping[str, Mapping[str, Mapping[str, fid.INVTableData]]],
+     _data_period: str = "1996-2003",
+     _table_ind_group: IndustryGroup = IndustryGroup.ALL,
+     _table_evid_cond: OtherEvidence = OtherEvidence.UR,
+     _enf_spec: INVResolution = INVResolution.CLRN,
+     /,
+ ) -> ArrayBIGINT:
+     if _data_period not in _data_array_dict:
+         raise ValueError(
+             f"Invalid value of data period, {f'"{_data_period}"'}."
+             f"Must be one of, {tuple(_data_array_dict.keys())!r}."
+         )
+
+     _data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[1]]
+
+     _table_no = table_no_lku(_data_array_dict_sub, _table_ind_group, _table_evid_cond)
+
+     _cnts_array = _data_array_dict_sub[_table_no].data_array
+
+     _ndim_in = 1
+     _stats_kept_indxs = []
+     match _enf_spec:
+         case INVResolution.CLRN:
+             _stats_kept_indxs = [-1, -2]
+         case INVResolution.ENFT:
+             _stats_kept_indxs = [-1, -3]
+         case INVResolution.BOTH:
+             _stats_kept_indxs = [-1, -3, -2]
+
+     return np.column_stack([
+         _cnts_array[:, :_ndim_in],
+         _cnts_array[:, _stats_kept_indxs],
+     ])
+
+
+ def enf_cnts_obs_byhhianddelta(
+     _data_array_dict: Mapping[str, Mapping[str, Mapping[str, fid.INVTableData]]],
+     _data_period: str = "1996-2003",
+     _table_ind_group: IndustryGroup = IndustryGroup.ALL,
+     _table_evid_cond: OtherEvidence = OtherEvidence.UR,
+     _enf_spec: INVResolution = INVResolution.CLRN,
+     /,
+ ) -> ArrayBIGINT:
+     if _data_period not in _data_array_dict:
+         raise ValueError(
+             f"Invalid value of data period, {f'"{_data_period}"'}."
+             f"Must be one of, {tuple(_data_array_dict.keys())!r}."
+         )
+
+     _data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[0]]
+
+     _table_no = table_no_lku(_data_array_dict_sub, _table_ind_group, _table_evid_cond)
+
+     _cnts_array = _data_array_dict_sub[_table_no].data_array
+
+     _ndim_in = 2
+     _stats_kept_indxs = []
+     match _enf_spec:
+         case INVResolution.CLRN:
+             _stats_kept_indxs = [-1, -2]
+         case INVResolution.ENFT:
+             _stats_kept_indxs = [-1, -3]
+         case INVResolution.BOTH:
+             _stats_kept_indxs = [-1, -3, -2]
+
+     return np.column_stack([
+         _cnts_array[:, :_ndim_in],
+         _cnts_array[:, _stats_kept_indxs],
+     ])
+
+
+ def table_no_lku(
+     _data_array_dict_sub: Mapping[str, fid.INVTableData],
+     _table_ind_group: IndustryGroup = IndustryGroup.ALL,
+     _table_evid_cond: OtherEvidence = OtherEvidence.UR,
+     /,
+ ) -> str:
+     if _table_ind_group not in (
+         _igl := [_data_array_dict_sub[_v].industry_group for _v in _data_array_dict_sub]
+     ):
+         raise ValueError(
+             f"Invalid value for industry group, {f'"{_table_ind_group}"'}."
+             f"Must be one of {_igl!r}"
+         )
+
+     _tno = next(
+         _t
+         for _t in _data_array_dict_sub
+         if all((
+             _data_array_dict_sub[_t].industry_group == _table_ind_group,
+             _data_array_dict_sub[_t].additional_evidence == _table_evid_cond,
+         ))
+     )
+
+     return _tno
+
+
+ def enf_cnts_byfirmcount(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
+     _ndim_in = 1
+     return np.vstack([
+         np.concatenate([
+             (f,),
+             np.einsum("ij->j", _cnts_array[_cnts_array[:, 0] == f][:, _ndim_in:]),
+         ])
+         for f in np.unique(_cnts_array[:, 0])
+     ])
+
+
+ def enf_cnts_bydelta(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
+     _ndim_in = 2
+     return np.vstack([
+         np.concatenate([
+             (f,),
+             np.einsum("ij->j", _cnts_array[_cnts_array[:, 1] == f][:, _ndim_in:]),
+         ])
+         for f in HHI_DELTA_KNOTS[:-1]
+     ])
+
+
+ def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
+     # Prepare to tag clearance stats by presumption zone
+     _hhi_zone_post_ranged = hhi_zone_post_ranger(_cnts_array[:, 0] / 1e4)
+     _hhi_delta_ranged = hhi_delta_ranger(_cnts_array[:, 1] / 1e4)
+
+     # Step 1: Tag and agg. from HHI-post and Delta to zone triple
+     # NOTE: Although you could just map and not (partially) aggregate in this step,
+     # the mapped array is a copy, and is larger without partial aggregation, so
+     # aggregation reduces the footprint of this step in memory. Although this point
+     # is more relevant for generated than observed data, using the same coding pattern
+     # in both cases does make life easier
+     _ndim_in = 2
+     _nkeys = 3
+     _cnts_byhhipostanddelta = -1 * np.ones(
+         _nkeys + _cnts_array.shape[1] - _ndim_in, dtype=np.int64
+     )
+     _cnts_byconczone = -1 * np.ones_like(_cnts_byhhipostanddelta)
+     for _hhi_zone_post_lim in HHI_POST_ZONE_KNOTS[:-1]:
+         _level_test = _hhi_zone_post_ranged == _hhi_zone_post_lim
+
+         for _hhi_zone_delta_lim in HHI_DELTA_KNOTS[:3]:
+             _delta_test = (
+                 (_hhi_delta_ranged >= _hhi_zone_delta_lim)
+                 if _hhi_zone_delta_lim == HHI_DELTA_KNOTS[2]
+                 else (_hhi_delta_ranged == _hhi_zone_delta_lim)
+             )
+
+             _zone_val = HMG_PRESUMPTION_ZONE_MAP[_hhi_zone_post_lim][
+                 _hhi_zone_delta_lim
+             ]
+
+             _conc_test = _level_test & _delta_test
+
+             _cnts_byhhipostanddelta = np.vstack((
+                 _cnts_byhhipostanddelta,
+                 np.array(
+                     (
+                         *_zone_val,
+                         *np.einsum("ij->j", _cnts_array[:, _ndim_in:][_conc_test]),
+                     ),
+                     dtype=np.int64,
+                 ),
+             ))
+     _cnts_byhhipostanddelta = _cnts_byhhipostanddelta[1:]
+
+     for _zone_val in ZONE_VALS:
+         # Logical-and of multiple vectors:
+         _hhi_zone_test = (
+             1
+             * np.column_stack([
+                 _cnts_byhhipostanddelta[:, _idx] == _val
+                 for _idx, _val in enumerate(_zone_val)
+             ])
+         ).prod(axis=1) == 1
+
+         _cnts_byconczone = np.vstack((
+             _cnts_byconczone,
+             np.concatenate(
+                 (
+                     _zone_val,
+                     np.einsum(
+                         "ij->j", _cnts_byhhipostanddelta[_hhi_zone_test][:, _nkeys:]
+                     ),
+                 ),
+                 dtype=np.int64,
+             ),
+         ))
+
+     return _cnts_byconczone[1:]
+
+
+ if __name__ == "__main__":
+     print(
+         "This module provides methods to aggregate statistics on merger enforcement patterns for reporting."
+     )
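The new enforcement_stats module leans on the two kind="previous" interpolators defined above: they snap an HHI or ΔHHI value supplied in fractional form (points divided by 10,000) down to the nearest lower knot, in points, and HMG_PRESUMPTION_ZONE_MAP then keys the presumption-zone triple off those knots. A small sketch of that mapping, assuming the released package is installed and importable as mergeron:

    from mergeron.gen.enforcement_stats import (
        HMG_PRESUMPTION_ZONE_MAP,
        hhi_delta_ranger,
        hhi_zone_post_ranger,
    )

    # A post-merger HHI of 2,550 points and a ΔHHI of 250 points, passed as fractions
    hhi_post_knot = int(hhi_zone_post_ranger(2550 / 1e4))  # 2400, from knots (0, 1800, 2400)
    delta_knot = int(hhi_delta_ranger(250 / 1e4))          # 200, from knots (0, 100, 200, ...)

    # Zone triple for that cell of the map (only the first three delta knots
    # are keys of the inner mappings; larger deltas are folded in by
    # enf_cnts_byconczone)
    print(HMG_PRESUMPTION_ZONE_MAP[hhi_post_knot][delta_knot])  # (2, 2, 4)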
@@ -53,7 +53,7 @@ class INVRESCntsArgs(TypedDict, total=False):
      saved_array_name_suffix: str


- def enf_cnts(
+ def compute_upp_test_counts(
      _market_data_sample: MarketDataSample,
      _upp_test_parms: gbl.HMGThresholds,
      _upp_test_regime: UPPTestRegime,
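For code written against earlier releases this is a pure rename: the function keeps the positional signature shown above. A version-tolerant import shim, assuming the package layout shown in this diff:

    try:
        # 2024.739127.0 and later expose the new name
        from mergeron.gen.upp_tests import compute_upp_test_counts
    except ImportError:
        # releases up to 2024.739125.2 used the old name
        from mergeron.gen.upp_tests import enf_cnts as compute_upp_test_counts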
@@ -84,7 +84,7 @@ def enf_cnts(
      """

      _enf_cnts_sim_array = -1 * np.ones((6, 2), np.int64)
-     _upp_test_arrays = gen_upp_test_arrays(
+     _upp_test_arrays = compute_upp_test_arrays(
          _market_data_sample, _upp_test_parms, _upp_test_regime
      )

@@ -191,7 +191,7 @@ def enf_cnts(
      )


- def gen_upp_test_arrays(
+ def compute_upp_test_arrays(
      _market_data: MarketDataSample,
      _upp_test_parms: gbl.HMGThresholds,
      _sim_test_regime: UPPTestRegime,
@@ -213,8 +213,7 @@ def gen_upp_test_arrays(

      """
      _g_bar, _divr_bar, _cmcr_bar, _ipr_bar = (
-         getattr(_upp_test_parms, _f) for _f in ("guppi", "divr", "cmcr", "ipr")
-     )
+         getattr(_upp_test_parms, _f) for _f in ("guppi", "divr", "cmcr", "ipr"))

      _guppi_array, _ipr_array, _cmcr_array = (
          np.empty_like(_market_data.price_array) for _ in range(3)
@@ -236,13 +235,13 @@ def gen_upp_test_arrays(

      np.divide(_ipr_array, 1 - _market_data.pcm_array, out=_cmcr_array)

-     (_divr_test_vector,) = _compute_test_value_seq(
+     (_divr_test_vector,) = _compute_test_array_seq(
          (_market_data.divr_array,),
          _market_data.frmshr_array,
          _sim_test_regime.divr_aggregator,
      )

-     (_guppi_test_vector, _cmcr_test_vector, _ipr_test_vector) = _compute_test_value_seq(
+     (_guppi_test_vector, _cmcr_test_vector, _ipr_test_vector) = _compute_test_array_seq(
          (_guppi_array, _cmcr_array, _ipr_array),
          _market_data.frmshr_array,
          _sim_test_regime.guppi_aggregator,
@@ -267,7 +266,7 @@ def gen_upp_test_arrays(
      return _upp_test_arrays


- def _compute_test_value_seq(
+ def _compute_test_array_seq(
      _test_measure_seq: tuple[ArrayDouble, ...],
      _wt_array: ArrayDouble,
      _aggregator: UPPAggrSelector,
@@ -286,45 +285,45 @@ def _compute_test_value_seq(

      match _aggregator:
          case UPPAggrSelector.AVG:
-             _test_value_seq = (
+             _test_array_seq = (
                  1 / 2 * np.einsum("ij->i", _g)[:, None] for _g in _test_measure_seq
              )
          case UPPAggrSelector.CPA:
-             _test_value_seq = (
+             _test_array_seq = (
                  np.einsum("ij,ij->i", _wt_array[:, ::-1], _g)[:, None]
                  for _g in _test_measure_seq
              )
          case UPPAggrSelector.CPD:
-             _test_value_seq = (
+             _test_array_seq = (
                  np.sqrt(np.einsum("ij,ij,ij->i", _wt_array[:, ::-1], _g, _g))[:, None]
                  for _g in _test_measure_seq
              )
          case UPPAggrSelector.DIS:
-             _test_value_seq = (
+             _test_array_seq = (
                  np.sqrt(1 / 2 * np.einsum("ij,ij->i", _g, _g))[:, None]
                  for _g in _test_measure_seq
              )
          case UPPAggrSelector.MAX:
-             _test_value_seq = (
+             _test_array_seq = (
                  _g.max(axis=1, keepdims=True) for _g in _test_measure_seq
              )
          case UPPAggrSelector.MIN:
-             _test_value_seq = (
+             _test_array_seq = (
                  _g.min(axis=1, keepdims=True) for _g in _test_measure_seq
              )
          case UPPAggrSelector.OSA:
-             _test_value_seq = (
+             _test_array_seq = (
                  np.einsum("ij,ij->i", _wt_array, _g)[:, None]
                  for _g in _test_measure_seq
              )
          case UPPAggrSelector.OSD:
-             _test_value_seq = (
+             _test_array_seq = (
                  np.sqrt(np.einsum("ij,ij,ij->i", _wt_array, _g, _g))[:, None]
                  for _g in _test_measure_seq
              )
          case _:
              raise ValueError("GUPPI/diversion ratio aggregation method is invalid.")
-     return tuple(_test_value_seq)
+     return tuple(_test_array_seq)


  def initialize_hd5(
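The renamed _compute_test_array_seq keeps the same einsum-based reductions: each case collapses an n-by-2 array of firm-level measures to one value per simulated merger. A standalone sketch of what three of those reductions compute, independent of mergeron's own types (shapes and names here are illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    g = rng.random((4, 2))                 # per-firm measures, one row per simulated merger
    w = rng.random((4, 2))
    w /= w.sum(axis=1, keepdims=True)      # row-normalized share weights

    avg = 1 / 2 * np.einsum("ij->i", g)                  # simple average (AVG)
    osa = np.einsum("ij,ij->i", w, g)                    # share-weighted average (OSA)
    dis = np.sqrt(1 / 2 * np.einsum("ij,ij->i", g, g))   # root-mean-square aggregate (DIS)

    assert np.allclose(avg, g.mean(axis=1))
    assert np.allclose(osa, (w * g).sum(axis=1))
    assert np.allclose(dis, np.sqrt((g ** 2).mean(axis=1)))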