mergeron-2025.739290.3-py3-none-any.whl → mergeron-2025.739290.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mergeron might be problematic. Click here for more details.

@@ -9,15 +9,16 @@ from collections.abc import Mapping
9
9
  import numpy as np
10
10
  from scipy.interpolate import interp1d # type: ignore
11
11
 
12
- from .. import VERSION, ArrayBIGINT, this_yaml # noqa: TID252
12
+ from .. import VERSION, ArrayBIGINT, Enameled, this_yaml # noqa: TID252
13
13
  from ..core import ftc_merger_investigations_data as fid # noqa: TID252
14
14
  from . import INVResolution
15
15
 
16
16
  __version__ = VERSION
17
17
 
18
18
 
19
+ @this_yaml.register_class
19
20
  @enum.unique
20
- class IndustryGroup(enum.StrEnum):
21
+ class IndustryGroup(str, Enameled):
21
22
  ALL = "All Markets"
22
23
  GRO = "Grocery Markets"
23
24
  OIL = "Oil Markets"
@@ -30,8 +31,9 @@ class IndustryGroup(enum.StrEnum):
30
31
  IIC = "Industries in Common"
31
32
 
32
33
 
34
+ @this_yaml.register_class
33
35
  @enum.unique
34
- class OtherEvidence(enum.StrEnum):
36
+ class OtherEvidence(str, Enameled):
35
37
  UR = "Unrestricted on additional evidence"
36
38
  HD = "Hot Documents Identified"
37
39
  HN = "No Hot Documents Identified"
@@ -44,23 +46,26 @@ class OtherEvidence(enum.StrEnum):
44
46
  NE = "No Entry Evidence"
45
47
 
46
48
 
49
+ @this_yaml.register_class
47
50
  @enum.unique
48
- class StatsGrpSelector(enum.StrEnum):
51
+ class StatsGrpSelector(str, Enameled):
49
52
  FC = "ByFirmCount"
50
53
  HD = "ByHHIandDelta"
51
54
  DL = "ByDelta"
52
55
  ZN = "ByConcZone"
53
56
 
54
57
 
58
+ @this_yaml.register_class
55
59
  @enum.unique
56
- class StatsReturnSelector(enum.StrEnum):
60
+ class StatsReturnSelector(str, Enameled):
57
61
  CNT = "count"
58
62
  RPT = "rate, point"
59
63
  RIN = "rate, interval"
60
64
 
61
65
 
66
+ @this_yaml.register_class
62
67
  @enum.unique
63
- class SortSelector(enum.StrEnum):
68
+ class SortSelector(str, Enameled):
64
69
  UCH = "unchanged"
65
70
  REV = "reversed"
66
71
 
@@ -138,17 +143,17 @@ def enf_cnts_obs_by_group(
138
143
 
139
144
  match _stats_group:
140
145
  case StatsGrpSelector.FC:
141
- _cnts_func = enf_cnts_byfirmcount
142
- _cnts_listing_func = enf_cnts_obs_byfirmcount
146
+ cnts_func = enf_cnts_byfirmcount
147
+ cnts_listing_func = enf_cnts_obs_byfirmcount
143
148
  case StatsGrpSelector.DL:
144
- _cnts_func = enf_cnts_bydelta
145
- _cnts_listing_func = enf_cnts_obs_byhhianddelta
149
+ cnts_func = enf_cnts_bydelta
150
+ cnts_listing_func = enf_cnts_obs_byhhianddelta
146
151
  case StatsGrpSelector.ZN:
147
- _cnts_func = enf_cnts_byconczone
148
- _cnts_listing_func = enf_cnts_obs_byhhianddelta
152
+ cnts_func = enf_cnts_byconczone
153
+ cnts_listing_func = enf_cnts_obs_byhhianddelta
149
154
 
150
- return _cnts_func(
151
- _cnts_listing_func(
155
+ return cnts_func(
156
+ cnts_listing_func(
152
157
  _invdata_array_dict,
153
158
  _study_period,
154
159
  _table_ind_grp,
@@ -172,26 +177,23 @@ def enf_cnts_obs_byfirmcount(
172
177
  f"Must be one of, {tuple(_data_array_dict.keys())!r}."
173
178
  )
174
179
 
175
- _data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[1]]
180
+ data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[1]]
176
181
 
177
- _table_no = table_no_lku(_data_array_dict_sub, _table_ind_group, _table_evid_cond)
182
+ table_no_ = table_no_lku(data_array_dict_sub, _table_ind_group, _table_evid_cond)
178
183
 
179
- _cnts_array = _data_array_dict_sub[_table_no].data_array
184
+ cnts_array = data_array_dict_sub[table_no_].data_array
180
185
 
181
- _ndim_in = 1
182
- _stats_kept_indxs = []
186
+ ndim_in = 1
187
+ stats_kept_indxs = []
183
188
  match _enf_spec:
184
189
  case INVResolution.CLRN:
185
- _stats_kept_indxs = [-1, -2]
190
+ stats_kept_indxs = [-1, -2]
186
191
  case INVResolution.ENFT:
187
- _stats_kept_indxs = [-1, -3]
192
+ stats_kept_indxs = [-1, -3]
188
193
  case INVResolution.BOTH:
189
- _stats_kept_indxs = [-1, -3, -2]
194
+ stats_kept_indxs = [-1, -3, -2]
190
195
 
191
- return np.column_stack([
192
- _cnts_array[:, :_ndim_in],
193
- _cnts_array[:, _stats_kept_indxs],
194
- ])
196
+ return np.column_stack([cnts_array[:, :ndim_in], cnts_array[:, stats_kept_indxs]])
195
197
 
196
198
 
197
199
  def enf_cnts_obs_byhhianddelta(
@@ -208,26 +210,23 @@ def enf_cnts_obs_byhhianddelta(
208
210
  f"Must be one of, {tuple(_data_array_dict.keys())!r}."
209
211
  )
210
212
 
211
- _data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[0]]
213
+ data_array_dict_sub = _data_array_dict[_data_period][fid.TABLE_TYPES[0]]
212
214
 
213
- _table_no = table_no_lku(_data_array_dict_sub, _table_ind_group, _table_evid_cond)
215
+ table_no_ = table_no_lku(data_array_dict_sub, _table_ind_group, _table_evid_cond)
214
216
 
215
- _cnts_array = _data_array_dict_sub[_table_no].data_array
217
+ cnts_array = data_array_dict_sub[table_no_].data_array
216
218
 
217
- _ndim_in = 2
218
- _stats_kept_indxs = []
219
+ ndim_in = 2
220
+ stats_kept_indxs = []
219
221
  match _enf_spec:
220
222
  case INVResolution.CLRN:
221
- _stats_kept_indxs = [-1, -2]
223
+ stats_kept_indxs = [-1, -2]
222
224
  case INVResolution.ENFT:
223
- _stats_kept_indxs = [-1, -3]
225
+ stats_kept_indxs = [-1, -3]
224
226
  case INVResolution.BOTH:
225
- _stats_kept_indxs = [-1, -3, -2]
227
+ stats_kept_indxs = [-1, -3, -2]
226
228
 
227
- return np.column_stack([
228
- _cnts_array[:, :_ndim_in],
229
- _cnts_array[:, _stats_kept_indxs],
230
- ])
229
+ return np.column_stack([cnts_array[:, :ndim_in], cnts_array[:, stats_kept_indxs]])
231
230
 
232
231
 
233
232
  def table_no_lku(
@@ -244,7 +243,7 @@ def table_no_lku(
244
243
  f"Must be one of {_igl!r}"
245
244
  )
246
245
 
247
- _tno = next(
246
+ tno_ = next(
248
247
  _t
249
248
  for _t in _data_array_dict_sub
250
249
  if all((
@@ -253,36 +252,34 @@ def table_no_lku(
253
252
  ))
254
253
  )
255
254
 
256
- return _tno
255
+ return tno_
257
256
 
258
257
 
259
258
  def enf_cnts_byfirmcount(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
260
- _ndim_in = 1
259
+ ndim_in = 1
261
260
  return np.vstack([
262
261
  np.concatenate([
263
- (f,),
264
- np.einsum("ij->j", _cnts_array[_cnts_array[:, 0] == f][:, _ndim_in:]),
262
+ (_i,),
263
+ np.einsum("ij->j", _cnts_array[_cnts_array[:, 0] == _i][:, ndim_in:]),
265
264
  ])
266
- for f in np.unique(_cnts_array[:, 0])
265
+ for _i in np.unique(_cnts_array[:, 0])
267
266
  ])
268
267
 
269
268
 
270
269
  def enf_cnts_bydelta(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
271
- _ndim_in = 2
270
+ ndim_in = 2
272
271
  return np.vstack([
273
272
  np.concatenate([
274
- (f,),
275
- np.einsum("ij->j", _cnts_array[_cnts_array[:, 1] == f][:, _ndim_in:]),
273
+ (_k,),
274
+ np.einsum("ij->j", _cnts_array[_cnts_array[:, 1] == _k][:, ndim_in:]),
276
275
  ])
277
- for f in HHI_DELTA_KNOTS[:-1]
276
+ for _k in HHI_DELTA_KNOTS[:-1]
278
277
  ])
279
278
 
280
279
 
281
280
  def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
282
- # Prepare to tag clearance stats by presumption zone
283
- _hhi_zone_post_ranged = hhi_zone_post_ranger(_cnts_array[:, 0] / 1e4)
284
- _hhi_delta_ranged = hhi_delta_ranger(_cnts_array[:, 1] / 1e4)
285
-
281
+ if not _cnts_array.any():
282
+ return np.array([], int)
286
283
  # Step 1: Tag and agg. from HHI-post and Delta to zone triple
287
284
  # NOTE: Although you could just map and not (partially) aggregate in this step,
288
285
  # the mapped array is a copy, and is larger without partial aggregation, so
@@ -291,89 +288,61 @@ def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
291
288
  # in both cases does make life easier
292
289
  _ndim_in = 2
293
290
  _nkeys = 3
294
- _cnts_byhhipostanddelta: ArrayBIGINT = -1 * np.ones(
295
- _nkeys + _cnts_array.shape[1] - _ndim_in, dtype=int
291
+ cnts_byhhipostanddelta, cnts_byconczone = (
292
+ np.zeros((1, _nkeys + _cnts_array.shape[1] - _ndim_in), dtype=int)
293
+ for _ in range(2)
296
294
  )
297
- _cnts_byconczone: ArrayBIGINT = -1 * np.ones_like(_cnts_byhhipostanddelta)
295
+
296
+ # Prepare to tag clearance stats by presumption zone
297
+ hhi_zone_post_ranged = hhi_zone_post_ranger(_cnts_array[:, 0] / 1e4)
298
+ hhi_delta_ranged = hhi_delta_ranger(_cnts_array[:, 1] / 1e4)
298
299
  for _hhi_zone_post_lim in HHI_POST_ZONE_KNOTS[:-1]:
299
- _level_test = _hhi_zone_post_ranged == _hhi_zone_post_lim
300
+ zone_test = hhi_zone_post_ranged == _hhi_zone_post_lim
300
301
 
301
- for _hhi_zone_delta_lim in HHI_DELTA_KNOTS[:3]:
302
- _delta_test = (
303
- (_hhi_delta_ranged >= _hhi_zone_delta_lim)
304
- if _hhi_zone_delta_lim == HHI_DELTA_KNOTS[2]
305
- else (_hhi_delta_ranged == _hhi_zone_delta_lim)
302
+ for hhi_zone_delta_lim in HHI_DELTA_KNOTS[:3]:
303
+ delta_test = (
304
+ (hhi_delta_ranged >= hhi_zone_delta_lim)
305
+ if hhi_zone_delta_lim == HHI_DELTA_KNOTS[2]
306
+ else (hhi_delta_ranged == hhi_zone_delta_lim)
306
307
  )
307
308
 
308
- _zone_val = HMG_PRESUMPTION_ZONE_MAP[_hhi_zone_post_lim][
309
- _hhi_zone_delta_lim
310
- ]
309
+ zone_val = HMG_PRESUMPTION_ZONE_MAP[_hhi_zone_post_lim][hhi_zone_delta_lim]
311
310
 
312
- _conc_test = _level_test & _delta_test
311
+ conc_test = zone_test & delta_test
313
312
 
314
- _cnts_byhhipostanddelta = np.vstack((
315
- _cnts_byhhipostanddelta,
313
+ cnts_byhhipostanddelta = np.vstack((
314
+ cnts_byhhipostanddelta,
316
315
  np.array(
317
316
  (
318
- *_zone_val,
319
- *np.einsum("ij->j", _cnts_array[:, _ndim_in:][_conc_test]),
317
+ *zone_val,
318
+ *np.einsum("ij->j", _cnts_array[:, _ndim_in:][conc_test]),
320
319
  ),
321
320
  dtype=int,
322
321
  ),
323
322
  ))
324
- _cnts_byhhipostanddelta = _cnts_byhhipostanddelta[1:]
323
+ cnts_byhhipostanddelta = cnts_byhhipostanddelta[1:]
325
324
 
326
- for _zone_val in ZONE_VALS:
325
+ for zone_val in ZONE_VALS:
327
326
  # Logical-and of multiple vectors:
328
- _hhi_zone_test = (
327
+ hhi_zone_test = (
329
328
  1
330
329
  * np.column_stack([
331
- _cnts_byhhipostanddelta[:, _idx] == _val
332
- for _idx, _val in enumerate(_zone_val)
330
+ cnts_byhhipostanddelta[:, _idx] == _val
331
+ for _idx, _val in enumerate(zone_val)
333
332
  ])
334
333
  ).prod(axis=1) == 1
335
334
 
336
- _cnts_byconczone = np.vstack((
337
- _cnts_byconczone,
335
+ cnts_byconczone = np.vstack((
336
+ cnts_byconczone,
338
337
  np.concatenate(
339
338
  (
340
- _zone_val,
339
+ zone_val,
341
340
  np.einsum(
342
- "ij->j", _cnts_byhhipostanddelta[_hhi_zone_test][:, _nkeys:]
341
+ "ij->j", cnts_byhhipostanddelta[hhi_zone_test][:, _nkeys:]
343
342
  ),
344
343
  ),
345
344
  dtype=int,
346
345
  ),
347
346
  ))
348
347
 
349
- return _cnts_byconczone[1:]
350
-
351
-
352
- for _typ in (
353
- IndustryGroup,
354
- OtherEvidence,
355
- StatsGrpSelector,
356
- StatsReturnSelector,
357
- SortSelector,
358
- ):
359
- # NOTE: If additional enums are defined in this module,
360
- # add themn to the list above
361
-
362
- _, _ = (
363
- this_yaml.representer.add_representer(
364
- _typ,
365
- lambda _r, _d: _r.represent_scalar(f"!{_d.__class__.__name__}", _d.name),
366
- ),
367
- this_yaml.constructor.add_constructor(
368
- f"!{_typ.__name__}",
369
- lambda _c, _n, /: getattr(
370
- globals().get(_n.tag.lstrip("!")), _c.construct_scalar(_n)
371
- ),
372
- ),
373
- )
374
-
375
-
376
- if __name__ == "__main__":
377
- print(
378
- "This module provides methods to aggregate statistics on merger enforcement patterns for reporting."
379
- )
348
+ return cnts_byconczone[1:]