mergeron 2025.739290.6__py3-none-any.whl → 2025.739290.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mergeron might be problematic. Click here for more details.

@@ -17,6 +17,7 @@ from scipy.spatial.distance import minkowski as distance_function # type: ignor
17
17
  from sympy import lambdify, simplify, solve, symbols # type: ignore
18
18
 
19
19
  from .. import DEFAULT_REC_RATIO, VERSION, ArrayDouble # noqa: TID252
20
+ from . import GuidelinesBoundary, MPFloat
20
21
  from . import guidelines_boundary_functions as gbf
21
22
 
22
23
  __version__ = VERSION
@@ -529,3 +530,209 @@ def shrratio_boundary_xact_avg_mp( # noqa: PLR0914
529
530
  ) - mp.power(_s_mid, 2)
530
531
 
531
532
  return gbf.GuidelinesBoundary(bdry, float(mp.nstr(bdry_area_simpson, dps)))
533
+
534
+
535
+ # shrratio_boundary_wtd_avg_autoroot
536
+ # this function is about half as fast as the manual one! ... and a touch less precise
537
+ def _shrratio_boundary_wtd_avg_autoroot( # noqa: PLR0914
538
+ _delta_star: float = 0.075,
539
+ _r_val: float = DEFAULT_REC_RATIO,
540
+ /,
541
+ *,
542
+ agg_method: Literal[
543
+ "arithmetic mean", "geometric mean", "distance"
544
+ ] = "arithmetic mean",
545
+ weighting: Literal["own-share", "cross-product-share", None] = "own-share",
546
+ recapture_form: Literal["inside-out", "proportional"] = "inside-out",
547
+ dps: int = 5,
548
+ ) -> GuidelinesBoundary:
549
+ """
550
+ Share combinations on the share-weighted average diversion ratio boundary.
551
+
552
+ Parameters
553
+ ----------
554
+ _delta_star
555
+ Share ratio (:math:`\\overline{d} / \\overline{r}`)
556
+ _r_val
557
+ recapture ratio
558
+ agg_method
559
+ Whether "arithmetic mean", "geometric mean", or "distance".
560
+ weighting
561
+ Whether "own-share" or "cross-product-share" (or None for simple, unweighted average).
562
+ recapture_form
563
+ Whether recapture-ratio is MNL-consistent ("inside-out") or has fixed
564
+ value for both merging firms ("proportional").
565
+ dps
566
+ Number of decimal places for rounding returned shares and area.
567
+
568
+ Returns
569
+ -------
570
+ Array of share-pairs, area under boundary.
571
+
572
+ Notes
573
+ -----
574
+ An analytical expression for the share-weighted arithmetic mean boundary
575
+ is derived and plotted from y-intercept to the ray of symmetry as follows::
576
+
577
+ from sympy import plot as symplot, solve, symbols
578
+ s_1, s_2 = symbols("s_1 s_2", positive=True)
579
+
580
+ g_val, r_val, m_val = 0.06, 0.80, 0.30
581
+ delta_star = g_val / (r_val * m_val)
582
+
583
+ # recapture_form == "inside-out"
584
+ oswag = solve(
585
+ s_1 * s_2 / (1 - s_1)
586
+ + s_2 * s_1 / (1 - (r_val * s_2 + (1 - r_val) * s_1))
587
+ - (s_1 + s_2) * delta_star,
588
+ s_2
589
+ )[0]
590
+ symplot(
591
+ oswag,
592
+ (s_1, 0., delta_star / (1 + delta_star)),
593
+ ylabel=s_2
594
+ )
595
+
596
+ cpswag = solve(
597
+ s_2 * s_2 / (1 - s_1)
598
+ + s_1 * s_1 / (1 - (r_val * s_2 + (1 - r_val) * s_1))
599
+ - (s_1 + s_2) * delta_star,
600
+ s_2
601
+ )[1]
602
+ symplot(
603
+ cpswag,
604
+ (s_1, 0.0, delta_star / (1 + delta_star)), ylabel=s_2
605
+ )
606
+
607
+ # recapture_form == "proportional"
608
+ oswag = solve(
609
+ s_1 * s_2 / (1 - s_1)
610
+ + s_2 * s_1 / (1 - s_2)
611
+ - (s_1 + s_2) * delta_star,
612
+ s_2
613
+ )[0]
614
+ symplot(
615
+ oswag,
616
+ (s_1, 0., delta_star / (1 + delta_star)),
617
+ ylabel=s_2
618
+ )
619
+
620
+ cpswag = solve(
621
+ s_2 * s_2 / (1 - s_1)
622
+ + s_1 * s_1 / (1 - s_2)
623
+ - (s_1 + s_2) * delta_star,
624
+ s_2
625
+ )[1]
626
+ symplot(
627
+ cpswag,
628
+ (s_1, 0.0, delta_star / (1 + delta_star)),
629
+ ylabel=s_2
630
+ )
631
+
632
+
633
+ """
634
+
635
+ _delta_star, _r_val = (mpf(f"{_v}") for _v in (_delta_star, _r_val))
636
+ _s_mid = mp.fdiv(_delta_star, 1 + _delta_star)
637
+
638
+ # initial conditions
639
+ bdry = [(_s_mid, _s_mid)]
640
+ s_1_pre, s_2_pre = _s_mid, _s_mid
641
+ s_2_oddval, s_2_oddsum, s_2_evnsum = True, 0.0, 0.0
642
+
643
+ # parameters for iteration
644
+ _step_size = mp.power(10, -dps)
645
+ theta_ = _step_size * (10 if weighting == "cross-product-share" else 1)
646
+ for s_1 in mp.arange(_s_mid - _step_size, 0, -_step_size):
647
+
648
+ def delta_test(x: MPFloat) -> MPFloat:
649
+ _de_1 = x / (1 - s_1)
650
+ _de_2 = (
651
+ s_1 / (1 - gbf.lerp(s_1, x, _r_val))
652
+ if recapture_form == "inside-out"
653
+ else s_1 / (1 - x)
654
+ )
655
+ _w = (
656
+ mp.fdiv(s_1 if weighting == "cross-product-share" else x, s_1 + x)
657
+ if weighting
658
+ else 0.5
659
+ )
660
+
661
+ match agg_method:
662
+ case "geometric mean":
663
+ delta_test = mp.expm1(
664
+ gbf.lerp(mp.log1p(_de_1), mp.log1p(_de_2), _w)
665
+ )
666
+ case "distance":
667
+ delta_test = mp.sqrt(gbf.lerp(_de_1**2, _de_2**2, _w))
668
+ case _:
669
+ delta_test = gbf.lerp(_de_1, _de_2, _w)
670
+
671
+ return _delta_star - delta_test
672
+
673
+ try:
674
+ s_2 = mp.findroot(
675
+ delta_test,
676
+ x0=(s_2_pre * (1 - theta_), s_2_pre * (1 + theta_)),
677
+ tol=mp.sqrt(_step_size),
678
+ solver="ridder",
679
+ )
680
+ except (mp.ComplexResult, ValueError, ZeroDivisionError) as _e:
681
+ print(s_1, s_2_pre)
682
+ raise _e
683
+
684
+ # Build-up boundary points
685
+ bdry.append((s_1, s_2))
686
+
687
+ # Build up area terms
688
+ s_2_oddsum += s_2 if s_2_oddval else 0
689
+ s_2_evnsum += s_2 if not s_2_oddval else 0
690
+ s_2_oddval = not s_2_oddval
691
+
692
+ # Hold share points
693
+ s_2_pre = s_2
694
+ s_1_pre = s_1
695
+
696
+ if (s_1_pre + s_2_pre) > mpf("0.99875"):
697
+ # Loss of accuracy at 3-9s and up
698
+ break
699
+
700
+ if s_2_oddval:
701
+ s_2_evnsum -= s_2_pre
702
+ else:
703
+ s_2_oddsum -= s_1_pre
704
+
705
+ _s_intcpt = gbf._shrratio_boundary_intcpt(
706
+ s_2_pre,
707
+ _delta_star,
708
+ _r_val,
709
+ recapture_form=recapture_form,
710
+ agg_method=agg_method,
711
+ weighting=weighting,
712
+ )
713
+
714
+ if weighting == "own-share":
715
+ gbd_prtlarea = (
716
+ _step_size * (4 * s_2_oddsum + 2 * s_2_evnsum + _s_mid + s_2_pre) / 3
717
+ )
718
+ # Area under boundary
719
+ bdry_area_total = float(
720
+ 2 * (s_1_pre + gbd_prtlarea)
721
+ - (mp.power(_s_mid, "2") + mp.power(s_1_pre, "2"))
722
+ )
723
+
724
+ else:
725
+ gbd_prtlarea = (
726
+ _step_size * (4 * s_2_oddsum + 2 * s_2_evnsum + _s_mid + _s_intcpt) / 3
727
+ )
728
+ # Area under boundary
729
+ bdry_area_total = float(2 * gbd_prtlarea - mp.power(_s_mid, "2"))
730
+
731
+ bdry.append((mpf("0.0"), _s_intcpt))
732
+ bdry_array = np.array(bdry, float)
733
+
734
+ # Points defining boundary to point-of-symmetry
735
+ return GuidelinesBoundary(
736
+ np.vstack((bdry_array[::-1], bdry_array[1:, ::-1]), dtype=float),
737
+ round(float(bdry_area_total), dps),
738
+ )
mergeron/data/__init__.py CHANGED
@@ -12,10 +12,9 @@ from .. import _PKG_NAME, VERSION # noqa: TID252
12
12
 
13
13
  __version__ = VERSION
14
14
 
15
+ data_resources = resources.files(f"{_PKG_NAME}.data")
15
16
 
16
- DAMODARAN_MARGIN_WORKBOOK = resources.files(f"{_PKG_NAME}.data").joinpath(
17
- "damodaran_margin_data.xls"
18
- )
17
+ DAMODARAN_MARGIN_WORKBOOK = data_resources / "damodaran_margin_data.xls"
19
18
  """
20
19
  Python object pointing to included copy of Prof. Damodaran's margin data
21
20
 
@@ -36,9 +35,7 @@ Use as, for example:
36
35
  shutil.copy2(DAMODARAN_MARGIN_WORKBOOK, Path.home() / f"{DAMODARAN_MARGIN_WORKBOOK.name}")
37
36
  """
38
37
 
39
- FTC_MERGER_INVESTIGATIONS_DATA = resources.files(f"{_PKG_NAME}.data").joinpath(
40
- "ftc_merger_investigations_data.zip"
41
- )
38
+ FTC_MERGER_INVESTIGATIONS_DATA = data_resources / "ftc_merger_investigations_data.zip"
42
39
  """
43
40
  FTC merger investigations data published in 2004, 2007, 2008, and 2013
44
41
 
@@ -46,7 +43,7 @@ NOTES
46
43
  -----
47
44
  Raw data tables published by the FTC are loaded into a nested dictionary, organized by
48
45
  data period, table type, and table number. Each table is stored as a numerical array
49
- (:module:`numpy` arrray), with additonal attrubutes for the industry group and additonal
46
+ (:mod:`numpy` array), with additional attributes for the industry group and additional
50
47
  evidence noted in the source data.
51
48
 
52
49
  Data for additional data periods (time spans) not reported in the source data,
mergeron/gen/__init__.py CHANGED
@@ -13,7 +13,7 @@ from operator import attrgetter
13
13
 
14
14
  import h5py # type: ignore
15
15
  import numpy as np
16
- from attrs import Attribute, Converter, cmp_using, field, frozen, validators
16
+ from attrs import Attribute, Converter, cmp_using, field, frozen
17
17
  from numpy.random import SeedSequence
18
18
 
19
19
  from .. import ( # noqa: TID252
@@ -588,45 +588,39 @@ class INVResolution(str, Enameled):
588
588
  class UPPTestRegime:
589
589
  """Configuration for UPP tests."""
590
590
 
591
- resolution: INVResolution = field(
592
- kw_only=False,
593
- default=INVResolution.ENFT,
594
- validator=validators.in_([INVResolution.CLRN, INVResolution.ENFT]),
595
- )
596
- """Whether to test clearance, enforcement, or both."""
597
-
598
- guppi_aggregator: UPPAggrSelector = field(
599
- kw_only=False, default=UPPAggrSelector.MIN
600
- )
601
- """Aggregator for GUPPI test."""
602
-
603
- divr_aggregator: UPPAggrSelector = field(kw_only=False, default=UPPAggrSelector.MIN)
604
- """Aggregator for diversion ratio test."""
605
-
591
+ resolution: INVResolution = field(kw_only=False, default=INVResolution.ENFT)
592
+ """Whether to test clearance, enforcement."""
606
593
 
607
- @frozen
608
- class UPPTestsRaw:
609
- """Container for arrays marking test failures and successes
610
-
611
- A test success is a draw ("market") that meeets the
612
- specified test criterion, and a test failure is
613
- one that does not; test criteria are evaluated in
614
- :func:`enforcement_stats.gen_upp_arrays`.
615
- """
594
+ @resolution.validator
595
+ def _resvdtr(
596
+ _i: UPPTestRegime, _a: Attribute[INVResolution], _v: INVResolution
597
+ ) -> None:
598
+ if _v == INVResolution.BOTH:
599
+ raise ValueError(
600
+ "GUPPI test cannot be performed with both resolutions; only useful for reporting"
601
+ )
602
+ elif _v not in {INVResolution.CLRN, INVResolution.ENFT}:
603
+ raise ValueError(
604
+ f"Must be one of, {INVResolution.CLRN!r} or {INVResolution.ENFT!r}"
605
+ )
616
606
 
617
- guppi_test_simple: ArrayBoolean
618
- """True if GUPPI estimate meets criterion"""
607
+ guppi_aggregator: UPPAggrSelector = field(kw_only=False)
608
+ """Aggregator for GUPPI test."""
619
609
 
620
- guppi_test_compound: ArrayBoolean
621
- """True if both GUPPI estimate and diversion ratio estimate
622
- meet criterion
623
- """
610
+ @guppi_aggregator.default
611
+ def __gad(_i: UPPTestRegime) -> UPPAggrSelector:
612
+ return (
613
+ UPPAggrSelector.MIN
614
+ if _i.resolution == INVResolution.ENFT
615
+ else UPPAggrSelector.MAX
616
+ )
624
617
 
625
- cmcr_test: ArrayBoolean
626
- """True if CMCR estimate meets criterion"""
618
+ divr_aggregator: UPPAggrSelector = field(kw_only=False)
619
+ """Aggregator for diversion ratio test."""
627
620
 
628
- ipr_test: ArrayBoolean
629
- """True if IPR (partial price-simulation) estimate meets criterion"""
621
+ @divr_aggregator.default
622
+ def __dad(_i: UPPTestRegime) -> UPPAggrSelector:
623
+ return _i.guppi_aggregator
630
624
 
631
625
 
632
626
  @frozen
@@ -453,15 +453,6 @@ class MarketSample:
453
453
  )
454
454
 
455
455
  if not _ndt:
456
- # byte_stream = io.BytesIO()
457
- # with h5py.File(byte_stream, "w") as h5f:
458
- # for _a in self.dataset.__attrs_attrs__:
459
- # if all((
460
- # (_arr := getattr(self.dataset, _a.name)).any(),
461
- # not np.isnan(_arr).all(),
462
- # )):
463
- # h5f.create_dataset(_a.name, data=_arr, fletcher32=True)
464
-
465
456
  with (zpath / f"{name_root}_dataset.h5").open("wb") as _hfh:
466
457
  _hfh.write(self.dataset.to_h5bin())
467
458
 
@@ -490,10 +481,7 @@ class MarketSample:
490
481
  if _dt:
491
482
  with _dp.open("rb") as _hfh:
492
483
  object.__setattr__( # noqa: PLC2801
493
- market_sample_,
494
- "dataset",
495
- # MarketSampleData(**{_a: h5f[_a][:] for _a in h5f}),
496
- MarketSampleData.from_h5f(_hfh),
484
+ market_sample_, "dataset", MarketSampleData.from_h5f(_hfh)
497
485
  )
498
486
  if _et:
499
487
  object.__setattr__( # noqa: PLC2801
@@ -7,7 +7,7 @@ import enum
7
7
  from collections.abc import Mapping
8
8
 
9
9
  import numpy as np
10
- from scipy.interpolate import interp1d # type: ignore
10
+ from scipy.interpolate import make_interp_spline # type: ignore
11
11
 
12
12
  from .. import VERSION, ArrayBIGINT, Enameled, this_yaml # noqa: TID252
13
13
  from ..core import ftc_merger_investigations_data as fid # noqa: TID252
@@ -77,7 +77,7 @@ HHI_DELTA_KNOTS = np.array(
77
77
  )
78
78
  HHI_POST_ZONE_KNOTS = np.array([0, 1800, 2400, 10001], dtype=np.int64)
79
79
  hhi_delta_ranger, hhi_zone_post_ranger = (
80
- interp1d(_f / 1e4, _f, kind="previous", assume_sorted=True)
80
+ make_interp_spline(_f / 1e4, _f, k=0)
81
81
  for _f in (HHI_DELTA_KNOTS, HHI_POST_ZONE_KNOTS)
82
82
  )
83
83
 
@@ -256,11 +256,16 @@ def table_no_lku(
256
256
 
257
257
 
258
258
  def enf_cnts_byfirmcount(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
259
+ if not _cnts_array[:, 0].any():
260
+ return np.array([], int)
261
+
259
262
  ndim_in = 1
260
263
  return np.vstack([
261
264
  np.concatenate([
262
265
  (_i,),
263
- np.einsum("ij->j", _cnts_array[_cnts_array[:, 0] == _i][:, ndim_in:]),
266
+ np.einsum(
267
+ "ij->j", _cnts_array[_cnts_array[:, 0] == _i][:, ndim_in:], dtype=int
268
+ ),
264
269
  ])
265
270
  for _i in np.unique(_cnts_array[:, 0])
266
271
  ])
@@ -271,14 +276,16 @@ def enf_cnts_bydelta(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
271
276
  return np.vstack([
272
277
  np.concatenate([
273
278
  (_k,),
274
- np.einsum("ij->j", _cnts_array[_cnts_array[:, 1] == _k][:, ndim_in:]),
279
+ np.einsum(
280
+ "ij->j", _cnts_array[_cnts_array[:, 1] == _k][:, ndim_in:], dtype=int
281
+ ),
275
282
  ])
276
283
  for _k in HHI_DELTA_KNOTS[:-1]
277
284
  ])
278
285
 
279
286
 
280
287
  def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
281
- if not _cnts_array.any():
288
+ if not _cnts_array[:, 0].any() or np.isnan(_cnts_array[:, 0]).all():
282
289
  return np.array([], int)
283
290
  # Step 1: Tag and agg. from HHI-post and Delta to zone triple
284
291
  # NOTE: Although you could just map and not (partially) aggregate in this step,
@@ -315,7 +322,9 @@ def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
315
322
  np.array(
316
323
  (
317
324
  *zone_val,
318
- *np.einsum("ij->j", _cnts_array[:, _ndim_in:][conc_test]),
325
+ *np.einsum(
326
+ "ij->j", _cnts_array[:, _ndim_in:][conc_test], dtype=int
327
+ ),
319
328
  ),
320
329
  dtype=int,
321
330
  ),
@@ -338,7 +347,9 @@ def enf_cnts_byconczone(_cnts_array: ArrayBIGINT, /) -> ArrayBIGINT:
338
347
  (
339
348
  zone_val,
340
349
  np.einsum(
341
- "ij->j", cnts_byhhipostanddelta[hhi_zone_test][:, _nkeys:]
350
+ "ij->j",
351
+ cnts_byhhipostanddelta[hhi_zone_test][:, _nkeys:],
352
+ dtype=int,
342
353
  ),
343
354
  ),
344
355
  dtype=int,
mergeron/gen/upp_tests.py CHANGED
@@ -21,13 +21,7 @@ from .. import ( # noqa
21
21
  UPPAggrSelector,
22
22
  )
23
23
  from ..core import guidelines_boundaries as gbl # noqa: TID252
24
- from . import (
25
- INVResolution,
26
- MarketSampleData,
27
- UPPTestRegime,
28
- UPPTestsCounts,
29
- UPPTestsRaw,
30
- )
24
+ from . import INVResolution, MarketSampleData, UPPTestRegime, UPPTestsCounts
31
25
  from . import enforcement_stats as esl
32
26
 
33
27
  __version__ = VERSION
@@ -41,7 +35,7 @@ class INVRESCntsArgs(TypedDict, total=False):
41
35
  nthreads: int
42
36
 
43
37
 
44
- def compute_upp_test_counts(
38
+ def compute_upp_test_counts( # noqa: PLR0914
45
39
  _market_data_sample: MarketSampleData,
46
40
  _upp_test_parms: gbl.HMGThresholds,
47
41
  _upp_test_regime: UPPTestRegime,
@@ -71,134 +65,6 @@ def compute_upp_test_counts(
71
65
 
72
66
  """
73
67
 
74
- upp_test_arrays = compute_upp_test_arrays(
75
- _market_data_sample, _upp_test_parms, _upp_test_regime
76
- )
77
-
78
- fcounts, hhi_delta, hhi_post = (
79
- getattr(_market_data_sample, _g) for _g in ("fcounts", "hhi_delta", "hhi_post")
80
- )
81
-
82
- stats_rowlen = 6
83
- # Clearance/enforcement counts --- by firm count
84
- enf_cnts_sim_byfirmcount_array: ArrayBIGINT = np.zeros(stats_rowlen, int)
85
- firmcounts_list = np.unique(fcounts)
86
- if firmcounts_list.any():
87
- for _fc in firmcounts_list:
88
- fc_test = fcounts == _fc
89
-
90
- enf_cnts_sim_byfirmcount_array = np.vstack((
91
- enf_cnts_sim_byfirmcount_array,
92
- np.array([
93
- _fc,
94
- np.einsum("ij->", 1 * fc_test),
95
- *[
96
- np.einsum(
97
- "ij->", 1 * (fc_test & getattr(upp_test_arrays, _a.name))
98
- )
99
- for _a in upp_test_arrays.__attrs_attrs__
100
- ],
101
- ]),
102
- ))
103
-
104
- enf_cnts_sim_byfirmcount_array = enf_cnts_sim_byfirmcount_array[1:]
105
- else:
106
- enf_cnts_sim_byfirmcount_array = np.array([], int)
107
-
108
- # Clearance/enforcement counts --- by delta
109
- enf_cnts_sim_bydelta_array: ArrayBIGINT = np.zeros(stats_rowlen, int)
110
- hhi_deltaranged = esl.hhi_delta_ranger(hhi_delta)
111
- for hhi_deltalim in esl.HHI_DELTA_KNOTS[:-1]:
112
- hhi_deltatest = hhi_deltaranged == hhi_deltalim
113
-
114
- enf_cnts_sim_bydelta_array = np.vstack((
115
- enf_cnts_sim_bydelta_array,
116
- np.array([
117
- hhi_deltalim,
118
- np.einsum("ij->", 1 * hhi_deltatest),
119
- *[
120
- np.einsum(
121
- "ij->", 1 * (hhi_deltatest & getattr(upp_test_arrays, _a.name))
122
- )
123
- for _a in upp_test_arrays.__attrs_attrs__
124
- ],
125
- ]),
126
- ))
127
-
128
- enf_cnts_sim_bydelta_array = enf_cnts_sim_bydelta_array[1:]
129
-
130
- # Clearance/enforcement counts --- by zone
131
- if np.isnan(hhi_post).all():
132
- stats_byconczone_sim = np.array([], int)
133
- else:
134
- try:
135
- hhi_zone_post_ranged = esl.hhi_zone_post_ranger(hhi_post)
136
- except ValueError as _err:
137
- print(hhi_post)
138
- raise _err
139
-
140
- stats_byconczone_sim = np.zeros(stats_rowlen + 1, int)
141
- for hhi_zone_post_knot in esl.HHI_POST_ZONE_KNOTS[:-1]:
142
- level_test = hhi_zone_post_ranged == hhi_zone_post_knot
143
-
144
- for hhi_zone_delta_knot in [0, 100, 200]:
145
- delta_test = (
146
- hhi_deltaranged > 100
147
- if hhi_zone_delta_knot == 200
148
- else hhi_deltaranged == hhi_zone_delta_knot
149
- )
150
-
151
- conc_test = level_test & delta_test
152
-
153
- stats_byconczone_sim = np.vstack((
154
- stats_byconczone_sim,
155
- np.array([
156
- hhi_zone_post_knot,
157
- hhi_zone_delta_knot,
158
- np.einsum("ij->", 1 * conc_test),
159
- *[
160
- np.einsum(
161
- "ij->",
162
- 1 * (conc_test & getattr(upp_test_arrays, _a.name)),
163
- )
164
- for _a in upp_test_arrays.__attrs_attrs__
165
- ],
166
- ]),
167
- ))
168
-
169
- enf_cnts_sim_byconczone_array = esl.enf_cnts_byconczone(stats_byconczone_sim[1:])
170
-
171
- del stats_byconczone_sim
172
- del hhi_delta, hhi_post, fcounts
173
-
174
- return UPPTestsCounts(
175
- enf_cnts_sim_byfirmcount_array,
176
- enf_cnts_sim_bydelta_array,
177
- enf_cnts_sim_byconczone_array,
178
- )
179
-
180
-
181
- def compute_upp_test_arrays(
182
- _market_data_sample: MarketSampleData,
183
- _upp_test_parms: gbl.HMGThresholds,
184
- _sim_test_regime: UPPTestRegime,
185
- /,
186
- ) -> UPPTestsRaw:
187
- """
188
- Generate UPP tests arrays for given configuration and market sample
189
-
190
- Given a standards vector, market
191
-
192
- Parameters
193
- ----------
194
- _market_data_sample
195
- market data sample
196
- _upp_test_parms
197
- guidelines thresholds for testing UPP and related statistics
198
- _sim_test_regime
199
- configuration to use for generating UPP tests
200
-
201
- """
202
68
  g_bar_, divr_bar_, cmcr_bar_, ipr_bar_ = (
203
69
  getattr(_upp_test_parms, _f) for _f in ("guppi", "divr", "cmcr", "ipr")
204
70
  )
@@ -228,32 +94,72 @@ def compute_upp_test_arrays(
228
94
  (divr_test_vector,) = _compute_test_array_seq(
229
95
  (_market_data_sample.divr_array,),
230
96
  _market_data_sample.frmshr_array,
231
- _sim_test_regime.divr_aggregator,
97
+ _upp_test_regime.divr_aggregator,
232
98
  )
233
99
 
234
100
  (guppi_test_vector, cmcr_test_vector, ipr_test_vector) = _compute_test_array_seq(
235
101
  (guppi_array, cmcr_array, ipr_array),
236
102
  _market_data_sample.frmshr_array,
237
- _sim_test_regime.guppi_aggregator,
103
+ _upp_test_regime.guppi_aggregator,
238
104
  )
239
105
  del cmcr_array, ipr_array, guppi_array
240
106
 
241
- if _sim_test_regime.resolution == INVResolution.ENFT:
242
- upp_test_arrays = UPPTestsRaw(
107
+ if _upp_test_regime.resolution == INVResolution.ENFT:
108
+ upp_test_arrays = np.hstack((
243
109
  guppi_test_vector >= g_bar_,
244
110
  (guppi_test_vector >= g_bar_) | (divr_test_vector >= divr_bar_),
245
111
  cmcr_test_vector >= cmcr_bar_,
246
112
  ipr_test_vector >= ipr_bar_,
247
- )
113
+ ))
248
114
  else:
249
- upp_test_arrays = UPPTestsRaw(
115
+ upp_test_arrays = np.hstack((
250
116
  guppi_test_vector < g_bar_,
251
117
  (guppi_test_vector < g_bar_) & (divr_test_vector < divr_bar_),
252
118
  cmcr_test_vector < cmcr_bar_,
253
119
  ipr_test_vector < ipr_bar_,
254
- )
120
+ ))
121
+
122
+ fcounts, hhi_delta, hhi_post = (
123
+ getattr(_market_data_sample, _g) for _g in ("fcounts", "hhi_delta", "hhi_post")
124
+ )
255
125
 
256
- return upp_test_arrays
126
+ # Clearance counts by firm count
127
+ enf_cnts_sim_byfirmcount_array = esl.enf_cnts_byfirmcount(
128
+ np.hstack((fcounts, np.ones_like(fcounts), upp_test_arrays))
129
+ )
130
+
131
+ # Clearance counts by Delta and Concentration Zone
132
+ hhi_zone_ranged = (
133
+ esl.hhi_zone_post_ranger(hhi_post).astype(int)
134
+ if hhi_post.any() and not np.isnan(hhi_post).all()
135
+ else np.zeros_like(hhi_post, int)
136
+ )
137
+ hhi_delta_ranged = esl.hhi_delta_ranger(hhi_delta).astype(int)
138
+
139
+ enf_cnts_sim_byhhianddelta_array = np.hstack(
140
+ (
141
+ hhi_zone_ranged,
142
+ hhi_delta_ranged,
143
+ np.ones_like(hhi_delta_ranged),
144
+ upp_test_arrays,
145
+ # *[
146
+ # 1 * getattr(upp_test_arrays, _a.name)
147
+ # for _a in upp_test_arrays.__attrs_attrs__
148
+ # ],
149
+ ),
150
+ dtype=int,
151
+ )
152
+
153
+ enf_cnts_sim_bydelta_array = esl.enf_cnts_bydelta(enf_cnts_sim_byhhianddelta_array)
154
+ enf_cnts_sim_byconczone_array = esl.enf_cnts_byconczone(
155
+ enf_cnts_sim_byhhianddelta_array
156
+ )
157
+
158
+ return UPPTestsCounts(
159
+ enf_cnts_sim_byfirmcount_array,
160
+ enf_cnts_sim_bydelta_array,
161
+ enf_cnts_sim_byconczone_array,
162
+ )
257
163
 
258
164
 
259
165
  def _compute_test_array_seq(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: mergeron
3
- Version: 2025.739290.6
3
+ Version: 2025.739290.7
4
4
  Summary: Analyze merger enforcement policy using Python
5
5
  License: MIT
6
6
  Keywords: merger policy analysis,merger guidelines,merger screening,policy presumptions,concentration standards,upward pricing pressure,GUPPI