mergeron 2025.739355.1__tar.gz → 2025.739355.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mergeron might be problematic.

Files changed (20)
  1. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/PKG-INFO +5 -5
  2. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/pyproject.toml +6 -8
  3. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/__init__.py +2 -2
  4. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/ftc_merger_investigations_data.py +22 -17
  5. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/guidelines_boundary_functions.py +62 -8
  6. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/guidelines_boundary_functions_extra.py +19 -21
  7. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/gen/data_generation_functions.py +31 -36
  8. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/README.rst +0 -0
  9. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/__init__.py +0 -0
  10. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/empirical_margin_distribution.py +0 -0
  11. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/guidelines_boundaries.py +0 -0
  12. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/core/pseudorandom_numbers.py +0 -0
  13. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/data/__init__.py +0 -0
  14. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/data/damodaran_margin_data_serialized.zip +0 -0
  15. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/data/ftc_merger_investigations_data.zip +0 -0
  16. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/gen/__init__.py +0 -0
  17. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/gen/data_generation.py +0 -0
  18. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/gen/enforcement_stats.py +0 -0
  19. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/gen/upp_tests.py +0 -0
  20. {mergeron-2025.739355.1 → mergeron-2025.739355.3}/src/mergeron/py.typed +0 -0
--- mergeron-2025.739355.1/PKG-INFO
+++ mergeron-2025.739355.3/PKG-INFO
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: mergeron
-Version: 2025.739355.1
+Version: 2025.739355.3
 Summary: Python for analyzing merger enforcement policy
 License: MIT
 Keywords: merger enforcement policy,merger guidelines,merger screening,enforcement presumptions,concentration standards,diversion ratio,upward pricing pressure,GUPPI
 Author: Murthy Kambhampaty
 Author-email: smk@capeconomics.com
-Requires-Python: >=3.12
+Requires-Python: >=3.12,<4.0
 Classifier: Development Status :: 4 - Beta
 Classifier: Environment :: Console
 Classifier: Intended Audience :: End Users/Desktop
@@ -14,10 +14,9 @@ Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: aenum (>=3.1.15)
 Requires-Dist: attrs (>=25.3.0)
 Requires-Dist: beautifulsoup4 (>=4.13.3)
@@ -28,6 +27,7 @@ Requires-Dist: joblib (>=1.4.2)
 Requires-Dist: lxml (>=5.3.2)
 Requires-Dist: matplotlib (>=3.10.1)
 Requires-Dist: mpmath (>=1.3.0)
+Requires-Dist: numexpr (>=2.10.2,<3.0.0)
 Requires-Dist: python-calamine (>=0.3.2)
 Requires-Dist: ruamel-yaml (>=0.18.10)
 Requires-Dist: scipy (>=1.15.2)
--- mergeron-2025.739355.1/pyproject.toml
+++ mergeron-2025.739355.3/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
-name = "mergeron"
 authors = [{ name = "Murthy Kambhampaty", email = "smk@capeconomics.com" }]
+name = "mergeron"
 description = "Python for analyzing merger enforcement policy"
 readme = "README.rst"
 license = "MIT"
@@ -15,7 +15,8 @@ keywords = [
     "upward pricing pressure",
     "GUPPI",
 ]
-version = "2025.739355.1"
+version = "2025.739355.3"
+requires-python = ">=3.12,<4.0"

 # Classifiers list: https://pypi.org/classifiers/
 classifiers = [
@@ -26,14 +27,11 @@ classifiers = [
     "License :: OSI Approved :: MIT License",
     "Operating System :: OS Independent",
     "Programming Language :: Python",
-    "Programming Language :: Python :: 3",
-    "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: Implementation :: CPython",
+    "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
 ]

-requires-python = ">=3.12"
-
 dependencies = [
     "aenum>=3.1.15",
     "attrs>=25.3.0",
@@ -51,6 +49,7 @@ dependencies = [
     "sympy>=1.13.3",
     "types-beautifulsoup4>=4.12.0",
     "urllib3>=2.3.0",
+    "numexpr (>=2.10.2,<3.0.0)",
 ]

 [build-system]
@@ -202,7 +201,6 @@ plugins = "numpy.typing.mypy_plugin"
 [tool.pytest.ini_options]
 log_auto_indent = 4
 minversion = "8.0"
-testpaths = ["tests"]
 addopts = [
     "--import-mode=importlib",
     "--cov=mergeron",
--- mergeron-2025.739355.1/src/mergeron/__init__.py
+++ mergeron-2025.739355.3/src/mergeron/__init__.py
@@ -15,7 +15,7 @@ from ruamel import yaml

 _PKG_NAME: str = Path(__file__).parent.name

-VERSION = "2025.739355.1"
+VERSION = "2025.739355.3"

 __version__ = VERSION

@@ -39,7 +39,7 @@ NTHREADS = 2 * cpu_count()

 PKG_ATTRS_MAP: dict[str, type] = {}

-np.set_printoptions(precision=24, floatmode="fixed")
+np.set_printoptions(precision=28, floatmode="fixed", legacy=False)

 type HMGPubYear = Literal[1992, 2010, 2023]

--- mergeron-2025.739355.1/src/mergeron/core/ftc_merger_investigations_data.py
+++ mergeron-2025.739355.3/src/mergeron/core/ftc_merger_investigations_data.py
@@ -36,6 +36,8 @@ from . import (

 __version__ = VERSION

+# cspell: "includeRegExpList": ["strings", "comments", /( {3}['"]{3}).*?\\1/g]
+
 WORK_DIR = globals().get("WORK_DIR", PKG_WORK_DIR)
 """Redefined, in case the user defines WORK_DIR betweeen module imports."""

@@ -61,7 +63,7 @@ CONC_HHI_DICT = {
     "3,000 - 3,999": 3000,
     "4,000 - 4,999": 4000,
     "5,000 - 6,999": 5000,
-    "7,000 +": 7000,
+    "7,000 - 10,000": 7000,
     "TOTAL": TTL_KEY,
 }
 CONC_DELTA_DICT = {
@@ -72,7 +74,7 @@ CONC_DELTA_DICT = {
     "500 - 800": 500,
     "800 - 1,200": 800,
     "1,200 - 2,500": 1200,
-    "2,500 +": 2500,
+    "2,500 - 5,000": 2500,
     "TOTAL": TTL_KEY,
 }
 CNT_FCOUNT_DICT = {
@@ -90,8 +92,8 @@ CNT_FCOUNT_DICT = {
 }


-def reverse_map(_dict: Mapping[Any, Any]) -> Mapping[Any, Any]:
-    """Reverse a mapping."""
+def invert_map(_dict: Mapping[Any, Any]) -> Mapping[Any, Any]:
+    """Invert mapping, mapping values to keys of the original mapping."""
     return {_v: _k for _k, _v in _dict.items()}


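Note: invert_map simply swaps keys and values, so it is a true inverse only when the original values are unique, as they are in the lookup dicts above; with duplicate values, the last key wins. A trivial illustration, using labels shown in this diff:

    # Sketch only: what invert_map() returns for a slice of CONC_HHI_DICT.
    hhi_labels = {"5,000 - 6,999": 5000, "7,000 - 10,000": 7000}
    inverted = {_v: _k for _k, _v in hhi_labels.items()}
    assert inverted[7000] == "7,000 - 10,000"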
@@ -432,7 +434,7 @@ def _parse_invdata() -> INVData:
         " the source code as well as to install an additional package"
         " not distributed with this package or identified as a requirement."
     )
-    import pymupdf  # type: ignore
+    import pymupdf  # type: ignore # noqa: PLC0415

    invdata_docnames = _download_invdata(FID_WORK_DIR)

@@ -610,41 +612,44 @@ def _process_table_blks_conc_type(
 ) -> ArrayBIGINT:
     conc_row_pat = re.compile(r"((?:0|\d,\d{3}) (?:- \d+,\d{3}|\+)|TOTAL)")

-    col_titles_array = tuple(CONC_DELTA_DICT.values())
-    col_totals: ArrayBIGINT = np.zeros(len(col_titles_array), int)
+    col_titles = tuple(CONC_DELTA_DICT.values())
+    col_totals: ArrayBIGINT = np.zeros(len(col_titles), int)
     invdata_array: ArrayBIGINT = np.array(None)

     for tbl_blk in _table_blocks:
         if conc_row_pat.match(_blk_str := tbl_blk[-3]):
             row_list: list[str] = _blk_str.strip().split("\n")
             row_title: str = row_list.pop(0)
-            row_key: int = CONC_HHI_DICT[row_title]
+            row_key: int = (
+                7000 if row_title.startswith("7,000") else CONC_HHI_DICT[row_title]
+            )
             row_total = np.array(row_list.pop().replace(",", "").split("/"), int)
-            row_array_list: list[list[int]] = []
+            data_row_list: list[list[int]] = []
             while row_list:
                 enfd_val, clsd_val = row_list.pop(0).split("/")
-                row_array_list += [
+                data_row_list += [
                     [
                         row_key,
-                        col_titles_array[len(row_array_list)],
+                        col_titles[len(data_row_list)],
                         int(enfd_val),
                         int(clsd_val),
                         int(enfd_val) + int(clsd_val),
                     ]
                 ]
-            row_array = np.array(row_array_list, int)
+            data_row_array = np.array(data_row_list, int)
+            del data_row_list
             # Check row totals
-            assert_array_equal(row_total, np.einsum("ij->j", row_array[:, 2:4]))
+            assert_array_equal(row_total, np.einsum("ij->j", data_row_array[:, 2:4]))

             if row_key == TTL_KEY:
-                col_totals = row_array
+                col_totals = data_row_array
             else:
                 invdata_array = (
-                    np.vstack((invdata_array, row_array))
+                    np.vstack((invdata_array, data_row_array))
                     if invdata_array.shape
-                    else row_array
+                    else data_row_array
                 )
-                del row_array, row_array_list
+                del data_row_array
         else:
             continue

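Note: the parsing loop above treats each concentration-table block as a newline-separated string: the HHI-range row title first, one "enforced/closed" pair per ΔHHI column next, and the row total last; the new row_key branch maps both the older "7,000 +" label and the newer "7,000 - 10,000" label to the same 7000 key. A minimal sketch of that layout (hypothetical counts, for illustration only):

    # Illustration of the assumed block layout; not actual FTC data.
    blk = "5,000 - 6,999\n3/1\n2/0\n5/1"
    rows = blk.strip().split("\n")
    row_title = rows.pop(0)  # "5,000 - 6,999" -> row_key 5000 via CONC_HHI_DICT
    row_total = [int(_v) for _v in rows.pop().replace(",", "").split("/")]  # [5, 1]
    pairs = [tuple(int(_v) for _v in _r.split("/")) for _r in rows]  # [(3, 1), (2, 0)]
    assert row_total == [sum(_p[0] for _p in pairs), sum(_p[1] for _p in pairs)]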
--- mergeron-2025.739355.1/src/mergeron/core/guidelines_boundary_functions.py
+++ mergeron-2025.739355.3/src/mergeron/core/guidelines_boundary_functions.py
@@ -2,17 +2,25 @@

 import decimal
 from collections.abc import Callable
+from types import ModuleType
 from typing import Literal, TypedDict

 import matplotlib as mpl
 import matplotlib.axes as mpa
+import matplotlib.offsetbox as mof
 import matplotlib.patches as mpp
 import matplotlib.pyplot as plt
 import matplotlib.ticker as mpt
 import numpy as np
 from mpmath import mp, mpf  # type: ignore

-from .. import DEFAULT_REC_RATIO, VERSION, ArrayBIGINT, ArrayDouble  # noqa: TID252
+from .. import (  # noqa: TID252
+    _PKG_NAME,
+    DEFAULT_REC_RATIO,
+    VERSION,
+    ArrayBIGINT,
+    ArrayDouble,
+)
 from . import GuidelinesBoundary, MPFloat

 __version__ = VERSION
@@ -781,6 +789,7 @@ def round_cust(


 def boundary_plot(
+    _plt: ModuleType,
     *,
     mktshare_plot_flag: bool = True,
     mktshare_axes_flag: bool = True,
@@ -793,11 +802,11 @@
     if backend == "pgf":
         mpl.use("pgf")

-        plt.rcParams.update({
+        mpl.rcParams.update({
+            "text.usetex": True,
             "pgf.rcfonts": False,
             "pgf.texsystem": "lualatex",
             "pgf.preamble": "\n".join([
-                R"\pdfvariable minorversion=7",
                 R"\usepackage{fontspec}",
                 R"\usepackage{luacode}",
                 R"\begin{luacode}",
@@ -809,27 +818,40 @@
                 R' "luaotfload.patch_font", embedfull, "embedfull"'
                 R")",
                 R"\end{luacode}",
-                R"\setmainfont{STIX Two Text}",
-                r"\setsansfont{Fira Sans Light}",
-                R"\setmonofont[Scale=MatchLowercase,]{Fira Mono}",
                 R"\defaultfontfeatures[\rmfamily]{",
                 R" Ligatures={TeX, Common},",
                 R" Numbers={Proportional, Lining},",
                 R" }",
-                R"\defaultfontfeatures[\sffamily]{",
+                R"\defaultfontfeatures[\sffamily, \dvsfamily]{",
                 R" Ligatures={TeX, Common},",
                 R" Numbers={Monospaced, Lining},",
                 R" LetterSpace=0.50,",
                 R" }",
+                R"\setmainfont{STIX Two Text}",
+                R"\setsansfont{Fira Sans Light}",
+                R"\setmonofont[Scale=MatchLowercase,]{Fira Mono}",
+                R"\newfontfamily\dvsfamily{DejaVu Sans}",
                 R"\usepackage{mathtools}",
                 R"\usepackage{unicode-math}",
-                R"\setmathfont[math-style=ISO]{STIX Two Math}",
+                R"\setmathfont{STIX Two Math}[math-style=ISO,range={scr,bfscr},StylisticSet=01]",
                 R"\usepackage[",
                 R" activate={true, nocompatibility},",
                 R" tracking=true,",
                 R" ]{microtype}",
             ]),
         })
+    else:
+        if backend:
+            mpl.use(backend)
+        mpl.rcParams.update({
+            "text.usetex": False,
+            "pgf.rcfonts": True,
+            "font.family": "sans-serif",
+            "font.sans-serif": ["DejaVu Sans", "sans-serif"],
+            "font.monospace": ["DejaVu Mono", "monospace"],
+            "font.serif": ["stix", "serif"],
+            "mathtext.fontset": "stix",
+        })

     # Initialize a canvas with a single figure (set of axes)
     fig_ = plt.figure(figsize=(5, 5), dpi=600)
@@ -909,6 +931,38 @@
     for _t in axl_[::2]:
         _t.set_visible(False)

+    # package version badge
+    # https://futurile.net/2016/03/14/partial-colouring-text-in-matplotlib-with-latex/
+    badge_fmt_str = R"{{\dvsfamily {}}}" if backend == "pgf" else "{}"
+    badge_txt_list = [
+        badge_fmt_str.format(_s) for _s in [f"{_PKG_NAME}", f"v{VERSION}"]
+    ]
+
+    badge_fmt_list = [
+        _btp := {"color": "#fff", "backgroundcolor": "#555", "size": 2},
+        _btp | {"backgroundcolor": "#007ec6"},
+    ]
+
+    badge_box = mof.HPacker(
+        sep=2.6,
+        children=[
+            mof.TextArea(_t, textprops=_f)
+            for _t, _f in zip(badge_txt_list, badge_fmt_list)
+        ],
+    )
+
+    ax0_.add_artist(
+        mof.AnnotationBbox(
+            badge_box,
+            xy=(0.5, 0.5),
+            xybox=(-0.05, -0.12),
+            xycoords="data",
+            boxcoords="data",
+            frameon=False,
+            pad=0,
+        )
+    )
+
     # Axis labels
     if mktshare_axes_flag:
         # x-axis
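Note: the new version badge is built from matplotlib's offsetbox toolkit: one TextArea per label, packed side by side with HPacker and pinned to the axes with AnnotationBbox. A self-contained sketch of the same technique (illustrative labels, colors, and placement, not the package's exact settings):

    # Minimal badge demo using matplotlib.offsetbox; values here are illustrative.
    import matplotlib.offsetbox as mof
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(5, 5))
    grey = {"color": "#fff", "backgroundcolor": "#555", "size": 6}
    blue = grey | {"backgroundcolor": "#007ec6"}
    badge = mof.HPacker(
        sep=2.6,
        children=[
            mof.TextArea("mergeron", textprops=grey),
            mof.TextArea("v2025.739355.3", textprops=blue),
        ],
    )
    ax.add_artist(
        mof.AnnotationBbox(
            badge,
            xy=(0.2, 0.05),
            xycoords="axes fraction",
            frameon=False,
            pad=0,
        )
    )
    fig.savefig("badge_demo.png", dpi=150)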
--- mergeron-2025.739355.1/src/mergeron/core/guidelines_boundary_functions_extra.py
+++ mergeron-2025.739355.3/src/mergeron/core/guidelines_boundary_functions_extra.py
@@ -122,9 +122,9 @@ def diversion_share_boundary_qdtr_wtd_avg(
         Array of share-pairs, area under boundary.

     """
-    _delta_star = mpf(f"{_delta_star}")
+    _delta_star, _r_val = (mpf(_v) for _v in (f"{_delta_star}", f"{_r_val}"))
     _s_mid = _delta_star / (1 + _delta_star)
-    s_naught = 0
+    s_naught = mpf("0.0")

     s_1, s_2 = symbols("s_1:3", positive=True)

@@ -143,19 +143,17 @@
             )

             _bdry_func = solve(_bdry_eqn, s_2)[0]
-            s_naught = (
-                float(solve(simplify(_bdry_eqn.subs({s_2: 1 - s_1})), s_1)[0])  # type: ignore
-                if recapture_form == "inside-out"
-                else 0
-            )
-            bdry_area = float(
-                2
-                * (
+
+            if recapture_form == "inside-out":
+                s_naught = mpf(solve(simplify(_bdry_eqn.subs({s_2: 1 - s_1})), s_1)[0])
+
+            bdry_area = mp.fmul(
+                "2",
+                (
                     s_naught
                     + mp.quad(lambdify(s_1, _bdry_func, "mpmath"), (s_naught, _s_mid))
-                )
-                - (_s_mid**2 + s_naught**2)
-            )
+                ),
+            ) - (_s_mid**2 + s_naught**2)

         case "cross-product-share":
             mp.trap_complex = False
@@ -173,14 +171,14 @@
             )

             _bdry_func = solve(_bdry_eqn, s_2)[1]
-            bdry_area = float(
-                2
-                * (
+            bdry_area = (
+                mp.fmul(
+                    "2",
                     mp.quad(
                         lambdify(s_1, _bdry_func.subs({d_star: _delta_star}), "mpmath"),
                         (0, _s_mid),
-                    )
-                ).real
+                    ).real,
+                )
                 - _s_mid**2
             )

@@ -199,13 +197,13 @@
             )

             _bdry_func = solve(_bdry_eqn, s_2)[0]
-            bdry_area = float(
-                2 * (mp.quad(lambdify(s_1, _bdry_func, "mpmath"), (0, _s_mid)))
+            bdry_area = (
+                mp.fmul("2", mp.quad(lambdify(s_1, _bdry_func, "mpmath"), (0, _s_mid)))
                 - _s_mid**2
             )

     return GuidelinesBoundaryCallable(
-        lambdify(s_1, _bdry_func, "numpy"), bdry_area, s_naught
+        lambdify(s_1, _bdry_func, "numpy"), float(bdry_area), float(s_naught)
     )


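Note: the rewritten area computations above keep the quadrature and the scaling in mpmath (mp.quad, mp.fmul) and cast to float only when the GuidelinesBoundaryCallable is assembled. A minimal sketch of that pattern with a placeholder integrand (not one of the package's boundary functions):

    # Sketch only: arbitrary-precision "2 * integral - s_mid**2" area pattern.
    from mpmath import mp

    mp.dps = 30
    s_mid = mp.mpf("0.2")

    def bdry(s_1):
        # placeholder boundary s_2(s_1); the real one comes from sympy's solve()
        return s_mid * (1 - s_1)

    bdry_area = mp.fmul("2", mp.quad(bdry, (0, s_mid))) - s_mid**2
    print(float(bdry_area))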
--- mergeron-2025.739355.1/src/mergeron/gen/data_generation_functions.py
+++ mergeron-2025.739355.3/src/mergeron/gen/data_generation_functions.py
@@ -4,6 +4,7 @@ from __future__ import annotations

 from typing import Literal

+import numexpr as ne
 import numpy as np
 from attrs import evolve
 from numpy.random import SeedSequence
@@ -438,13 +439,15 @@ def gen_divr_array(
     """
     divr_array: ArrayDouble
     if _recapture_form == RECForm.FIXED:
-        divr_array = np.divide(
-            _recapture_ratio * _frmshr_array[:, ::-1], 1 - _frmshr_array
+        _frmshr_array_rev = _frmshr_array[:, ::-1]
+        divr_array = ne.evaluate(
+            "_recapture_ratio * _frmshr_array_rev / (1 - _frmshr_array)"
         )

     else:
-        purchprob_array = _aggregate_purchase_prob * _frmshr_array
-        divr_array = np.divide(purchprob_array[:, ::-1], 1 - purchprob_array)
+        purchprob_array = ne.evaluate("_aggregate_purchase_prob * _frmshr_array")
+        purchprob_array_rev = purchprob_array[:, ::-1]  # noqa: F841
+        divr_array = ne.evaluate("purchprob_array_rev / (1 - purchprob_array)")

     divr_assert_test = (
         (np.round(np.einsum("ij->i", _frmshr_array), 15) == 1)
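Note: with RECForm.FIXED the diversion ratio is the recapture ratio times the merging partner's share divided by one minus the own share; the numexpr version evaluates the same elementwise expression as the replaced numpy code, looking the arrays up by name in the calling frame. A small equivalence check (toy shares, not package data):

    # Sketch: ne.evaluate() as a drop-in for the numpy expression it replaces.
    import numexpr as ne
    import numpy as np

    _recapture_ratio = 0.85
    _frmshr_array = np.array([[0.20, 0.30], [0.10, 0.25]])
    _frmshr_array_rev = _frmshr_array[:, ::-1]

    divr_ne = ne.evaluate("_recapture_ratio * _frmshr_array_rev / (1 - _frmshr_array)")
    divr_np = _recapture_ratio * _frmshr_array_rev / (1 - _frmshr_array)
    assert np.allclose(divr_ne, divr_np)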
@@ -535,12 +538,12 @@ def gen_margin_price_data(
                 for _p in (_frmshr_array, _nth_firm_share)
             )
         case PriceSpec.RNG:
-            price_array_gen = prng(_pr_rng_seed_seq).choice(
+            _price_array_gen = prng(_pr_rng_seed_seq).choice(
                 1 + np.arange(pr_max_ratio), size=(len(_frmshr_array), 3)
             )
-            price_array = price_array_gen[:, :2]
-            nth_firm_price = price_array_gen[:, [2]]
-            # del _price_array_gen
+            price_array = _price_array_gen[:, :2]
+            nth_firm_price = _price_array_gen[:, [2]]
+            del _price_array_gen
         case PriceSpec.CSY:
             # TODO:
             # evolve PCMRestriction (save running MNL test twice); evolve copy of _mkt_sample_spec=1q
@@ -561,26 +564,22 @@
                 getattr(margin_data, _f) for _f in ("pcm_array", "mnl_test_array")
             )

-            price_array_here = 1 / (1 - pcm_array)
+            price_array_here = ne.evaluate("1 / (1 - pcm_array)")
             price_array = price_array_here[:, :2]
-            nth_firm_price = price_array_here[:, [-1]]
+            nth_firm_price = price_array_here[:, [-1]]  # noqa: F841
             if _pcm_spec.pcm_restriction == PCMRestriction.MNL:
                 # Generate i.i.d. PCMs then take PCM0 and construct PCM1
                 # Regenerate MNL test
-                purchase_prob_array = _aggregate_purchase_prob * _frmshr_array
-                pcm_array[:, 1] = np.divide(
-                    (
-                        m1_nr := np.divide(
-                            np.einsum(
-                                "ij,ij,ij->ij",
-                                price_array[:, [0]],
-                                pcm_array[:, [0]],
-                                1 - purchase_prob_array[:, [0]],
-                            ),
-                            1 - purchase_prob_array[:, [1]],
-                        )
-                    ),
-                    1 + m1_nr,
+                purchase_prob_array = ne.evaluate(
+                    "_aggregate_purchase_prob * _frmshr_array"
+                )
+
+                _pr_0, _pi_0 = price_array_here[:, [0]], purchase_prob_array[:, [0]]
+                _pcm_0 = pcm_array[:, [0]]
+                _pr_1, _pi_1 = price_array_here[:, [1]], purchase_prob_array[:, [1]]
+
+                pcm_array[:, [1]] = ne.evaluate(
+                    "_pcm_0 * (1 - _pi_0) / (1 - _pi_1 + _pcm_0 * (_pi_1 - _pi_0))"
                 )
                 mnl_test_array = (pcm_array[:, [1]] >= 0) & (pcm_array[:, [1]] <= 1)

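Note: the single numexpr expression above is algebraically identical to the two-step np.divide construction it replaces once the normalization shown earlier in this block, price = 1 / (1 - margin), is substituted for the first firm's price. A quick sympy check of that identity (sketch; symbol names are illustrative):

    # old: m1 = A / (1 + A), with A = p0 * m0 * (1 - pi0) / (1 - pi1) and p0 = 1 / (1 - m0)
    # new: m1 = m0 * (1 - pi0) / (1 - pi1 + m0 * (pi1 - pi0))
    import sympy as sp

    m0, pi0, pi1 = sp.symbols("m0 pi0 pi1", positive=True)
    p0 = 1 / (1 - m0)
    a = p0 * m0 * (1 - pi0) / (1 - pi1)
    assert sp.simplify(a / (1 + a) - m0 * (1 - pi0) / (1 - pi1 + m0 * (pi1 - pi0))) == 0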
@@ -601,8 +600,8 @@
     )

     # _price_array = _price_array.astype(np.float64)
-    rev_array = price_array * _frmshr_array
-    nth_firm_rev = nth_firm_price * _nth_firm_share
+    rev_array = ne.evaluate("price_array * _frmshr_array")
+    nth_firm_rev = ne.evaluate("nth_firm_price * _nth_firm_share")

     # Although `_test_rev_ratio_inv` is not fixed at 10%,
     # the ratio has not changed since inception of the HSR filing test,
@@ -723,16 +722,12 @@ def _gen_margin_data(
         # Impose FOCs from profit-maximization with MNL demand
         purchase_prob_array = _aggregate_purchase_prob * _frmshr_array

-        pcm_array[:, [1]] = np.divide(
-            np.einsum(
-                "ij,ij,ij->ij",
-                _price_array[:, [0]],
-                pcm_array[:, [0]],
-                1 - purchase_prob_array[:, [0]],
-            ),
-            np.einsum(
-                "ij,ij->ij", _price_array[:, [1]], 1 - purchase_prob_array[:, [1]]
-            ),
+        _pr_0, _pi_0 = _price_array[:, [0]], purchase_prob_array[:, [0]]
+        _pcm_0 = pcm_array[:, [0]]
+        _pr_1, _pi_1 = _price_array[:, [1]], purchase_prob_array[:, [1]]
+
+        pcm_array[:, [1]] = ne.evaluate(
+            "_pr_0 * _pcm_0 * (1 - _pi_0) / (_pr_1 * (1 - _pi_1))"
         )

     mnl_test_array = (pcm_array[:, 1] >= 0) & (pcm_array[:, 1] <= 1)
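Note: the numexpr expression above encodes the same first-order condition as the einsum/np.divide version it replaces, namely p1 * m1 * (1 - pi1) = p0 * m0 * (1 - pi0) solved for the second merging firm's margin m1. A small numerical check that the two evaluations agree (toy values only):

    # Sketch: the replaced numpy computation and the numexpr string give the same m1.
    import numexpr as ne
    import numpy as np

    _pr_0 = np.array([[1.25], [1.40]])
    _pr_1 = np.array([[1.10], [1.30]])
    _pcm_0 = np.array([[0.20], [0.35]])
    _pi_0 = np.array([[0.12], [0.08]])
    _pi_1 = np.array([[0.15], [0.10]])

    m1_ne = ne.evaluate("_pr_0 * _pcm_0 * (1 - _pi_0) / (_pr_1 * (1 - _pi_1))")
    m1_np = _pr_0 * _pcm_0 * (1 - _pi_0) / (_pr_1 * (1 - _pi_1))
    assert np.allclose(m1_ne, m1_np)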