mergeron 2025.739290.2-py3-none-any.whl → 2025.739290.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mergeron might be problematic.

mergeron/gen/upp_tests.py CHANGED
@@ -5,14 +5,10 @@ from generated market data.
  """
 
  from collections.abc import Sequence
- from contextlib import suppress
- from pathlib import Path
- from typing import Any, Literal, TypedDict
+ from typing import TypedDict
 
  import numpy as np
- import tables as ptb # type: ignore
  from numpy.random import SeedSequence
- from numpy.typing import NDArray
 
  from .. import ( # noqa
  VERSION,
@@ -26,7 +22,6 @@ from .. import ( # noqa
  )
  from ..core import guidelines_boundaries as gbl # noqa: TID252
  from . import (
- DataclassInstance,
  INVResolution,
  MarketSampleData,
  UPPTestRegime,
@@ -37,8 +32,6 @@ from . import enforcement_stats as esl
 
  __version__ = VERSION
 
- type SaveData = Literal[False] | tuple[Literal[True], ptb.File, ptb.Group]
-
 
  class INVRESCntsArgs(TypedDict, total=False):
  "Keyword arguments of function, :code:`sim_enf_cnts`"
@@ -46,8 +39,6 @@ class INVRESCntsArgs(TypedDict, total=False):
  sample_size: int
  seed_seq_list: Sequence[SeedSequence] | None
  nthreads: int
- save_data_to_file: SaveData
- saved_array_name_suffix: str
 
 
  def compute_upp_test_counts(
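
Note: `INVRESCntsArgs` is declared with `total=False`, so every key is optional when it is unpacked as keyword arguments for `sim_enf_cnts`. A minimal usage sketch (the class body is copied from the diff; the values are placeholders, not package defaults):

```python
from collections.abc import Sequence
from typing import TypedDict

from numpy.random import SeedSequence


class INVRESCntsArgs(TypedDict, total=False):
    "Keyword arguments of function, :code:`sim_enf_cnts`"
    sample_size: int
    seed_seq_list: Sequence[SeedSequence] | None
    nthreads: int


# total=False makes every key optional; omitted keys fall back to the callee's defaults.
sim_kwargs: INVRESCntsArgs = {"sample_size": 10_000, "nthreads": 4}
# e.g. sim_enf_cnts(..., **sim_kwargs)
```

Also visible in this hunk: the `save_data_to_file` and `saved_array_name_suffix` keys are gone, matching the removal of the PyTables save helpers further down the file.
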
@@ -80,116 +71,115 @@ def compute_upp_test_counts(
 
  """
 
- _enf_cnts_sim_array = -1 * np.ones((6, 2), np.int64)
- _upp_test_arrays = compute_upp_test_arrays(
+ upp_test_arrays = compute_upp_test_arrays(
  _market_data_sample, _upp_test_parms, _upp_test_regime
  )
 
- _fcounts, _hhi_delta, _hhi_post = (
+ fcounts, hhi_delta, hhi_post = (
  getattr(_market_data_sample, _g) for _g in ("fcounts", "hhi_delta", "hhi_post")
  )
 
- _stats_rowlen = 6
+ stats_rowlen = 6
  # Clearance/enforcement counts --- by firm count
- _firmcounts_list = np.unique(_fcounts)
- if _firmcounts_list is not None and np.all(_firmcounts_list >= 0):
- _max_firmcount = max(_firmcounts_list)
-
- _enf_cnts_sim_byfirmcount_array: ArrayBIGINT = -1 * np.ones(_stats_rowlen, int)
- for _firmcount in np.arange(2, _max_firmcount + 1):
- _firmcount_test = _fcounts == _firmcount
-
- _enf_cnts_sim_byfirmcount_array = np.vstack((
- _enf_cnts_sim_byfirmcount_array,
+ enf_cnts_sim_byfirmcount_array: ArrayBIGINT = np.zeros(stats_rowlen, int)
+ firmcounts_list = np.unique(fcounts)
+ if firmcounts_list.any():
+ for _fc in firmcounts_list:
+ fc_test = fcounts == _fc
+
+ enf_cnts_sim_byfirmcount_array = np.vstack((
+ enf_cnts_sim_byfirmcount_array,
  np.array([
- _firmcount,
- np.einsum("ij->", 1 * _firmcount_test),
+ _fc,
+ np.einsum("ij->", 1 * fc_test),
  *[
  np.einsum(
- "ij->",
- 1 * (_firmcount_test & getattr(_upp_test_arrays, _f)),
+ "ij->", 1 * (fc_test & getattr(upp_test_arrays, _a.name))
  )
- for _f in _upp_test_arrays.__dataclass_fields__
+ for _a in upp_test_arrays.__attrs_attrs__
  ],
  ]),
  ))
- _enf_cnts_sim_byfirmcount_array = _enf_cnts_sim_byfirmcount_array[1:]
+
+ enf_cnts_sim_byfirmcount_array = enf_cnts_sim_byfirmcount_array[1:]
  else:
- _enf_cnts_sim_byfirmcount_array = np.array(
- np.nan * np.empty((1, _stats_rowlen)), np.int64
- )
- _enf_cnts_sim_byfirmcount_array[0] = 2
+ enf_cnts_sim_byfirmcount_array = np.array([], int)
 
  # Clearance/enforcement counts --- by delta
- _hhi_delta_ranged = esl.hhi_delta_ranger(_hhi_delta)
- _enf_cnts_sim_bydelta_array: ArrayBIGINT = -1 * np.ones(_stats_rowlen, int)
- for _hhi_delta_lim in esl.HHI_DELTA_KNOTS[:-1]:
- _hhi_delta_test = _hhi_delta_ranged == _hhi_delta_lim
+ enf_cnts_sim_bydelta_array: ArrayBIGINT = np.zeros(stats_rowlen, int)
+ hhi_deltaranged = esl.hhi_delta_ranger(hhi_delta)
+ for hhi_deltalim in esl.HHI_DELTA_KNOTS[:-1]:
+ hhi_deltatest = hhi_deltaranged == hhi_deltalim
 
- _enf_cnts_sim_bydelta_array = np.vstack((
- _enf_cnts_sim_bydelta_array,
+ enf_cnts_sim_bydelta_array = np.vstack((
+ enf_cnts_sim_bydelta_array,
  np.array([
- _hhi_delta_lim,
- np.einsum("ij->", 1 * _hhi_delta_test),
+ hhi_deltalim,
+ np.einsum("ij->", 1 * hhi_deltatest),
  *[
  np.einsum(
- "ij->", 1 * (_hhi_delta_test & getattr(_upp_test_arrays, _f))
+ "ij->", 1 * (hhi_deltatest & getattr(upp_test_arrays, _a.name))
  )
- for _f in _upp_test_arrays.__dataclass_fields__
+ for _a in upp_test_arrays.__attrs_attrs__
  ],
  ]),
  ))
 
- _enf_cnts_sim_bydelta_array = _enf_cnts_sim_bydelta_array[1:]
+ enf_cnts_sim_bydelta_array = enf_cnts_sim_bydelta_array[1:]
 
  # Clearance/enforcement counts --- by zone
- try:
- _hhi_zone_post_ranged = esl.hhi_zone_post_ranger(_hhi_post)
- except ValueError as _err:
- print(_hhi_post)
- raise _err
-
- _stats_byconczone_sim = -1 * np.ones(_stats_rowlen + 1, np.int64)
- for _hhi_zone_post_knot in esl.HHI_POST_ZONE_KNOTS[:-1]:
- _level_test = _hhi_zone_post_ranged == _hhi_zone_post_knot
-
- for _hhi_zone_delta_knot in [0, 100, 200]:
- _delta_test = (
- _hhi_delta_ranged > 100
- if _hhi_zone_delta_knot == 200
- else _hhi_delta_ranged == _hhi_zone_delta_knot
- )
-
- _conc_test = _level_test & _delta_test
-
- _stats_byconczone_sim = np.vstack((
- _stats_byconczone_sim,
- np.array([
- _hhi_zone_post_knot,
- _hhi_zone_delta_knot,
- np.einsum("ij->", 1 * _conc_test),
- *[
- np.einsum(
- "ij->", 1 * (_conc_test & getattr(_upp_test_arrays, _f))
- )
- for _f in _upp_test_arrays.__dataclass_fields__
- ],
- ]),
- ))
-
- _enf_cnts_sim_byconczone_array = esl.enf_cnts_byconczone(_stats_byconczone_sim[1:])
- del _stats_byconczone_sim
- del _hhi_delta, _hhi_post, _fcounts
+ if np.isnan(hhi_post).all():
+ stats_byconczone_sim = np.array([], int)
+ else:
+ try:
+ hhi_zone_post_ranged = esl.hhi_zone_post_ranger(hhi_post)
+ except ValueError as _err:
+ print(hhi_post)
+ raise _err
+
+ stats_byconczone_sim = np.zeros(stats_rowlen + 1, int)
+ for hhi_zone_post_knot in esl.HHI_POST_ZONE_KNOTS[:-1]:
+ level_test = hhi_zone_post_ranged == hhi_zone_post_knot
+
+ for hhi_zone_delta_knot in [0, 100, 200]:
+ delta_test = (
+ hhi_deltaranged > 100
+ if hhi_zone_delta_knot == 200
+ else hhi_deltaranged == hhi_zone_delta_knot
+ )
+
+ conc_test = level_test & delta_test
+
+ stats_byconczone_sim = np.vstack((
+ stats_byconczone_sim,
+ np.array([
+ hhi_zone_post_knot,
+ hhi_zone_delta_knot,
+ np.einsum("ij->", 1 * conc_test),
+ *[
+ np.einsum(
+ "ij->",
+ 1 * (conc_test & getattr(upp_test_arrays, _a.name)),
+ )
+ for _a in upp_test_arrays.__attrs_attrs__
+ ],
+ ]),
+ ))
+
+ enf_cnts_sim_byconczone_array = esl.enf_cnts_byconczone(stats_byconczone_sim[1:])
+
+ del stats_byconczone_sim
+ del hhi_delta, hhi_post, fcounts
 
  return UPPTestsCounts(
- _enf_cnts_sim_byfirmcount_array,
- _enf_cnts_sim_bydelta_array,
- _enf_cnts_sim_byconczone_array,
+ enf_cnts_sim_byfirmcount_array,
+ enf_cnts_sim_bydelta_array,
+ enf_cnts_sim_byconczone_array,
  )
 
 
  def compute_upp_test_arrays(
- _market_data: MarketSampleData,
+ _market_data_sample: MarketSampleData,
  _upp_test_parms: gbl.HMGThresholds,
  _sim_test_regime: UPPTestRegime,
  /,
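
A pattern repeated throughout this hunk: field iteration moves from the dataclass protocol (`for _f in obj.__dataclass_fields__`, which yields field names) to the attrs protocol (`for _a in obj.__attrs_attrs__`, which yields `attrs.Attribute` objects, hence the `_a.name` lookup). A standalone sketch of the new pattern, using a stand-in attrs class rather than the package's own `UPPTestsRaw`:

```python
import attrs
import numpy as np


@attrs.define
class TestArrays:  # stand-in for UPPTestsRaw; field names here are illustrative
    guppi_test_simple: np.ndarray
    guppi_test_compound: np.ndarray


arrays = TestArrays(
    np.array([[True], [False], [True]]), np.array([[True], [True], [True]])
)

# __attrs_attrs__ holds attrs.Attribute objects, so the field name is _a.name;
# np.einsum("ij->", ...) sums every element, here counting True entries.
counts = [
    int(np.einsum("ij->", 1 * getattr(arrays, _a.name)))
    for _a in arrays.__attrs_attrs__
]
print(counts)  # [2, 3]
```
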
@@ -209,59 +199,61 @@ def compute_upp_test_arrays(
  configuration to use for generating UPP tests
 
  """
- _g_bar, _divr_bar, _cmcr_bar, _ipr_bar = (
+ g_bar_, divr_bar_, cmcr_bar_, ipr_bar_ = (
  getattr(_upp_test_parms, _f) for _f in ("guppi", "divr", "cmcr", "ipr")
  )
 
- _guppi_array, _ipr_array, _cmcr_array = (
- np.empty_like(_market_data.price_array) for _ in range(3)
+ guppi_array, ipr_array, cmcr_array = (
+ np.empty_like(_market_data_sample.price_array) for _ in range(3)
  )
 
  np.einsum(
  "ij,ij,ij->ij",
- _market_data.divr_array,
- _market_data.pcm_array[:, ::-1],
- _market_data.price_array[:, ::-1] / _market_data.price_array,
- out=_guppi_array,
+ _market_data_sample.divr_array,
+ _market_data_sample.pcm_array[:, ::-1],
+ _market_data_sample.price_array[:, ::-1] / _market_data_sample.price_array,
+ out=guppi_array,
  )
 
  np.divide(
- np.einsum("ij,ij->ij", _market_data.pcm_array, _market_data.divr_array),
- 1 - _market_data.divr_array,
- out=_ipr_array,
+ np.einsum(
+ "ij,ij->ij", _market_data_sample.pcm_array, _market_data_sample.divr_array
+ ),
+ 1 - _market_data_sample.divr_array,
+ out=ipr_array,
  )
 
- np.divide(_ipr_array, 1 - _market_data.pcm_array, out=_cmcr_array)
+ np.divide(ipr_array, 1 - _market_data_sample.pcm_array, out=cmcr_array)
 
- (_divr_test_vector,) = _compute_test_array_seq(
- (_market_data.divr_array,),
- _market_data.frmshr_array,
+ (divr_test_vector,) = _compute_test_array_seq(
+ (_market_data_sample.divr_array,),
+ _market_data_sample.frmshr_array,
  _sim_test_regime.divr_aggregator,
  )
 
- (_guppi_test_vector, _cmcr_test_vector, _ipr_test_vector) = _compute_test_array_seq(
- (_guppi_array, _cmcr_array, _ipr_array),
- _market_data.frmshr_array,
+ (guppi_test_vector, cmcr_test_vector, ipr_test_vector) = _compute_test_array_seq(
+ (guppi_array, cmcr_array, ipr_array),
+ _market_data_sample.frmshr_array,
  _sim_test_regime.guppi_aggregator,
  )
- del _cmcr_array, _ipr_array, _guppi_array
+ del cmcr_array, ipr_array, guppi_array
 
  if _sim_test_regime.resolution == INVResolution.ENFT:
- _upp_test_arrays = UPPTestsRaw(
- _guppi_test_vector >= _g_bar,
- (_guppi_test_vector >= _g_bar) | (_divr_test_vector >= _divr_bar),
- _cmcr_test_vector >= _cmcr_bar,
- _ipr_test_vector >= _ipr_bar,
+ upp_test_arrays = UPPTestsRaw(
+ guppi_test_vector >= g_bar_,
+ (guppi_test_vector >= g_bar_) | (divr_test_vector >= divr_bar_),
+ cmcr_test_vector >= cmcr_bar_,
+ ipr_test_vector >= ipr_bar_,
  )
  else:
- _upp_test_arrays = UPPTestsRaw(
- _guppi_test_vector < _g_bar,
- (_guppi_test_vector < _g_bar) & (_divr_test_vector < _divr_bar),
- _cmcr_test_vector < _cmcr_bar,
- _ipr_test_vector < _ipr_bar,
+ upp_test_arrays = UPPTestsRaw(
+ guppi_test_vector < g_bar_,
+ (guppi_test_vector < g_bar_) & (divr_test_vector < divr_bar_),
+ cmcr_test_vector < cmcr_bar_,
+ ipr_test_vector < ipr_bar_,
  )
 
- return _upp_test_arrays
+ return upp_test_arrays
 
 
  def _compute_test_array_seq(
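
For readers of `compute_upp_test_arrays`: the in-place `np.einsum`/`np.divide` calls above are vectorized forms of elementwise expressions over the two merging firms' columns (`[:, ::-1]` swaps in the partner firm's column). Restated directly from the diff, with small made-up inputs:

```python
import numpy as np

# Two simulated markets; columns are the two merging firms.
price = np.array([[10.0, 8.0], [5.0, 5.0]])
pcm = np.array([[0.30, 0.40], [0.25, 0.25]])   # price-cost margins
divr = np.array([[0.20, 0.15], [0.10, 0.10]])  # diversion ratios to the partner firm

# Same quantities the new code writes into guppi_array, ipr_array, cmcr_array:
guppi = divr * pcm[:, ::-1] * price[:, ::-1] / price
ipr = (pcm * divr) / (1 - divr)
cmcr = ipr / (1 - pcm)
```
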
@@ -272,119 +264,52 @@ def _compute_test_array_seq(
  _wt_array = (
  _wt_array / np.einsum("ij->i", _wt_array)[:, None]
  if _aggregator
- in (
+ in {
  UPPAggrSelector.CPA,
  UPPAggrSelector.CPD,
  UPPAggrSelector.OSA,
  UPPAggrSelector.OSD,
- )
+ }
  else np.array([0.5, 0.5], float)
  )
 
  match _aggregator:
  case UPPAggrSelector.AVG:
- _test_array_seq = (
+ test_array_seq = (
  1 / 2 * np.einsum("ij->i", _g)[:, None] for _g in _test_measure_seq
  )
  case UPPAggrSelector.CPA:
- _test_array_seq = (
+ test_array_seq = (
  np.einsum("ij,ij->i", _wt_array[:, ::-1], _g)[:, None]
  for _g in _test_measure_seq
  )
  case UPPAggrSelector.CPD:
- _test_array_seq = (
+ test_array_seq = (
  np.sqrt(np.einsum("ij,ij,ij->i", _wt_array[:, ::-1], _g, _g))[:, None]
  for _g in _test_measure_seq
  )
  case UPPAggrSelector.DIS:
- _test_array_seq = (
+ test_array_seq = (
  np.sqrt(1 / 2 * np.einsum("ij,ij->i", _g, _g))[:, None]
  for _g in _test_measure_seq
  )
  case UPPAggrSelector.MAX:
- _test_array_seq = (
- _g.max(axis=1, keepdims=True) for _g in _test_measure_seq
- )
+ test_array_seq = (_g.max(axis=1, keepdims=True) for _g in _test_measure_seq)
  case UPPAggrSelector.MIN:
- _test_array_seq = (
- _g.min(axis=1, keepdims=True) for _g in _test_measure_seq
- )
+ test_array_seq = (_g.min(axis=1, keepdims=True) for _g in _test_measure_seq)
  case UPPAggrSelector.OSA:
- _test_array_seq = (
+ test_array_seq = (
  np.einsum("ij,ij->i", _wt_array, _g)[:, None]
  for _g in _test_measure_seq
  )
  case UPPAggrSelector.OSD:
- _test_array_seq = (
+ test_array_seq = (
  np.sqrt(np.einsum("ij,ij,ij->i", _wt_array, _g, _g))[:, None]
  for _g in _test_measure_seq
  )
  case _:
  raise ValueError("GUPPI/diversion ratio aggregation method is invalid.")
- return tuple(_test_array_seq)
-
-
- def initialize_hd5(
- _h5_path: Path, _hmg_pub_year: HMGPubYear, _test_regime: UPPTestRegime, /
- ) -> tuple[SaveData, str]:
- _h5_title = f"HMG version: {_hmg_pub_year}; Test regime: {_test_regime}"
- if _h5_path.is_file():
- _h5_path.unlink()
- _h5_file = ptb.open_file(_h5_path, mode="w", title=_h5_title)
- _save_data_to_file: SaveData = (True, _h5_file, _h5_file.root)
- _next_subgroup_name_root = "enf_{}_{}_{}_{}".format(
- _hmg_pub_year,
- *(getattr(_test_regime, _f.name).name for _f in _test_regime.__attrs_attrs__),
- )
- return _save_data_to_file, _next_subgroup_name_root
-
-
- def save_data_to_hdf5(
- _dclass: DataclassInstance,
- /,
- *,
- saved_array_name_suffix: str | None = "",
- excluded_attrs: Sequence[str] | None = (),
- save_data_to_file: SaveData = False,
- ) -> None:
- if save_data_to_file:
- _, _h5_file, _h5_group = save_data_to_file
- # Save market data arrays
- excluded_attrs = excluded_attrs or ()
- for _array_name in _dclass.__dataclass_fields__:
- if _array_name in excluded_attrs:
- continue
- save_array_to_hdf5(
- getattr(_dclass, _array_name),
- _array_name,
- _h5_group,
- _h5_file,
- saved_array_name_suffix=saved_array_name_suffix,
- )
-
-
- def save_array_to_hdf5(
- _array_obj: NDArray[Any],
- _array_name: str,
- _h5_group: ptb.Group,
- _h5_file: ptb.File,
- /,
- *,
- saved_array_name_suffix: str | None = None,
- ) -> None:
- _h5_array_name = f"{_array_name}_{saved_array_name_suffix or ''}".rstrip("_")
-
- with suppress(ptb.NoSuchNodeError):
- _h5_file.remove_node(_h5_group, name=_array_name)
-
- _h5_array = ptb.CArray(
- _h5_group,
- _h5_array_name,
- atom=ptb.Atom.from_dtype(_array_obj.dtype),
- shape=_array_obj.shape,
- filters=ptb.Filters(complevel=3, complib="blosc:lz4hc", fletcher32=True),
- )
- _h5_array[:] = _array_obj
+ return tuple(test_array_seq)
 
 
  if __name__ == "__main__":
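
Two things happen in this last hunk of `upp_tests.py`. First, the aggregator `match` block is only renamed and tidied (set membership, one-line generator expressions); each case still collapses the per-firm measures to one value per market draw, e.g. share-weighted averages via `np.einsum("ij,ij->i", w, g)` and distance-style aggregates via a square root of `np.einsum("ij,ij,ij->i", w, g, g)`. A standalone sketch of those reductions with made-up weights and measures:

```python
import numpy as np

g = np.array([[0.06, 0.02], [0.03, 0.05]])  # a measure such as GUPPI, one row per draw
w = np.array([[0.7, 0.3], [0.5, 0.5]])      # row-normalized share weights

own_share_avg = np.einsum("ij,ij->i", w, g)[:, None]                  # OSA-style
own_share_dist = np.sqrt(np.einsum("ij,ij,ij->i", w, g, g))[:, None]  # OSD-style
symmetric_avg = (1 / 2) * np.einsum("ij->i", g)[:, None]              # AVG case
```

Second, the hunk deletes the PyTables-backed helpers (`initialize_hd5`, `save_data_to_hdf5`, `save_array_to_hdf5`) along with the `SaveData` alias, which is why the `tables`-related imports disappeared at the top of the file; the corresponding dependency change appears in METADATA below.
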
{mergeron-2025.739290.2.dist-info → mergeron-2025.739290.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: mergeron
- Version: 2025.739290.2
+ Version: 2025.739290.4
  Summary: Analyze merger enforcement policy using Python
  License: MIT
  Keywords: merger policy analysis,merger guidelines,merger screening,policy presumptions,concentration standards,upward pricing pressure,GUPPI
@@ -23,7 +23,7 @@ Requires-Dist: aenum (>=3.1.15,<4.0.0)
  Requires-Dist: attrs (>=23.2)
  Requires-Dist: bs4 (>=0.0.1)
  Requires-Dist: certifi (>=2023.11.17)
- Requires-Dist: google-re2 (>=1.1)
+ Requires-Dist: h5py (>=3.13.0,<4.0.0)
  Requires-Dist: jinja2 (>=3.1)
  Requires-Dist: joblib (>=1.3)
  Requires-Dist: matplotlib (>=3.8)
@@ -33,7 +33,6 @@ Requires-Dist: msgpack-numpy (>=0.4)
  Requires-Dist: ruamel-yaml (>=0.18.10,<0.19.0)
  Requires-Dist: scipy (>=1.12)
  Requires-Dist: sympy (>=1.12)
- Requires-Dist: tables (>=3.10.1)
  Requires-Dist: types-beautifulsoup4 (>=4.11.2)
  Requires-Dist: urllib3 (>=2.2.2,<3.0.0)
  Requires-Dist: xlrd (>=2.0.1,<3.0.0)
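
The dependency swap (`tables` out, `h5py` in; `google-re2` also dropped) lines up with the deletion of the PyTables helpers from `upp_tests.py`. How this release actually writes HDF5 is not shown in the diff, so the following is only an illustrative h5py counterpart to the removed `save_array_to_hdf5` helper; the file name, dataset name, and gzip compression are assumptions (stock h5py does not bundle the `blosc:lz4hc` filter the old code used):

```python
import h5py
import numpy as np

array_obj = np.random.default_rng(0).random((1_000, 2))

# Hypothetical write roughly mirroring the removed PyTables helper.
with h5py.File("enf_counts_demo.h5", "w") as h5f:
    h5f.create_dataset(
        "frmshr_array",          # dataset name chosen for illustration
        data=array_obj,
        compression="gzip",      # stand-in for the old blosc:lz4hc filter
        compression_opts=3,
        fletcher32=True,         # keep the checksum the old helper enabled
    )
```
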
mergeron-2025.739290.4.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+ mergeron/__init__.py,sha256=AEuHY5uWPKWw8LDwGoe1vKTgwi0CAYKUsvLqzyYfY94,4269
+ mergeron/core/__init__.py,sha256=pn8s3Tg1IM-ZSIRjKt4suJyPkHtt4fT7GSySzXBClWE,3082
+ mergeron/core/empirical_margin_distribution.py,sha256=DFblaHUA6iXbU3L022fOSw39h9FVqJq8BOnxo4SCBTw,9474
+ mergeron/core/ftc_merger_investigations_data.py,sha256=y-CmeXKC8IY5f9iTJ0RIdFAsZFpvN2KM8h2uNiLx2Mk,28485
+ mergeron/core/guidelines_boundaries.py,sha256=4VikatoJAqJnUkw6Y01ow2ym-BmTZZ7r_i01PoU5ULE,15730
+ mergeron/core/guidelines_boundary_functions.py,sha256=hKYvmAY4ev1N_eOP533VrsMwX0PKvVHceS2O40oONy0,28877
+ mergeron/core/guidelines_boundary_functions_extra.py,sha256=tzYODCEh5vcR_pVwIHEsncf1v3aViehe5ZEmZkjmfwQ,16154
+ mergeron/core/pseudorandom_numbers.py,sha256=6CX395445p-LP6MEBQyIajgqr44mt6oi0QPQyIVTgQw,10021
+ mergeron/data/__init__.py,sha256=KtjBlZOl7jwBCAUhrTJB9PdrN39YLYytNiSUSM_gRmA,62
+ mergeron/data/damodaran_margin_data.xls,sha256=Qggl1p5nkOMJI8YUXhkwXQRz-OhRSqBTzz57N0JQyYA,79360
+ mergeron/data/damodaran_margin_data_serialized.zip,sha256=_oCVHI2YpG762WN5-KM5vnAS4OoAGkhP7Vd8KSWuiG8,20384
+ mergeron/data/ftc_invdata.msgpack,sha256=WBFHgi7Ld4R-h2zL2Zc3TOIlKqVrbVFMH1LoI4-T-M0,264664
+ mergeron/data/ftc_invdata.zip,sha256=1J74iTM1RVHTcvqv2v7H3yqeYN3vEvp4ZUszDk1oHQc,15686
+ mergeron/demo/__init__.py,sha256=KtjBlZOl7jwBCAUhrTJB9PdrN39YLYytNiSUSM_gRmA,62
+ mergeron/demo/visualize_empirical_margin_distribution.py,sha256=U40SYsSSghJrDgLHu91A2uRd1dqv5yDkkFM8NFYMGHM,2388
+ mergeron/gen/__init__.py,sha256=J5E3Oq8PSVSL7MkEKGfdRpk15BW2hBNm3go2_BGxF6Q,21789
+ mergeron/gen/data_generation.py,sha256=dPuUUGKfxYpATbCFvqVJUTbwEY7orYuznWpxrb9GQe8,17644
+ mergeron/gen/data_generation_functions.py,sha256=UDh3B4FPwh4SxTdJs7-faLouf7cWUUjHarRkfJc9gjI,26408
+ mergeron/gen/enforcement_stats.py,sha256=469UT7YTAo-My1lvtfMQRgbB2py8Z30hKP0HYKvyrm0,10781
+ mergeron/gen/upp_tests.py,sha256=KVjGHHahdpyo5wEy0QSbSV0eSSiHHY4EOuNC_FMnsdY,9979
+ mergeron/py.typed,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
+ mergeron-2025.739290.4.dist-info/METADATA,sha256=qiFizgv4_0bMnmP-29HwF6m4oU6Ceq82LqZoPiBGd_E,14512
+ mergeron-2025.739290.4.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ mergeron-2025.739290.4.dist-info/RECORD,,
{mergeron-2025.739290.2.dist-info → mergeron-2025.739290.4.dist-info}/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.0.1
+ Generator: poetry-core 2.1.1
  Root-Is-Purelib: true
  Tag: py3-none-any
mergeron-2025.739290.2.dist-info/RECORD DELETED
@@ -1,23 +0,0 @@
- mergeron/__init__.py,sha256=UPdsvB_F8JjzmyClewmgX9s_vQy8CChgDoGxq9EVGsM,3515
- mergeron/core/__init__.py,sha256=qYRA1D4Gx-WSkICqiueWLWPedkWmb_CIn7fgvN7unk4,182
- mergeron/core/empirical_margin_distribution.py,sha256=byO11ROGVQcmbZVWdVJkJ906WnHhx7jcoL5BbxPmX7I,8688
- mergeron/core/ftc_merger_investigations_data.py,sha256=13yEg7L3wpR2XU4v7WAfSH-lIDk0eRv31i0LY39Fk4w,29452
- mergeron/core/guidelines_boundaries.py,sha256=1d-ToNnX-lQpcj4ucfxwwa9Vu2dcQTx8DPJKEZ7SmEM,17723
- mergeron/core/guidelines_boundary_functions.py,sha256=4vMz5DQ72fgj7leUlVJvXBrxdZAeDJP7FoGBEVXDdeQ,34866
- mergeron/core/guidelines_boundary_functions_extra.py,sha256=qL1zT-Xgj2qD2EyBRR87APoVc47MkGM02xYQhNgwEvg,11216
- mergeron/core/pseudorandom_numbers.py,sha256=8_MBHIvWjVZUN4Et7Tt3J1NzWxuzq9vtzFkycW5rBaQ,9918
- mergeron/data/__init__.py,sha256=KtjBlZOl7jwBCAUhrTJB9PdrN39YLYytNiSUSM_gRmA,62
- mergeron/data/damodaran_margin_data.xls,sha256=Qggl1p5nkOMJI8YUXhkwXQRz-OhRSqBTzz57N0JQyYA,79360
- mergeron/data/damodaran_margin_data_dict.msgpack,sha256=sr6s4L69kposEpzGI7jpPb4ULz0UpY-bEYfeNi6UlRA,57621
- mergeron/data/ftc_invdata.msgpack,sha256=WBFHgi7Ld4R-h2zL2Zc3TOIlKqVrbVFMH1LoI4-T-M0,264664
- mergeron/demo/__init__.py,sha256=KtjBlZOl7jwBCAUhrTJB9PdrN39YLYytNiSUSM_gRmA,62
- mergeron/demo/visualize_empirical_margin_distribution.py,sha256=kDwPfhsBjsQdYPyhe4KZG2guB_xHhPaCrvn8fezIz4M,2354
- mergeron/gen/__init__.py,sha256=lzUBHgkkVTF5rRTFEBPOaqiKCHx7aFVe27hkWJcs0x4,22351
- mergeron/gen/data_generation.py,sha256=IBWo-Al2ApHnjVQyIUV35x0OnoMgliJfqy5YrbBCVgo,16101
- mergeron/gen/data_generation_functions.py,sha256=RqPTjCAyYJkOvbLdbgnBJUFJ7ecE1UvPB1rvBhfimy0,26988
- mergeron/gen/enforcement_stats.py,sha256=z_Kjw1OwLue5B-tKFlmEoyGDusi9D6c77foRCmiNGqQ,11532
- mergeron/gen/upp_tests.py,sha256=VTh0rDd0cJ70MwkvRDYx27LWMaA1BPThXfd3D1HKX-E,12489
- mergeron/py.typed,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
- mergeron-2025.739290.2.dist-info/METADATA,sha256=qEqlP5po3UdqHSYtsEK2R8dD3y7rBsl1pM7Wqg9-0e0,14541
- mergeron-2025.739290.2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
- mergeron-2025.739290.2.dist-info/RECORD,,