PostBOUND 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. postbound/__init__.py +211 -0
  2. postbound/_base.py +6 -0
  3. postbound/_bench.py +1012 -0
  4. postbound/_core.py +1153 -0
  5. postbound/_hints.py +1373 -0
  6. postbound/_jointree.py +1079 -0
  7. postbound/_pipelines.py +1121 -0
  8. postbound/_qep.py +1986 -0
  9. postbound/_stages.py +876 -0
  10. postbound/_validation.py +734 -0
  11. postbound/db/__init__.py +72 -0
  12. postbound/db/_db.py +2348 -0
  13. postbound/db/_duckdb.py +785 -0
  14. postbound/db/mysql.py +1195 -0
  15. postbound/db/postgres.py +4216 -0
  16. postbound/experiments/__init__.py +12 -0
  17. postbound/experiments/analysis.py +674 -0
  18. postbound/experiments/benchmarking.py +54 -0
  19. postbound/experiments/ceb.py +877 -0
  20. postbound/experiments/interactive.py +105 -0
  21. postbound/experiments/querygen.py +334 -0
  22. postbound/experiments/workloads.py +980 -0
  23. postbound/optimizer/__init__.py +92 -0
  24. postbound/optimizer/__init__.pyi +73 -0
  25. postbound/optimizer/_cardinalities.py +369 -0
  26. postbound/optimizer/_joingraph.py +1150 -0
  27. postbound/optimizer/dynprog.py +1825 -0
  28. postbound/optimizer/enumeration.py +432 -0
  29. postbound/optimizer/native.py +539 -0
  30. postbound/optimizer/noopt.py +54 -0
  31. postbound/optimizer/presets.py +147 -0
  32. postbound/optimizer/randomized.py +650 -0
  33. postbound/optimizer/tonic.py +1479 -0
  34. postbound/optimizer/ues.py +1607 -0
  35. postbound/qal/__init__.py +343 -0
  36. postbound/qal/_qal.py +9678 -0
  37. postbound/qal/formatter.py +1089 -0
  38. postbound/qal/parser.py +2344 -0
  39. postbound/qal/relalg.py +4257 -0
  40. postbound/qal/transform.py +2184 -0
  41. postbound/shortcuts.py +70 -0
  42. postbound/util/__init__.py +46 -0
  43. postbound/util/_errors.py +33 -0
  44. postbound/util/collections.py +490 -0
  45. postbound/util/dataframe.py +71 -0
  46. postbound/util/dicts.py +330 -0
  47. postbound/util/jsonize.py +68 -0
  48. postbound/util/logging.py +106 -0
  49. postbound/util/misc.py +168 -0
  50. postbound/util/networkx.py +401 -0
  51. postbound/util/numbers.py +438 -0
  52. postbound/util/proc.py +107 -0
  53. postbound/util/stats.py +37 -0
  54. postbound/util/system.py +48 -0
  55. postbound/util/typing.py +35 -0
  56. postbound/vis/__init__.py +5 -0
  57. postbound/vis/fdl.py +69 -0
  58. postbound/vis/graphs.py +48 -0
  59. postbound/vis/optimizer.py +538 -0
  60. postbound/vis/plots.py +84 -0
  61. postbound/vis/tonic.py +70 -0
  62. postbound/vis/trees.py +105 -0
  63. postbound-0.19.0.dist-info/METADATA +355 -0
  64. postbound-0.19.0.dist-info/RECORD +67 -0
  65. postbound-0.19.0.dist-info/WHEEL +5 -0
  66. postbound-0.19.0.dist-info/licenses/LICENSE.txt +202 -0
  67. postbound-0.19.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,92 @@
1
"""The optimizer package defines the central interfaces to implement optimization algorithms.

TODO: detailed documentation
"""

#
# Important note for maintainers:
# since we now use lazy loading for the optimization algorithms, we need some additional scaffolding.
# Specifically, we need an additional __init__.pyi file which contains all imports that we normally do in this package
# (lazy and otherwise). This file is only used by type checkers to resolve the lazy modules correctly.
# All changes to the imports below must also be reflected in the __init__.pyi file
# See https://scientific-python.org/specs/spec-0001/#usage and https://scientific-python.org/specs/spec-0001/#type-checkers
# for details.
#

import lazy_loader

from .. import _validation as validation
from .._hints import (
    DirectionalJoinOperatorAssignment,
    HintType,
    JoinOperatorAssignment,
    ScanOperatorAssignment,
    operators_from_plan,
    read_operator_assignment_json,
    read_operator_json,
    read_plan_params_json,
    update_plan,
)
from .._jointree import (
    JoinTree,
    explode_query_plan,
    jointree_from_plan,
    parameters_from_plan,
    read_jointree_json,
    read_query_plan_json,
    to_query_plan,
)
from ._cardinalities import (
    CardinalityDistortion,
    PreciseCardinalityHintGenerator,
    PreComputedCardinalities,
)
from ._joingraph import (
    IndexInfo,
    JoinGraph,
    JoinPath,
    TableInfo,
)

# lazy import setup
# These submodules are only imported on first attribute access (SPEC 1-style lazy loading),
# which keeps `import postbound.optimizer` cheap.
submodules = [
    "dynprog",
    "enumeration",
    "native",
    "noopt",
    "presets",
    "randomized",
    "tonic",
    "ues",
]

# attach() provides module-level __getattr__/__dir__ hooks that resolve the lazy submodules
# on demand; the third return value (the eagerly attached names) is not needed here.
__getattr__, __dir__, _ = lazy_loader.attach(__name__, submodules)

__all__ = [
    "validation",
    "CardinalityDistortion",
    "PreciseCardinalityHintGenerator",
    "PreComputedCardinalities",
    "ScanOperatorAssignment",
    "JoinOperatorAssignment",
    "DirectionalJoinOperatorAssignment",
    "read_operator_json",
    "operators_from_plan",
    "parameters_from_plan",
    "read_operator_assignment_json",
    "read_plan_params_json",
    "update_plan",
    "HintType",
    "JoinTree",
    "jointree_from_plan",
    "read_jointree_json",
    "to_query_plan",
    "read_query_plan_json",
    "explode_query_plan",
    "JoinGraph",
    "JoinPath",
    "IndexInfo",
    "TableInfo",
]

# The lazily loaded submodules are part of the public API as well.
__all__ += submodules
@@ -0,0 +1,73 @@
1
# Type stubs for postbound.optimizer package
# See comment in __init__.py for details.
# This file must mirror every (lazy and eager) import of __init__.py so that type checkers
# can resolve the lazily loaded submodules.

from .. import _validation as validation
from .._hints import (
    DirectionalJoinOperatorAssignment,
    HintType,
    JoinOperatorAssignment,
    ScanOperatorAssignment,
    operators_from_plan,
    read_operator_assignment_json,
    read_operator_json,
    read_plan_params_json,
    update_plan,
)
from .._jointree import (
    JoinTree,
    explode_query_plan,
    jointree_from_plan,
    parameters_from_plan,
    read_jointree_json,
    read_query_plan_json,
    to_query_plan,
)

# Lazy-loaded modules
from . import dynprog, enumeration, native, noopt, presets, randomized, tonic, ues
from ._cardinalities import (
    CardinalityDistortion,
    PreciseCardinalityHintGenerator,
    PreComputedCardinalities,
)
from ._joingraph import (
    IndexInfo,
    JoinGraph,
    JoinPath,
    TableInfo,
)

__all__ = [
    "validation",
    "CardinalityDistortion",
    "PreciseCardinalityHintGenerator",
    "PreComputedCardinalities",
    "ScanOperatorAssignment",
    "JoinOperatorAssignment",
    "DirectionalJoinOperatorAssignment",
    "read_operator_json",
    "operators_from_plan",
    "parameters_from_plan",
    "read_operator_assignment_json",
    "read_plan_params_json",
    "update_plan",
    "HintType",
    "JoinTree",
    "jointree_from_plan",
    "read_jointree_json",
    "to_query_plan",
    "read_query_plan_json",
    "explode_query_plan",
    "JoinGraph",
    "JoinPath",
    "IndexInfo",
    "TableInfo",
    "dynprog",
    "enumeration",
    "native",
    "noopt",
    "presets",
    "randomized",
    "tonic",
    "ues",
]
@@ -0,0 +1,369 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import random
5
+ from collections.abc import Iterable
6
+ from typing import Literal, Optional
7
+
8
+ import pandas as pd
9
+
10
+ from .. import util
11
+ from .._core import Cardinality, TableReference
12
+ from .._stages import (
13
+ CardinalityEstimator,
14
+ )
15
+ from ..db._db import Database, DatabasePool
16
+ from ..experiments.workloads import Workload
17
+ from ..qal import parser, transform
18
+ from ..qal._qal import SqlQuery
19
+
20
+
21
class PreciseCardinalityHintGenerator(CardinalityEstimator):
    """Cardinality "estimator" that calculates exact cardinalities.

    These cardinalities are determined by actually executing the intermediate query plan and counting the number of result
    tuples. To speed up this potentially very costly computation, the estimator can store already calculated cardinalities in
    an intermediate cache. Notice that this cache is different from the query cache provided by the `Database` interface. The
    reason for this distinction is simple: the query result cache assumes static databases. If it connects to the same logical
    database at two different points in time (potentially after a data shift), the cached results will be out-of-date. On the
    other hand, the cardinality cache is transient and local to each estimator. Therefore, it will always calculate the current
    results, even when a data shift is simulated. Even when the same estimator is used while simulating a data shift, the cache
    can be reset manually without impacting caching of all other queries.

    Parameters
    ----------
    database : Optional[Database], optional
        The database for which the estimates should be calculated. If omitted, the database system is inferred from the
        database pool.
    enable_cache : bool, optional
        Whether cardinalities of intermediates should be cached *for the lifetime of the estimator object*. Defaults to
        *False*.
    allow_cross_products : bool, optional
        Whether cardinality estimates for arbitrary cross products should be included.
    """

    def __init__(
        self,
        database: Optional[Database] = None,
        *,
        enable_cache: bool = False,
        allow_cross_products: bool = False,
    ) -> None:
        super().__init__(allow_cross_products=allow_cross_products)
        self.database = (
            database
            if database is not None
            else DatabasePool.get_instance().current_database()
        )
        self.cache_enabled = enable_cache
        # Maps COUNT(*) fragment queries to their exact cardinalities. Only consulted and
        # filled when `cache_enabled` is set.
        self._cardinality_cache: dict[SqlQuery, Cardinality] = {}

    def describe(self) -> dict:
        return {"name": "true-cards", "database": self.database.describe()}

    def calculate_estimate(
        self, query: SqlQuery, tables: TableReference | Iterable[TableReference]
    ) -> Cardinality:
        """Computes the exact cardinality of the intermediate formed by `tables` in `query`.

        The intermediate is extracted as a query fragment and executed as a COUNT(*) query on
        the target database.
        """
        tables = util.enlist(tables)
        partial_query = transform.as_count_star_query(
            transform.extract_query_fragment(query, tables)
        )
        # BUGFIX: the cache was previously consulted and filled unconditionally, which made
        # the `enable_cache` constructor switch a no-op. Honor the flag on both paths.
        if self.cache_enabled and partial_query in self._cardinality_cache:
            return self._cardinality_cache[partial_query]
        cardinality = Cardinality(self.database.execute_query(partial_query))
        if self.cache_enabled:
            self._cardinality_cache[partial_query] = cardinality
        return cardinality

    def reset_cache(self) -> None:
        """Drops all cached cardinalities, e.g. after a (simulated) data shift."""
        self._cardinality_cache.clear()
79
+
80
+
81
def _parse_tables(tabs: str) -> set[TableReference]:
    """Utility to load tables from their JSON representation.

    Parameters
    ----------
    tabs : str
        The raw JSON data

    Returns
    -------
    set[TableReference]
        The corresponding tables
    """
    raw_tables = json.loads(tabs)
    return set(map(parser.load_table_json, raw_tables))
95
+
96
+
97
class PreComputedCardinalities(CardinalityEstimator):
    """Re-uses existing cardinalities from an external data source.

    The cardinalities have to be stored in a CSV file which follows a certain structure. Some details can be customized (e.g.
    column names). Most importantly, queries have to be identified via their labels. See parameters for details.

    Parameters
    ----------
    workload : workloads.Workload
        The workload which was used to calculate the cardinalities. This is required to determine the query label based on an
        input query. Each hint generator can only support a specific workload.
    lookup_table_path : str
        The file path to the CSV file containing the cardinalities.
    include_cross_products : bool, optional
        Whether cardinality estimates for arbitrary cross products are contained in the CSV file and hence can be used during
        estimation. By default this is disabled.
    default_cardinality : Optional[Cardinality], optional
        In case no cardinality estimate exists for a specific intermediate, a default cardinality can be used instead. In case
        no default value has been specified, an error would be raised. Notice that a ``None`` value unsets the default. If the
        client should handle this situation instead, another value (e.g. ``Cardinality.unknown()``) has to be used.
    label_col : str, optional
        The column in the CSV file that contains the query labels. Defaults to *label*.
    tables_col : str, optional
        The column in the CSV file that contains the (JSON serialized) tables that form the current intermediate result of the
        current query. Defaults to *tables*.
    cardinality_col : str, optional
        The column in the CSV file that contains the actual cardinalities. Defaults to *cardinality*.
    live_fallback : bool, optional
        Whether to fall back to a live database in case no cardinality estimate is found in the CSV file. This is off by
        default.
    error_on_missing_card : bool, optional
        If live fallback is disabled and we did not find a cardinality estimate for a specific intermediate, we will raise an
        error by default. If this is not desired and missing values can be handled by the client, this behavior can be disabled
        with this parameter.
    live_fallback_style : Literal["actual", "estimated"], optional
        In case the fallback is enabled, this customizes the calculation strategy. "actual" will calculate the true cardinality
        of the intermediate in question, whereas "estimated" (the default) will use the native optimizer to estimate the
        cardinality.
    live_db : Optional[Database], optional
        The database system that should be used in case of a live fallback. If omitted, the database system is inferred from
        the database pool.
    save_live_fallback_results : bool, optional
        Whether the cardinalities computed by the live fallback should be stored in the original file containing the lookup
        table. This is only used if live fallback is active and enabled by default.
    """

    def __init__(
        self,
        workload: Workload,
        lookup_table_path: str,
        *,
        include_cross_products: bool = False,
        default_cardinality: Optional[Cardinality] = None,
        label_col: str = "label",
        tables_col: str = "tables",
        cardinality_col: str = "cardinality",
        live_fallback: bool = False,
        error_on_missing_card: bool = True,
        live_db: Optional[Database] = None,
        live_fallback_style: Literal["actual", "estimated"] = "estimated",
        save_live_fallback_results: bool = True,
    ) -> None:
        super().__init__(allow_cross_products=include_cross_products)
        self._workload = workload
        self._label_col = label_col
        self._tables_col = tables_col
        self._card_col = cardinality_col
        self._default_card = default_cardinality
        self._lookup_df_path = lookup_table_path

        self._error_on_missing_card = error_on_missing_card
        # The live database is only resolved if the fallback was actually requested.
        self._live_db: Optional[Database] = (
            (DatabasePool.get_instance().current_database() if live_db is None else live_db)
            if live_fallback
            else None
        )
        self._live_fallback_style = live_fallback_style
        # NOTE: renamed from the misspelled `_save_life_fallback` (private attribute, no
        # external callers can depend on the old name).
        self._save_live_fallback = save_live_fallback_results

        # The tables column stores JSON-serialized table sets, so parse them while loading.
        self._true_card_df = pd.read_csv(
            lookup_table_path, converters={tables_col: _parse_tables}
        )

    def calculate_estimate(
        self, query: SqlQuery, tables: TableReference | Iterable[TableReference]
    ) -> Cardinality:
        """Looks up the cardinality of the given intermediate, using the fallback chain on a miss.

        Raises
        ------
        ValueError
            If multiple samples exist for the intermediate, or if no sample exists, no fallback
            could be inferred and `error_on_missing_card` is enabled.
        """
        tables = util.enlist(tables)
        label = self._workload.label_of(query)
        relevant_samples = self._true_card_df[
            self._true_card_df[self._label_col] == label
        ]
        # NOTE(review): the parsed column contains set[TableReference]; this comparison only
        # matches if `util.enlist` yields an equally-comparable collection -- TODO confirm.
        cardinality_sample = relevant_samples[
            relevant_samples[self._tables_col] == tables
        ]

        tables_debug = "(" + ", ".join(tab.identifier() for tab in tables) + ")"
        n_samples = len(cardinality_sample)
        if n_samples == 1:
            cardinality = Cardinality(cardinality_sample.iloc[0][self._card_col])
            return cardinality
        elif n_samples > 1:
            raise ValueError(
                f"{n_samples} samples found for join {tables_debug} in query {label}. Expected 1."
            )

        fallback_value = self._attempt_fallback_estimate(n_samples, query, tables)
        # BUGFIX: _attempt_fallback_estimate previously always returned a Cardinality (NaN when
        # no fallback was possible), so the `is None` check never fired and
        # `error_on_missing_card` was silently ignored. The helper now returns None when no
        # fallback could be inferred.
        if fallback_value is None:
            if self._error_on_missing_card:
                raise ValueError(
                    f"No matching sample found for join {tables_debug} in query {label}"
                )
            return Cardinality.unknown()
        return fallback_value

    def describe(self) -> dict:
        return {
            "name": "pre-computed-cards",
            "location": self._lookup_df_path,
            "workload": self._workload.name,
        }

    def _attempt_fallback_estimate(
        self, n_samples: int, query: SqlQuery, tables: frozenset[TableReference]
    ) -> Optional[Cardinality]:
        """Tries to infer the fallback value for a specific estimate, if this is necessary.

        The inference strategy applies the following rules:

        1. If exactly one sample was found, no fallback is necessary.
        2. If no sample was found, but we specified a static fallback value, this value is used.
        3. If a live fallback is available, the cardinality is calculated according to the `live_fallback_style`.
        4. Otherwise no fallback is possible.

        Parameters
        ----------
        n_samples : int
            The number of samples found for the current intermediate
        query : SqlQuery
            The query for which the cardinality should be estimated
        tables : frozenset[TableReference]
            The joins that form the current intermediate

        Returns
        -------
        Optional[Cardinality]
            The fallback value if it could be inferred, otherwise *None*.
        """
        if n_samples == 1:
            # If we found exactly one sample, we did not need to fall back at all
            # (unreachable from calculate_estimate, which returns the sample directly).
            return Cardinality.unknown()

        if self._default_card is not None:
            return self._default_card
        if self._live_db is None:
            return None

        query_fragment = transform.extract_query_fragment(query, tables)
        if not query_fragment:
            return None

        if self._live_fallback_style == "actual":
            true_card_query = transform.as_count_star_query(query_fragment)
            cardinality = Cardinality(self._live_db.execute_query(true_card_query))
        elif self._live_fallback_style == "estimated":
            cardinality = self._live_db.optimizer().cardinality_estimate(query_fragment)
        else:
            raise ValueError(f"Unknown fallback style: '{self._live_fallback_style}'")

        if self._save_live_fallback:
            self._dump_fallback_estimate(query, tables, cardinality)
        return cardinality

    def _dump_fallback_estimate(
        self,
        query: SqlQuery,
        tables: frozenset[TableReference],
        cardinality: Cardinality,
    ) -> None:
        """Stores a newly computed cardinality estimate in the lookup table.

        The new row is appended to the in-memory dataframe and the whole lookup table is
        re-written to its original CSV file.

        Parameters
        ----------
        query : SqlQuery
            The query for which the cardinality was estimated
        tables : frozenset[TableReference]
            The tables that form the current intermediate
        cardinality : Cardinality
            The computed cardinality
        """
        result_row = {}
        result_row[self._label_col] = [self._workload.label_of(query)]
        result_row[self._tables_col] = [util.to_json(tables)]

        # Optional debug columns: only fill them if the existing lookup table contains them.
        if "query" in self._true_card_df.columns:
            result_row["query"] = [str(query)]
        if "query_fragment" in self._true_card_df.columns:
            result_row["query_fragment"] = [
                str(transform.extract_query_fragment(query, tables))
            ]

        result_row[self._card_col] = [cardinality]
        result_df = pd.DataFrame(result_row)

        self._true_card_df = pd.concat(
            [self._true_card_df, result_df], ignore_index=True
        )
        self._true_card_df.to_csv(self._lookup_df_path, index=False)
306
+
307
+
308
class CardinalityDistortion(CardinalityEstimator):
    """Decorator to simulate errors during cardinality estimation.

    The distortion service uses cardinality estimates produced by an actual estimator and modifies its estimations to
    simulate the effect of deviations and misestimates.

    Behavior regarding cross products is inferred based on the behavior of the actual estimator.

    Parameters
    ----------
    estimator : CardinalityEstimator
        The actual estimator that calculates the "correct" cardinalities.
    distortion_factor : float
        How much the cardinalities are allowed to deviate from the original estimations. Values > 1 simulate overestimation
        whereas values < 1 simulate underestimation. For example, a distortion factor of 0.5 means that the final estimates can
        deviate at most half of the original cardinalities, or a factor of 1.3 allows an overestimation of up to 30%.
    distortion_strategy : Literal["fixed", "random"], optional
        How the estimation errors should be calculated. The default *fixed* strategy always applies the exact distortion
        factor to the cardinalities. For example, an estimate of 1000 tuples would always become 1300 tuples with a
        distortion factor of 1.3. On the other hand the *random* strategy allows any error between 1 and the desired factor
        and selects the specific distortion at random. For example, an estimate of 100 could become any cardinality between
        50 and 100 tuples with a distortion factor of 0.5.
    """

    def __init__(
        self,
        estimator: CardinalityEstimator,
        distortion_factor: float,
        *,
        distortion_strategy: Literal["fixed", "random"] = "fixed",
    ) -> None:
        super().__init__(allow_cross_products=estimator.allow_cross_products)
        self.estimator = estimator
        self.distortion_factor = distortion_factor
        self.distortion_strategy = distortion_strategy

    def describe(self) -> dict:
        # BUGFIX: this previously reported the static string "distortion" under "estimator",
        # losing all information about the wrapped estimator. Nest its description instead,
        # matching the pattern used by the other estimators (cf. PreciseCardinalityHintGenerator).
        return {
            "name": "cardinality-distortion",
            "estimator": self.estimator.describe(),
            "distortion_factor": self.distortion_factor,
            "distortion_strategy": self.distortion_strategy,
        }

    def calculate_estimate(
        self, query: SqlQuery, tables: TableReference | Iterable[TableReference]
    ) -> Cardinality:
        """Delegates to the wrapped estimator and distorts the resulting estimate.

        Invalid estimates from the wrapped estimator are passed through as unknown cardinality.
        """
        tables = util.enlist(tables)
        card_est = self.estimator.calculate_estimate(query, tables)
        if not card_est.is_valid():
            return Cardinality.unknown()

        if self.distortion_strategy == "fixed":
            distortion_factor = self.distortion_factor
        elif self.distortion_strategy == "random":
            # Pick a factor anywhere between 1.0 and the configured bound, regardless of whether
            # the bound simulates over- or underestimation.
            distortion_factor = random.uniform(
                min(self.distortion_factor, 1.0), max(self.distortion_factor, 1.0)
            )
        else:
            raise ValueError(
                f"Unknown distortion strategy: '{self.distortion_strategy}'"
            )
        # NOTE(review): round() yields whatever Cardinality.__round__ returns -- presumably a
        # Cardinality; confirm it is not a plain int.
        return round(card_est * distortion_factor)