additory 0.1.0a3-py3-none-any.whl → 0.1.1a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. additory/__init__.py +58 -14
  2. additory/common/__init__.py +31 -147
  3. additory/common/column_selector.py +255 -0
  4. additory/common/distributions.py +286 -613
  5. additory/common/extractors.py +313 -0
  6. additory/common/knn_imputation.py +332 -0
  7. additory/common/result.py +380 -0
  8. additory/common/strategy_parser.py +243 -0
  9. additory/common/unit_conversions.py +338 -0
  10. additory/common/validation.py +283 -103
  11. additory/core/__init__.py +34 -22
  12. additory/core/backend.py +258 -0
  13. additory/core/config.py +177 -305
  14. additory/core/logging.py +230 -24
  15. additory/core/memory_manager.py +157 -495
  16. additory/expressions/__init__.py +2 -23
  17. additory/expressions/compiler.py +457 -0
  18. additory/expressions/engine.py +264 -487
  19. additory/expressions/integrity.py +179 -0
  20. additory/expressions/loader.py +263 -0
  21. additory/expressions/parser.py +363 -167
  22. additory/expressions/resolver.py +274 -0
  23. additory/functions/__init__.py +1 -0
  24. additory/functions/analyze/__init__.py +144 -0
  25. additory/functions/analyze/cardinality.py +58 -0
  26. additory/functions/analyze/correlations.py +66 -0
  27. additory/functions/analyze/distributions.py +53 -0
  28. additory/functions/analyze/duplicates.py +49 -0
  29. additory/functions/analyze/features.py +61 -0
  30. additory/functions/analyze/imputation.py +66 -0
  31. additory/functions/analyze/outliers.py +65 -0
  32. additory/functions/analyze/patterns.py +65 -0
  33. additory/functions/analyze/presets.py +72 -0
  34. additory/functions/analyze/quality.py +59 -0
  35. additory/functions/analyze/timeseries.py +53 -0
  36. additory/functions/analyze/types.py +45 -0
  37. additory/functions/expressions/__init__.py +161 -0
  38. additory/functions/snapshot/__init__.py +82 -0
  39. additory/functions/snapshot/filter.py +119 -0
  40. additory/functions/synthetic/__init__.py +113 -0
  41. additory/functions/synthetic/mode_detector.py +47 -0
  42. additory/functions/synthetic/strategies/__init__.py +1 -0
  43. additory/functions/synthetic/strategies/advanced.py +35 -0
  44. additory/functions/synthetic/strategies/augmentative.py +160 -0
  45. additory/functions/synthetic/strategies/generative.py +168 -0
  46. additory/functions/synthetic/strategies/presets.py +116 -0
  47. additory/functions/to/__init__.py +188 -0
  48. additory/functions/to/lookup.py +351 -0
  49. additory/functions/to/merge.py +189 -0
  50. additory/functions/to/sort.py +91 -0
  51. additory/functions/to/summarize.py +170 -0
  52. additory/functions/transform/__init__.py +140 -0
  53. additory/functions/transform/datetime.py +79 -0
  54. additory/functions/transform/extract.py +85 -0
  55. additory/functions/transform/harmonize.py +105 -0
  56. additory/functions/transform/knn.py +62 -0
  57. additory/functions/transform/onehotencoding.py +68 -0
  58. additory/functions/transform/transpose.py +42 -0
  59. additory-0.1.1a1.dist-info/METADATA +83 -0
  60. additory-0.1.1a1.dist-info/RECORD +62 -0
  61. additory/analysis/__init__.py +0 -48
  62. additory/analysis/cardinality.py +0 -126
  63. additory/analysis/correlations.py +0 -124
  64. additory/analysis/distributions.py +0 -376
  65. additory/analysis/quality.py +0 -158
  66. additory/analysis/scan.py +0 -400
  67. additory/common/backend.py +0 -371
  68. additory/common/column_utils.py +0 -191
  69. additory/common/exceptions.py +0 -62
  70. additory/common/lists.py +0 -229
  71. additory/common/patterns.py +0 -240
  72. additory/common/resolver.py +0 -567
  73. additory/common/sample_data.py +0 -182
  74. additory/core/ast_builder.py +0 -165
  75. additory/core/backends/__init__.py +0 -23
  76. additory/core/backends/arrow_bridge.py +0 -483
  77. additory/core/backends/cudf_bridge.py +0 -355
  78. additory/core/column_positioning.py +0 -358
  79. additory/core/compiler_polars.py +0 -166
  80. additory/core/enhanced_cache_manager.py +0 -1119
  81. additory/core/enhanced_matchers.py +0 -473
  82. additory/core/enhanced_version_manager.py +0 -325
  83. additory/core/executor.py +0 -59
  84. additory/core/integrity_manager.py +0 -477
  85. additory/core/loader.py +0 -190
  86. additory/core/namespace_manager.py +0 -657
  87. additory/core/parser.py +0 -176
  88. additory/core/polars_expression_engine.py +0 -601
  89. additory/core/registry.py +0 -176
  90. additory/core/sample_data_manager.py +0 -492
  91. additory/core/user_namespace.py +0 -751
  92. additory/core/validator.py +0 -27
  93. additory/dynamic_api.py +0 -304
  94. additory/expressions/proxy.py +0 -549
  95. additory/expressions/registry.py +0 -313
  96. additory/expressions/samples.py +0 -492
  97. additory/synthetic/__init__.py +0 -13
  98. additory/synthetic/column_name_resolver.py +0 -149
  99. additory/synthetic/distributions.py +0 -22
  100. additory/synthetic/forecast.py +0 -1132
  101. additory/synthetic/linked_list_parser.py +0 -415
  102. additory/synthetic/namespace_lookup.py +0 -129
  103. additory/synthetic/smote.py +0 -320
  104. additory/synthetic/strategies.py +0 -850
  105. additory/synthetic/synthesizer.py +0 -713
  106. additory/utilities/__init__.py +0 -53
  107. additory/utilities/encoding.py +0 -600
  108. additory/utilities/games.py +0 -300
  109. additory/utilities/keys.py +0 -8
  110. additory/utilities/lookup.py +0 -103
  111. additory/utilities/matchers.py +0 -216
  112. additory/utilities/resolvers.py +0 -286
  113. additory/utilities/settings.py +0 -167
  114. additory/utilities/units.py +0 -749
  115. additory/utilities/validators.py +0 -153
  116. additory-0.1.0a3.dist-info/METADATA +0 -288
  117. additory-0.1.0a3.dist-info/RECORD +0 -71
  118. additory-0.1.0a3.dist-info/licenses/LICENSE +0 -21
  119. {additory-0.1.0a3.dist-info → additory-0.1.1a1.dist-info}/WHEEL +0 -0
  120. {additory-0.1.0a3.dist-info → additory-0.1.1a1.dist-info}/top_level.txt +0 -0
additory/common/backend.py
@@ -1,371 +0,0 @@
-"""
-Unified Backend Detection System
-
-Provides consistent backend detection across all additory modules.
-"""
-
-import pandas as pd
-from typing import Any, Literal, Dict
-
-# Optional imports
-try:
-    import polars as pl
-    HAS_POLARS = True
-except ImportError:
-    HAS_POLARS = False
-    pl = None
-
-try:
-    import cudf
-    HAS_CUDF = True
-except (ImportError, Exception):
-    HAS_CUDF = False
-    cudf = None
-
-
-BackendType = Literal['pandas', 'polars', 'cudf']
-ExecutionMode = Literal['cpu', 'gpu']
-
-
-def detect_backend(df: Any) -> BackendType:
-    """
-    Detect the specific backend type of a dataframe.
-
-    Args:
-        df: Dataframe to detect
-
-    Returns:
-        'pandas', 'polars', or 'cudf'
-
-    Raises:
-        TypeError: If not a supported dataframe type
-
-    Usage:
-        - Use this when you need to know the SPECIFIC backend
-        - For utilities that need native operations
-        - For type-specific conversions
-
-    Examples:
-        >>> backend = detect_backend(df)
-        >>> if backend == 'polars':
-        ...     result = df.select(...)
-        >>> elif backend == 'pandas':
-        ...     result = df[...]
-    """
-    if isinstance(df, pd.DataFrame):
-        return 'pandas'
-    elif HAS_POLARS and isinstance(df, pl.DataFrame):
-        return 'polars'
-    elif HAS_CUDF and isinstance(df, cudf.DataFrame):
-        return 'cudf'
-    else:
-        raise TypeError(
-            f"Unsupported dataframe type: {type(df)}. "
-            f"Supported types: pandas.DataFrame"
-            f"{', polars.DataFrame' if HAS_POLARS else ''}"
-            f"{', cudf.DataFrame' if HAS_CUDF else ''}"
-        )
-
-
-def detect_execution_mode(df: Any, preference: str = None) -> ExecutionMode:
-    """
-    Detect execution mode (CPU vs GPU) for expression processing.
-
-    Args:
-        df: Dataframe to detect
-        preference: User preference ('cpu', 'gpu', or None for auto)
-
-    Returns:
-        'cpu' or 'gpu'
-
-    Usage:
-        - Use this for expression execution routing
-        - Respects user preferences
-        - Falls back intelligently
-
-    Examples:
-        >>> mode = detect_execution_mode(df, preference='gpu')
-        >>> if mode == 'gpu':
-        ...     # Use GPU-accelerated execution
-    """
-    backend = detect_backend(df)
-
-    # User preference takes priority
-    if preference == 'gpu' and HAS_CUDF:
-        return 'gpu'
-    elif preference == 'cpu':
-        return 'cpu'
-
-    # Auto-detect based on dataframe type
-    if backend == 'cudf':
-        return 'gpu'
-    else:
-        return 'cpu'
-
-
-def is_dataframe(obj: Any) -> bool:
-    """
-    Check if object is any supported dataframe type.
-
-    Args:
-        obj: Object to check
-
-    Returns:
-        True if supported dataframe type
-
-    Usage:
-        - Use for simple boolean checks
-        - Fast validation without exceptions
-
-    Examples:
-        >>> if is_dataframe(obj):
-        ...     process(obj)
-    """
-    return (
-        isinstance(obj, pd.DataFrame) or
-        (HAS_POLARS and isinstance(obj, pl.DataFrame)) or
-        (HAS_CUDF and isinstance(obj, cudf.DataFrame))
-    )
-
-
-def get_available_backends() -> Dict[str, bool]:
-    """
-    Get availability status of all backends.
-
-    Returns:
-        Dictionary mapping backend name to availability
-
-    Examples:
-        >>> backends = get_available_backends()
-        >>> if backends['polars']:
-        ...     # Use polars-specific features
-    """
-    return {
-        'pandas': True, # Always available
-        'polars': HAS_POLARS,
-        'cudf': HAS_CUDF
-    }
-
-
-def check_backend_available(backend: BackendType) -> bool:
-    """
-    Check if a specific backend is available.
-
-    Args:
-        backend: Backend to check ('pandas', 'polars', 'cudf')
-
-    Returns:
-        True if backend is available
-
-    Examples:
-        >>> if check_backend_available('polars'):
-        ...     # Safe to use polars
-    """
-    availability = get_available_backends()
-    return availability.get(backend, False)
-
-
-# ============================================================================
-# Arrow Bridge Helpers - Polars-Only Architecture
-# ============================================================================
-
-def get_arrow_bridge():
-    """
-    Get singleton instance of Arrow bridge.
-
-    Returns:
-        EnhancedArrowBridge instance
-
-    Usage:
-        - Use for all cross-backend conversions
-        - Handles pandas/polars/cuDF via Arrow
-    """
-    from additory.core.backends.arrow_bridge import EnhancedArrowBridge, ArrowBridgeError
-
-    # Singleton pattern
-    if not hasattr(get_arrow_bridge, '_instance'):
-        try:
-            get_arrow_bridge._instance = EnhancedArrowBridge()
-        except ArrowBridgeError:
-            get_arrow_bridge._instance = None
-
-    return get_arrow_bridge._instance
-
-
-def to_polars(df: Any, backend_type: BackendType = None) -> 'pl.DataFrame':
-    """
-    Convert any dataframe to Polars via Arrow bridge.
-
-    This is the primary conversion function for the Polars-only architecture.
-    All operations (expressions, synthetic, etc.) use this to convert input
-    dataframes to Polars for processing.
-
-    Args:
-        df: Input dataframe (pandas, polars, or cuDF)
-        backend_type: Source backend type (auto-detected if None)
-
-    Returns:
-        Polars DataFrame
-
-    Raises:
-        TypeError: If df is not a supported dataframe type
-        RuntimeError: If conversion fails
-
-    Examples:
-        >>> # Convert pandas to polars
-        >>> pl_df = to_polars(pandas_df)
-
-        >>> # Convert cuDF to polars
-        >>> pl_df = to_polars(cudf_df)
-
-        >>> # Already polars (no-op)
-        >>> pl_df = to_polars(polars_df)
-    """
-    if not HAS_POLARS:
-        raise RuntimeError(
-            "Polars is not available. Install with: pip install polars"
-        )
-
-    # Fast path: already Polars
-    if HAS_POLARS and isinstance(df, pl.DataFrame):
-        return df
-
-    # Validate input
-    if not is_dataframe(df):
-        raise TypeError(
-            f"Expected pandas, polars, or cuDF DataFrame, got {type(df)}"
-        )
-
-    # Auto-detect backend if not provided
-    if backend_type is None:
-        backend_type = detect_backend(df)
-
-    # Convert via Arrow bridge
-    try:
-        bridge = get_arrow_bridge()
-        if bridge is None:
-            # Fallback: direct conversion for pandas
-            if backend_type == "pandas":
-                if isinstance(df, pd.DataFrame):
-                    return pl.from_pandas(df)
-            raise RuntimeError("Arrow bridge not available and cannot convert non-pandas DataFrame")
-
-        arrow_table = bridge.to_arrow(df, backend_type)
-        pl_df = bridge.from_arrow(arrow_table, "polars")
-        return pl_df
-    except Exception as e:
-        raise RuntimeError(
-            f"Failed to convert {backend_type} DataFrame to Polars: {e}"
-        ) from e
-
-
-def from_polars(pl_df: 'pl.DataFrame', target_backend: BackendType) -> Any:
-    """
-    Convert Polars dataframe back to target backend via Arrow bridge.
-
-    This is used to convert results back to the user's original format
-    after processing in Polars.
-
-    Args:
-        pl_df: Polars DataFrame
-        target_backend: Target backend ('pandas', 'polars', or 'cudf')
-
-    Returns:
-        DataFrame in target format
-
-    Raises:
-        TypeError: If pl_df is not a Polars DataFrame
-        ValueError: If target_backend is not supported
-        RuntimeError: If conversion fails
-
-    Examples:
-        >>> # Convert back to pandas
-        >>> pandas_df = from_polars(pl_df, 'pandas')
-
-        >>> # Convert back to cuDF
-        >>> cudf_df = from_polars(pl_df, 'cudf')
-
-        >>> # Keep as polars (no-op)
-        >>> pl_df = from_polars(pl_df, 'polars')
-    """
-    if not HAS_POLARS:
-        raise RuntimeError(
-            "Polars is not available. Install with: pip install polars"
-        )
-
-    # Validate input
-    if not isinstance(pl_df, pl.DataFrame):
-        raise TypeError(
-            f"Expected Polars DataFrame, got {type(pl_df)}"
-        )
-
-    # Validate target backend
-    if target_backend not in ('pandas', 'polars', 'cudf'):
-        raise ValueError(
-            f"Invalid target_backend: {target_backend}. "
-            f"Must be 'pandas', 'polars', or 'cudf'"
-        )
-
-    # Fast path: already target format
-    if target_backend == 'polars':
-        return pl_df
-
-    # Check target backend availability
-    if target_backend == 'cudf' and not HAS_CUDF:
-        raise RuntimeError(
-            "cuDF is not available. Install with: pip install cudf"
-        )
-
-    # Convert via Arrow bridge
-    try:
-        bridge = get_arrow_bridge()
-        if bridge is None:
-            # Fallback: direct conversion for pandas
-            if target_backend == "pandas":
-                return pl_df.to_pandas()
-            raise RuntimeError("Arrow bridge not available and cannot convert to non-pandas DataFrame")
-
-        arrow_table = bridge.to_arrow(pl_df, "polars")
-        result_df = bridge.from_arrow(arrow_table, target_backend)
-        return result_df
-    except Exception as e:
-        raise RuntimeError(
-            f"Failed to convert Polars DataFrame to {target_backend}: {e}"
-        ) from e
-
-
-def convert_via_polars(df: Any, target_backend: BackendType = None) -> Any:
-    """
-    Convert dataframe to target backend via Polars (round-trip conversion).
-
-    This is a convenience function that combines to_polars() and from_polars().
-    Useful for format conversions without processing.
-
-    Args:
-        df: Input dataframe
-        target_backend: Target backend (defaults to input backend)
-
-    Returns:
-        DataFrame in target format
-
-    Examples:
-        >>> # Convert pandas to cuDF via Polars
-        >>> cudf_df = convert_via_polars(pandas_df, 'cudf')
-
-        >>> # Round-trip (normalize via Polars)
-        >>> normalized_df = convert_via_polars(df)
-    """
-    # Detect input backend
-    input_backend = detect_backend(df)
-
-    # Default to same backend
-    if target_backend is None:
-        target_backend = input_backend
-
-    # Fast path: same backend
-    if input_backend == target_backend:
-        return df
-
-    # Convert via Polars
-    pl_df = to_polars(df, input_backend)
-    return from_polars(pl_df, target_backend)
additory/common/column_utils.py
@@ -1,191 +0,0 @@
-"""
-Common Column Utilities
-
-Provides column name handling utilities shared across modules.
-"""
-
-import re
-from typing import List
-from .exceptions import ValidationError
-
-
-def sanitize_column_name(col_name: str) -> str:
-    """
-    Convert column name to Python-friendly identifier.
-
-    Rules:
-        - Replace spaces and special chars with underscores
-        - Remove consecutive underscores
-        - Remove leading/trailing underscores
-        - Ensure doesn't start with number
-        - Convert to lowercase for consistency
-
-    Args:
-        col_name: Original column name
-
-    Returns:
-        Sanitized column name safe for Python identifiers
-
-    Examples:
-        >>> sanitize_column_name("height collected on site")
-        'height_collected_on_site'
-        >>> sanitize_column_name("Patient Height - Site A")
-        'patient_height_site_a'
-        >>> sanitize_column_name("Weight (kg)")
-        'weight_kg'
-        >>> sanitize_column_name("temp@location#1")
-        'temp_location_1'
-    """
-    # Convert to string and handle None/empty
-    if not col_name:
-        return "unnamed_column"
-
-    col_str = str(col_name)
-
-    # Replace non-alphanumeric chars with underscores
-    sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', col_str)
-
-    # Remove consecutive underscores
-    sanitized = re.sub(r'_+', '_', sanitized)
-
-    # Remove leading/trailing underscores
-    sanitized = sanitized.strip('_')
-
-    # Ensure doesn't start with number
-    if sanitized and sanitized[0].isdigit():
-        sanitized = f"col_{sanitized}"
-
-    # Convert to lowercase for consistency
-    sanitized = sanitized.lower()
-
-    return sanitized if sanitized else "unnamed_column"
-
-
-def generate_safe_column_name(base_name: str, existing_columns: List[str]) -> str:
-    """
-    Generate a safe column name that doesn't conflict with existing columns.
-
-    Args:
-        base_name: Desired column name
-        existing_columns: List of existing column names
-
-    Returns:
-        Safe column name with _1, _2, etc. suffix if needed
-
-    Examples:
-        >>> generate_safe_column_name("value", ["value", "value_1"])
-        'value_2'
-        >>> generate_safe_column_name("new_col", ["col1", "col2"])
-        'new_col'
-    """
-    if base_name not in existing_columns:
-        return base_name
-
-    counter = 1
-    while f"{base_name}_{counter}" in existing_columns:
-        counter += 1
-
-    return f"{base_name}_{counter}"
-
-
-def validate_column_name(name: str) -> None:
-    """
-    Validate column name format.
-
-    Args:
-        name: Column name to validate
-
-    Raises:
-        ValidationError: If name is invalid
-
-    Examples:
-        >>> validate_column_name("valid_column")
-        >>> validate_column_name("") # Raises ValidationError
-    """
-    if not isinstance(name, str):
-        raise ValidationError(f"Column name must be a string, got {type(name)}")
-
-    if not name.strip():
-        raise ValidationError("Column name cannot be empty")
-
-
-def truncate_column_name(name: str, max_length: int = 63,
-                         preserve_end: bool = True) -> str:
-    """
-    Truncate column name to maximum length while preserving uniqueness.
-
-    Args:
-        name: Column name to truncate
-        max_length: Maximum length (default 63 for SQL compatibility)
-        preserve_end: If True, preserve end of name (where differences often are)
-
-    Returns:
-        Truncated column name
-
-    Examples:
-        >>> truncate_column_name("very_long_column_name_with_suffix_01", max_length=20)
-        'very_lon_suffix_01'
-        >>> truncate_column_name("short", max_length=20)
-        'short'
-    """
-    if len(name) <= max_length:
-        return name
-
-    if preserve_end:
-        # Keep start and end, truncate middle
-        keep_start = max_length // 2
-        keep_end = max_length - keep_start
-        return name[:keep_start] + name[-keep_end:]
-    else:
-        # Simple truncation from start
-        return name[:max_length]
-
-
-def generate_column_names_with_prefix_suffix(
-    base_name: str,
-    values: List[str],
-    prefix: str = None,
-    suffix: str = None,
-    max_length: int = 63
-) -> List[str]:
-    """
-    Generate column names with optional prefix/suffix.
-
-    Args:
-        base_name: Base column name
-        values: List of values to create column names for
-        prefix: Optional prefix
-        suffix: Optional suffix
-        max_length: Maximum column name length
-
-    Returns:
-        List of generated column names
-
-    Examples:
-        >>> generate_column_names_with_prefix_suffix(
-        ...     "color", ["red", "blue"], prefix="ohe"
-        ... )
-        ['ohe_color_red', 'ohe_color_blue']
-    """
-    column_names = []
-
-    for value in values:
-        # Build parts
-        parts = []
-        if prefix:
-            parts.append(prefix)
-        parts.append(base_name)
-        parts.append(str(value))
-        if suffix:
-            parts.append(suffix)
-
-        # Join with underscores
-        full_name = "_".join(parts)
-
-        # Truncate if needed
-        if len(full_name) > max_length:
-            full_name = truncate_column_name(full_name, max_length)
-
-        column_names.append(full_name)
-
-    return column_names
additory/common/exceptions.py
@@ -1,62 +0,0 @@
-"""
-Unified Exception Hierarchy for Additory
-
-Provides consistent error handling across all modules.
-"""
-
-
-class AdditoryError(Exception):
-    """Base exception for all additory errors."""
-    pass
-
-
-class ValidationError(AdditoryError):
-    """Raised when validation fails."""
-    pass
-
-
-class BackendError(AdditoryError):
-    """Raised when backend operations fail."""
-    pass
-
-
-class ConversionError(AdditoryError):
-    """Raised when data conversion fails."""
-    pass
-
-
-class ExpressionError(AdditoryError):
-    """Raised when expression execution fails."""
-    pass
-
-
-class ConfigurationError(AdditoryError):
-    """Raised when configuration is invalid."""
-    pass
-
-
-# Specific error types for different modules
-
-class UnitConversionError(ConversionError):
-    """Raised when unit conversion fails."""
-    pass
-
-
-class EncodingError(ConversionError):
-    """Raised when encoding operations fail."""
-    pass
-
-
-class LookupError(AdditoryError):
-    """Raised when lookup operations fail."""
-    pass
-
-
-class SyntheticDataError(AdditoryError):
-    """Raised when synthetic data generation fails."""
-    pass
-
-
-class AugmentError(AdditoryError):
-    """Raised when data augmentation fails."""
-    pass