qnty 0.0.9__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. qnty/__init__.py +2 -3
  2. qnty/constants/__init__.py +10 -0
  3. qnty/constants/numerical.py +18 -0
  4. qnty/constants/solvers.py +6 -0
  5. qnty/constants/tests.py +6 -0
  6. qnty/dimensions/__init__.py +23 -0
  7. qnty/dimensions/base.py +97 -0
  8. qnty/dimensions/field_dims.py +126 -0
  9. qnty/dimensions/field_dims.pyi +128 -0
  10. qnty/dimensions/signature.py +111 -0
  11. qnty/equations/__init__.py +1 -1
  12. qnty/equations/equation.py +118 -155
  13. qnty/equations/system.py +68 -65
  14. qnty/expressions/__init__.py +25 -46
  15. qnty/expressions/formatter.py +188 -0
  16. qnty/expressions/functions.py +46 -68
  17. qnty/expressions/nodes.py +540 -384
  18. qnty/expressions/types.py +70 -0
  19. qnty/problems/__init__.py +145 -0
  20. qnty/problems/composition.py +1101 -0
  21. qnty/problems/problem.py +737 -0
  22. qnty/problems/rules.py +145 -0
  23. qnty/problems/solving.py +1216 -0
  24. qnty/problems/validation.py +127 -0
  25. qnty/quantities/__init__.py +28 -5
  26. qnty/quantities/base_qnty.py +677 -0
  27. qnty/quantities/field_converters.py +24004 -0
  28. qnty/quantities/field_qnty.py +1012 -0
  29. qnty/{generated/setters.py → quantities/field_setter.py} +3071 -2961
  30. qnty/{generated/quantities.py → quantities/field_vars.py} +829 -444
  31. qnty/{generated/quantities.pyi → quantities/field_vars.pyi} +1289 -1290
  32. qnty/solving/manager.py +50 -44
  33. qnty/solving/order.py +181 -133
  34. qnty/solving/solvers/__init__.py +2 -9
  35. qnty/solving/solvers/base.py +27 -37
  36. qnty/solving/solvers/iterative.py +115 -135
  37. qnty/solving/solvers/simultaneous.py +93 -165
  38. qnty/units/__init__.py +1 -0
  39. qnty/{generated/units.py → units/field_units.py} +1700 -991
  40. qnty/units/field_units.pyi +2461 -0
  41. qnty/units/prefixes.py +58 -105
  42. qnty/units/registry.py +76 -89
  43. qnty/utils/__init__.py +16 -0
  44. qnty/utils/caching/__init__.py +23 -0
  45. qnty/utils/caching/manager.py +401 -0
  46. qnty/utils/error_handling/__init__.py +66 -0
  47. qnty/utils/error_handling/context.py +39 -0
  48. qnty/utils/error_handling/exceptions.py +96 -0
  49. qnty/utils/error_handling/handlers.py +171 -0
  50. qnty/utils/logging.py +4 -4
  51. qnty/utils/protocols.py +164 -0
  52. qnty/utils/scope_discovery.py +420 -0
  53. {qnty-0.0.9.dist-info → qnty-0.1.1.dist-info}/METADATA +1 -1
  54. qnty-0.1.1.dist-info/RECORD +60 -0
  55. qnty/_backup/problem_original.py +0 -1251
  56. qnty/_backup/quantity.py +0 -63
  57. qnty/codegen/cli.py +0 -125
  58. qnty/codegen/generators/data/unit_data.json +0 -8807
  59. qnty/codegen/generators/data_processor.py +0 -345
  60. qnty/codegen/generators/dimensions_gen.py +0 -434
  61. qnty/codegen/generators/doc_generator.py +0 -141
  62. qnty/codegen/generators/out/dimension_mapping.json +0 -974
  63. qnty/codegen/generators/out/dimension_metadata.json +0 -123
  64. qnty/codegen/generators/out/units_metadata.json +0 -223
  65. qnty/codegen/generators/quantities_gen.py +0 -159
  66. qnty/codegen/generators/setters_gen.py +0 -178
  67. qnty/codegen/generators/stubs_gen.py +0 -167
  68. qnty/codegen/generators/units_gen.py +0 -295
  69. qnty/expressions/cache.py +0 -94
  70. qnty/generated/dimensions.py +0 -514
  71. qnty/problem/__init__.py +0 -91
  72. qnty/problem/base.py +0 -142
  73. qnty/problem/composition.py +0 -385
  74. qnty/problem/composition_mixin.py +0 -382
  75. qnty/problem/equations.py +0 -413
  76. qnty/problem/metaclass.py +0 -302
  77. qnty/problem/reconstruction.py +0 -1016
  78. qnty/problem/solving.py +0 -180
  79. qnty/problem/validation.py +0 -64
  80. qnty/problem/variables.py +0 -239
  81. qnty/quantities/expression_quantity.py +0 -314
  82. qnty/quantities/quantity.py +0 -428
  83. qnty/quantities/typed_quantity.py +0 -215
  84. qnty/validation/__init__.py +0 -0
  85. qnty/validation/registry.py +0 -0
  86. qnty/validation/rules.py +0 -167
  87. qnty-0.0.9.dist-info/RECORD +0 -63
  88. /qnty/{codegen → extensions}/__init__.py +0 -0
  89. /qnty/{codegen/generators → extensions/integration}/__init__.py +0 -0
  90. /qnty/{codegen/generators/utils → extensions/plotting}/__init__.py +0 -0
  91. /qnty/{generated → extensions/reporting}/__init__.py +0 -0
  92. {qnty-0.0.9.dist-info → qnty-0.1.1.dist-info}/WHEEL +0 -0
@@ -1,345 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Unit Data Processing Utilities
4
- ==============================
5
-
6
- Shared utilities for processing unit data consistently across all generators.
7
- Handles prefix augmentation, data normalization, and statistics calculation.
8
- """
9
-
10
- import json
11
- import sys
12
- from pathlib import Path
13
- from typing import Any
14
-
15
-
16
- def setup_import_path() -> None:
17
- """Add src path to import qnty package."""
18
- # Go up from generators/ to src/
19
- src_path = Path(__file__).parent.parent.parent.parent
20
- sys.path.insert(0, str(src_path))
21
-
22
-
23
- def load_unit_data(data_path: Path) -> dict[str, Any]:
24
- """Load and return unit data from JSON file."""
25
- with open(data_path, encoding='utf-8') as f:
26
- return json.load(f)
27
-
28
-
29
- def augment_with_prefixed_units(raw_data: dict[str, Any]) -> tuple[dict[str, Any], int]:
30
- """
31
- Add missing prefixed units to the data consistently.
32
-
33
- This is the canonical implementation used by all generators to ensure
34
- they all see the same augmented data structure.
35
-
36
- Returns:
37
- Tuple of (augmented_data, generated_count)
38
- """
39
- # Import prefixes (setup_import_path should be called first)
40
- from qnty.units.prefixes import PREFIXABLE_UNITS, StandardPrefixes
41
-
42
- # Create deep copy of data with consistent structure
43
- augmented_data = {}
44
- for field_name, field_data in raw_data.items():
45
- augmented_data[field_name] = {
46
- 'field': field_data.get('field', field_name),
47
- 'normalized_field': field_data.get('normalized_field', field_name),
48
- 'dimensions': field_data.get('dimensions', {}),
49
- 'si_base_unit': field_data.get('si_base_unit', ''),
50
- 'imperial_base_unit': field_data.get('imperial_base_unit', ''),
51
- 'units': list(field_data.get('units', [])) # Deep copy the units list
52
- }
53
-
54
- # Track existing units globally to avoid duplicates
55
- existing_units = set()
56
- for field_data in augmented_data.values():
57
- for unit in field_data['units']:
58
- existing_units.add(unit.get('normalized_name', ''))
59
-
60
- generated_count = 0
61
-
62
- # Process each field to find prefixable base units
63
- for field_data in augmented_data.values():
64
- # Copy list since we'll modify it during iteration
65
- original_units = list(field_data['units'])
66
-
67
- for unit_data in original_units:
68
- unit_name = unit_data.get('normalized_name', '')
69
-
70
- # Check if this unit is in our prefixable units list
71
- if unit_name in PREFIXABLE_UNITS:
72
- prefixes = PREFIXABLE_UNITS[unit_name]
73
-
74
- # Generate prefixed variants
75
- for prefix_enum in prefixes:
76
- if prefix_enum == StandardPrefixes.NONE:
77
- continue
78
-
79
- prefix = prefix_enum.value
80
- prefixed_name = prefix.apply_to_name(unit_name)
81
-
82
- # Only add if it doesn't already exist globally
83
- if prefixed_name not in existing_units:
84
- # Create prefixed unit with consistent structure
85
- prefixed_unit = {
86
- 'name': prefix.apply_to_name(unit_data.get('name', unit_name)),
87
- 'normalized_name': prefixed_name,
88
- 'notation': prefix.apply_to_symbol(unit_data.get('notation', '')),
89
- 'si_conversion': unit_data.get('si_conversion', 1.0) * prefix.factor,
90
- 'imperial_conversion': unit_data.get('imperial_conversion', 1.0) * prefix.factor,
91
- 'aliases': [
92
- prefix.apply_to_symbol(unit_data.get('notation', ''))
93
- ] if unit_data.get('notation') else [],
94
- 'generated_from_prefix': True # Mark as generated
95
- }
96
-
97
- field_data['units'].append(prefixed_unit)
98
- existing_units.add(prefixed_name)
99
- generated_count += 1
100
-
101
- return augmented_data, generated_count
102
-
103
-
104
- def convert_to_class_name(field_name: str) -> str:
105
- """Convert field name to PascalCase class name consistently."""
106
- words = field_name.split('_')
107
- return ''.join(word.capitalize() for word in words)
108
-
109
-
110
- def get_dimension_constant_name(field_name: str) -> str:
111
- """Get dimension constant name - just convert to uppercase."""
112
- return field_name.upper()
113
-
114
-
115
- def calculate_statistics(unit_data: dict[str, Any]) -> dict[str, Any]:
116
- """Calculate statistics for the unit data."""
117
- total_units = sum(len(field_data.get('units', [])) for field_data in unit_data.values())
118
- total_fields = len(unit_data)
119
-
120
- # Count generated units
121
- generated_units = sum(
122
- sum(1 for unit in field_data.get('units', []) if unit.get('generated_from_prefix', False))
123
- for field_data in unit_data.values()
124
- )
125
-
126
- return {
127
- 'total_units': total_units,
128
- 'total_fields': total_fields,
129
- 'generated_prefixed_units': generated_units,
130
- 'original_units': total_units - generated_units
131
- }
132
-
133
-
134
- def get_unit_names_and_aliases(unit_data: dict[str, Any]) -> tuple[str, list[str]]:
135
- """
136
- Extract the primary unit name and aliases consistently for all generators.
137
-
138
- Uses normalized_name as the primary identifier, with notation and aliases
139
- as additional identifiers. Does NOT use the full 'name' field to avoid
140
- inconsistencies between generators.
141
-
142
- Returns:
143
- Tuple of (primary_name, aliases_list)
144
- """
145
- import re
146
-
147
- def sanitize_name(name: str) -> str:
148
- """Sanitize a name to be a valid Python identifier."""
149
- if not name:
150
- return 'unnamed'
151
- # Replace invalid characters with underscores
152
- sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', name)
153
- # Ensure it doesn't start with a number
154
- if sanitized and sanitized[0].isdigit():
155
- sanitized = 'unit_' + sanitized
156
- # Remove double underscores and trailing underscores
157
- sanitized = re.sub(r'_+', '_', sanitized).strip('_')
158
-
159
- # Check for Python reserved words and keywords
160
- import keyword
161
- if keyword.iskeyword(sanitized) or sanitized in ['in', 'and', 'or', 'not', 'is']:
162
- sanitized = sanitized + '_unit'
163
-
164
- return sanitized if sanitized else 'unnamed'
165
-
166
- # Use normalized_name as primary - this is the canonical identifier
167
- primary_name = sanitize_name(unit_data.get('normalized_name', ''))
168
-
169
- # Collect aliases from notation and aliases fields only (NOT full name)
170
- aliases = []
171
-
172
- # Add notation as alias if different from primary
173
- notation = unit_data.get('notation', '')
174
- if notation:
175
- notation_sanitized = sanitize_name(notation)
176
- if notation_sanitized != primary_name and notation_sanitized not in aliases:
177
- aliases.append(notation_sanitized)
178
-
179
- # Add explicit aliases from the aliases field
180
- raw_aliases = unit_data.get('aliases', [])
181
- for alias in raw_aliases:
182
- sanitized_alias = sanitize_name(alias)
183
- if sanitized_alias != primary_name and sanitized_alias not in aliases:
184
- aliases.append(sanitized_alias)
185
-
186
- return primary_name, aliases
187
-
188
-
189
- def save_text_file(content: str, file_path: Path) -> None:
190
- """Save text content to file."""
191
- with open(file_path, 'w', encoding='utf-8') as f:
192
- f.write(content)
193
-
194
-
195
- def load_json_data(file_path: Path) -> dict[str, Any]:
196
- """Load JSON data from file."""
197
- with open(file_path, encoding='utf-8') as f:
198
- return json.load(f)
199
-
200
-
201
- def save_metadata(metadata: dict[str, Any], output_path: Path, generator_name: str) -> None:
202
- """Save generator metadata to JSON file."""
203
- metadata_path = output_path / f'{generator_name}_metadata.json'
204
- with open(metadata_path, 'w', encoding='utf-8') as f:
205
- json.dump(metadata, f, indent=2)
206
- print(f"Saved {generator_name} metadata to {metadata_path}")
207
-
208
-
209
- def escape_string(s: str) -> str:
210
- """Escape quotes and backslashes in string for Python code generation."""
211
- return s.replace('\\', '\\\\').replace('"', '\\"') if s else ''
212
-
213
-
214
- def is_valid_python_identifier(name: str) -> bool:
215
- """Check if a string is a valid Python identifier."""
216
- import keyword
217
- return bool(name and name.isidentifier() and not keyword.iskeyword(name))
218
-
219
-
220
- def sanitize_python_name(name: str) -> str:
221
- """Convert name to valid Python identifier."""
222
- import re
223
- # Replace invalid characters with underscores
224
- sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', name)
225
-
226
- # Ensure it doesn't start with a number
227
- if sanitized and sanitized[0].isdigit():
228
- sanitized = '_' + sanitized
229
-
230
- # Remove double underscores and trailing underscores
231
- sanitized = re.sub(r'_+', '_', sanitized).strip('_')
232
-
233
- return sanitized if sanitized else 'unnamed'
234
-
235
-
236
- def get_standard_generator_paths(generator_file: Path) -> dict[str, Path]:
237
- """Get standard paths used by generators."""
238
- generator_dir = generator_file.parent
239
- return {
240
- 'generator_dir': generator_dir,
241
- 'data_path': generator_dir / 'data' / 'unit_data.json',
242
- 'output_dir': generator_dir / 'out',
243
- 'generated_dir': generator_dir.parent.parent / 'generated',
244
- 'src_dir': generator_dir.parent.parent
245
- }
246
-
247
-
248
- def identify_base_units_needing_prefixes(parsed_data: dict[str, Any]) -> dict[str, list]:
249
- """Identify base SI units that should have prefixes generated."""
250
- from qnty.units.prefixes import PREFIXABLE_UNITS
251
-
252
- base_units = {}
253
-
254
- # Look through all units to find base SI units that are in PREFIXABLE_UNITS
255
- for field_name, field_data in parsed_data.items():
256
- for unit_data in field_data['units']:
257
- unit_name = unit_data['normalized_name']
258
- if unit_name in PREFIXABLE_UNITS:
259
- # This is a base unit that should have prefixes
260
- # Store all occurrences of the unit, not just the last one
261
- if unit_name not in base_units:
262
- base_units[unit_name] = []
263
-
264
- base_units[unit_name].append({
265
- 'unit_data': unit_data,
266
- 'field_name': field_name,
267
- 'prefixes': PREFIXABLE_UNITS[unit_name]
268
- })
269
-
270
- return base_units
271
-
272
-
273
- def generate_prefixed_unit_data(base_unit_data: dict[str, Any], prefix, field_data: dict[str, Any]) -> dict[str, Any]:
274
- """Generate unit data for a prefixed variant of a base unit."""
275
- prefix_def = prefix.value
276
-
277
- # Apply prefix to name and symbol
278
- prefixed_name = prefix_def.apply_to_name(base_unit_data['normalized_name'])
279
- # Use the field's si_base_unit instead of unit-level data
280
- prefixed_symbol = prefix_def.apply_to_symbol(field_data.get('si_base_unit', base_unit_data.get('notation', '')))
281
-
282
- # Calculate new SI factor
283
- base_factor = base_unit_data.get('si_conversion', 1.0)
284
- new_factor = base_factor * prefix_def.factor
285
-
286
- # Create new unit data using new structure
287
- return {
288
- 'name': prefix_def.apply_to_name(base_unit_data['name']),
289
- 'normalized_name': prefixed_name,
290
- 'notation': prefixed_symbol,
291
- 'si_conversion': new_factor,
292
- 'imperial_conversion': base_unit_data.get('imperial_conversion', 1.0),
293
- 'aliases': [prefixed_symbol] if prefixed_symbol else [],
294
- 'generated_from_prefix': True # Mark as generated for identification
295
- }
296
-
297
-
298
- def augment_parsed_data_with_prefixes(parsed_data: dict[str, Any]) -> dict[str, Any]:
299
- """Add missing prefixed units to the parsed data."""
300
- from qnty.units.prefixes import StandardPrefixes
301
-
302
- # Make a deep copy to avoid modifying the original
303
- augmented_data = {}
304
- for key, value in parsed_data.items():
305
- augmented_data[key] = {
306
- 'field': value['field'],
307
- 'normalized_field': value['normalized_field'],
308
- 'dimensions': value.get('dimensions', {}),
309
- 'units': list(value['units']) # Copy the units list
310
- }
311
-
312
- # Find base units that need prefixes
313
- base_units = identify_base_units_needing_prefixes(parsed_data)
314
-
315
- # Track existing unit names to avoid duplicates
316
- existing_units = set()
317
- for field_data in augmented_data.values():
318
- for unit in field_data['units']:
319
- existing_units.add(unit['normalized_name'])
320
-
321
- # Generate and add missing prefixed units
322
- generated_count = 0
323
- for unit_name, base_entries in base_units.items():
324
- # Process each field where this unit appears
325
- for base_info in base_entries:
326
- base_unit = base_info['unit_data']
327
- field_name = base_info['field_name']
328
- prefixes = base_info['prefixes']
329
-
330
- for prefix in prefixes:
331
- if prefix == StandardPrefixes.NONE:
332
- continue # Skip NONE prefix
333
-
334
- prefix_def = prefix.value
335
- prefixed_name = prefix_def.apply_to_name(unit_name)
336
-
337
- # Only add if it doesn't already exist globally
338
- if prefixed_name not in existing_units:
339
- prefixed_unit = generate_prefixed_unit_data(base_unit, prefix, augmented_data[field_name])
340
- augmented_data[field_name]['units'].append(prefixed_unit)
341
- existing_units.add(prefixed_name)
342
- generated_count += 1
343
-
344
- print(f"Generated {generated_count} missing prefixed units for type stubs")
345
- return augmented_data