pyopenapi-gen 2.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137) hide show
  1. pyopenapi_gen/__init__.py +224 -0
  2. pyopenapi_gen/__main__.py +6 -0
  3. pyopenapi_gen/cli.py +62 -0
  4. pyopenapi_gen/context/CLAUDE.md +284 -0
  5. pyopenapi_gen/context/file_manager.py +52 -0
  6. pyopenapi_gen/context/import_collector.py +382 -0
  7. pyopenapi_gen/context/render_context.py +726 -0
  8. pyopenapi_gen/core/CLAUDE.md +224 -0
  9. pyopenapi_gen/core/__init__.py +0 -0
  10. pyopenapi_gen/core/auth/base.py +22 -0
  11. pyopenapi_gen/core/auth/plugins.py +89 -0
  12. pyopenapi_gen/core/cattrs_converter.py +810 -0
  13. pyopenapi_gen/core/exceptions.py +20 -0
  14. pyopenapi_gen/core/http_status_codes.py +218 -0
  15. pyopenapi_gen/core/http_transport.py +222 -0
  16. pyopenapi_gen/core/loader/__init__.py +12 -0
  17. pyopenapi_gen/core/loader/loader.py +174 -0
  18. pyopenapi_gen/core/loader/operations/__init__.py +12 -0
  19. pyopenapi_gen/core/loader/operations/parser.py +161 -0
  20. pyopenapi_gen/core/loader/operations/post_processor.py +62 -0
  21. pyopenapi_gen/core/loader/operations/request_body.py +90 -0
  22. pyopenapi_gen/core/loader/parameters/__init__.py +10 -0
  23. pyopenapi_gen/core/loader/parameters/parser.py +186 -0
  24. pyopenapi_gen/core/loader/responses/__init__.py +10 -0
  25. pyopenapi_gen/core/loader/responses/parser.py +111 -0
  26. pyopenapi_gen/core/loader/schemas/__init__.py +11 -0
  27. pyopenapi_gen/core/loader/schemas/extractor.py +275 -0
  28. pyopenapi_gen/core/pagination.py +64 -0
  29. pyopenapi_gen/core/parsing/__init__.py +13 -0
  30. pyopenapi_gen/core/parsing/common/__init__.py +1 -0
  31. pyopenapi_gen/core/parsing/common/ref_resolution/__init__.py +9 -0
  32. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/__init__.py +0 -0
  33. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/cyclic_properties.py +66 -0
  34. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/direct_cycle.py +33 -0
  35. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/existing_schema.py +22 -0
  36. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/list_response.py +54 -0
  37. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/missing_ref.py +52 -0
  38. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/new_schema.py +50 -0
  39. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/stripped_suffix.py +51 -0
  40. pyopenapi_gen/core/parsing/common/ref_resolution/resolve_schema_ref.py +86 -0
  41. pyopenapi_gen/core/parsing/common/type_parser.py +73 -0
  42. pyopenapi_gen/core/parsing/context.py +187 -0
  43. pyopenapi_gen/core/parsing/cycle_helpers.py +126 -0
  44. pyopenapi_gen/core/parsing/keywords/__init__.py +1 -0
  45. pyopenapi_gen/core/parsing/keywords/all_of_parser.py +81 -0
  46. pyopenapi_gen/core/parsing/keywords/any_of_parser.py +84 -0
  47. pyopenapi_gen/core/parsing/keywords/array_items_parser.py +72 -0
  48. pyopenapi_gen/core/parsing/keywords/one_of_parser.py +77 -0
  49. pyopenapi_gen/core/parsing/keywords/properties_parser.py +98 -0
  50. pyopenapi_gen/core/parsing/schema_finalizer.py +169 -0
  51. pyopenapi_gen/core/parsing/schema_parser.py +804 -0
  52. pyopenapi_gen/core/parsing/transformers/__init__.py +0 -0
  53. pyopenapi_gen/core/parsing/transformers/inline_enum_extractor.py +285 -0
  54. pyopenapi_gen/core/parsing/transformers/inline_object_promoter.py +120 -0
  55. pyopenapi_gen/core/parsing/unified_cycle_detection.py +293 -0
  56. pyopenapi_gen/core/postprocess_manager.py +260 -0
  57. pyopenapi_gen/core/spec_fetcher.py +148 -0
  58. pyopenapi_gen/core/streaming_helpers.py +84 -0
  59. pyopenapi_gen/core/telemetry.py +69 -0
  60. pyopenapi_gen/core/utils.py +456 -0
  61. pyopenapi_gen/core/warning_collector.py +83 -0
  62. pyopenapi_gen/core/writers/code_writer.py +135 -0
  63. pyopenapi_gen/core/writers/documentation_writer.py +222 -0
  64. pyopenapi_gen/core/writers/line_writer.py +217 -0
  65. pyopenapi_gen/core/writers/python_construct_renderer.py +321 -0
  66. pyopenapi_gen/core_package_template/README.md +21 -0
  67. pyopenapi_gen/emit/models_emitter.py +143 -0
  68. pyopenapi_gen/emitters/CLAUDE.md +286 -0
  69. pyopenapi_gen/emitters/client_emitter.py +51 -0
  70. pyopenapi_gen/emitters/core_emitter.py +181 -0
  71. pyopenapi_gen/emitters/docs_emitter.py +44 -0
  72. pyopenapi_gen/emitters/endpoints_emitter.py +247 -0
  73. pyopenapi_gen/emitters/exceptions_emitter.py +187 -0
  74. pyopenapi_gen/emitters/mocks_emitter.py +185 -0
  75. pyopenapi_gen/emitters/models_emitter.py +426 -0
  76. pyopenapi_gen/generator/CLAUDE.md +352 -0
  77. pyopenapi_gen/generator/client_generator.py +567 -0
  78. pyopenapi_gen/generator/exceptions.py +7 -0
  79. pyopenapi_gen/helpers/CLAUDE.md +325 -0
  80. pyopenapi_gen/helpers/__init__.py +1 -0
  81. pyopenapi_gen/helpers/endpoint_utils.py +532 -0
  82. pyopenapi_gen/helpers/type_cleaner.py +334 -0
  83. pyopenapi_gen/helpers/type_helper.py +112 -0
  84. pyopenapi_gen/helpers/type_resolution/__init__.py +1 -0
  85. pyopenapi_gen/helpers/type_resolution/array_resolver.py +57 -0
  86. pyopenapi_gen/helpers/type_resolution/composition_resolver.py +79 -0
  87. pyopenapi_gen/helpers/type_resolution/finalizer.py +105 -0
  88. pyopenapi_gen/helpers/type_resolution/named_resolver.py +172 -0
  89. pyopenapi_gen/helpers/type_resolution/object_resolver.py +216 -0
  90. pyopenapi_gen/helpers/type_resolution/primitive_resolver.py +109 -0
  91. pyopenapi_gen/helpers/type_resolution/resolver.py +47 -0
  92. pyopenapi_gen/helpers/url_utils.py +14 -0
  93. pyopenapi_gen/http_types.py +20 -0
  94. pyopenapi_gen/ir.py +165 -0
  95. pyopenapi_gen/py.typed +1 -0
  96. pyopenapi_gen/types/CLAUDE.md +140 -0
  97. pyopenapi_gen/types/__init__.py +11 -0
  98. pyopenapi_gen/types/contracts/__init__.py +13 -0
  99. pyopenapi_gen/types/contracts/protocols.py +106 -0
  100. pyopenapi_gen/types/contracts/types.py +28 -0
  101. pyopenapi_gen/types/resolvers/__init__.py +7 -0
  102. pyopenapi_gen/types/resolvers/reference_resolver.py +71 -0
  103. pyopenapi_gen/types/resolvers/response_resolver.py +177 -0
  104. pyopenapi_gen/types/resolvers/schema_resolver.py +498 -0
  105. pyopenapi_gen/types/services/__init__.py +5 -0
  106. pyopenapi_gen/types/services/type_service.py +165 -0
  107. pyopenapi_gen/types/strategies/__init__.py +5 -0
  108. pyopenapi_gen/types/strategies/response_strategy.py +310 -0
  109. pyopenapi_gen/visit/CLAUDE.md +272 -0
  110. pyopenapi_gen/visit/client_visitor.py +477 -0
  111. pyopenapi_gen/visit/docs_visitor.py +38 -0
  112. pyopenapi_gen/visit/endpoint/__init__.py +1 -0
  113. pyopenapi_gen/visit/endpoint/endpoint_visitor.py +292 -0
  114. pyopenapi_gen/visit/endpoint/generators/__init__.py +1 -0
  115. pyopenapi_gen/visit/endpoint/generators/docstring_generator.py +123 -0
  116. pyopenapi_gen/visit/endpoint/generators/endpoint_method_generator.py +222 -0
  117. pyopenapi_gen/visit/endpoint/generators/mock_generator.py +140 -0
  118. pyopenapi_gen/visit/endpoint/generators/overload_generator.py +252 -0
  119. pyopenapi_gen/visit/endpoint/generators/request_generator.py +103 -0
  120. pyopenapi_gen/visit/endpoint/generators/response_handler_generator.py +705 -0
  121. pyopenapi_gen/visit/endpoint/generators/signature_generator.py +83 -0
  122. pyopenapi_gen/visit/endpoint/generators/url_args_generator.py +207 -0
  123. pyopenapi_gen/visit/endpoint/processors/__init__.py +1 -0
  124. pyopenapi_gen/visit/endpoint/processors/import_analyzer.py +78 -0
  125. pyopenapi_gen/visit/endpoint/processors/parameter_processor.py +171 -0
  126. pyopenapi_gen/visit/exception_visitor.py +90 -0
  127. pyopenapi_gen/visit/model/__init__.py +0 -0
  128. pyopenapi_gen/visit/model/alias_generator.py +93 -0
  129. pyopenapi_gen/visit/model/dataclass_generator.py +553 -0
  130. pyopenapi_gen/visit/model/enum_generator.py +212 -0
  131. pyopenapi_gen/visit/model/model_visitor.py +198 -0
  132. pyopenapi_gen/visit/visitor.py +97 -0
  133. pyopenapi_gen-2.7.2.dist-info/METADATA +1169 -0
  134. pyopenapi_gen-2.7.2.dist-info/RECORD +137 -0
  135. pyopenapi_gen-2.7.2.dist-info/WHEEL +4 -0
  136. pyopenapi_gen-2.7.2.dist-info/entry_points.txt +2 -0
  137. pyopenapi_gen-2.7.2.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,293 @@
1
+ """
2
+ Unified cycle detection system for schema parsing.
3
+
4
+ This module provides a comprehensive, conflict-free approach to cycle detection
5
+ that handles structural cycles, processing cycles, and depth limits consistently.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import logging
11
+ from dataclasses import dataclass, field
12
+ from enum import Enum
13
+ from typing import List, Set
14
+
15
+ from pyopenapi_gen import IRSchema
16
+ from pyopenapi_gen.core.utils import NameSanitizer
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
class SchemaState(Enum):
    """Lifecycle states a schema moves through during parsing."""

    NOT_STARTED = "not_started"  # never entered
    IN_PROGRESS = "in_progress"  # currently on the parsing stack
    COMPLETED = "completed"  # fully parsed
    PLACEHOLDER_CYCLE = "placeholder_cycle"  # replaced by a structural-cycle placeholder
    PLACEHOLDER_DEPTH = "placeholder_depth"  # replaced by a max-depth placeholder
    PLACEHOLDER_SELF_REF = "placeholder_self_ref"  # replaced by a self-reference placeholder
30
+
31
+
32
class CycleType(Enum):
    """Kinds of cycle the detector can report."""

    STRUCTURAL = "structural"  # Schema references form a loop
    SELF_REFERENCE = "self_reference"  # Schema directly references itself
    MAX_DEPTH = "max_depth"  # Recursion depth limit exceeded
38
+
39
+
40
class CycleAction(Enum):
    """What the caller should do after a cycle-detection check."""

    CONTINUE_PARSING = "continue"  # No cycle, or an allowed cycle
    RETURN_PLACEHOLDER = "placeholder"  # Return pre-made placeholder
    CREATE_PLACEHOLDER = "create"  # Create new placeholder
    RETURN_EXISTING = "existing"  # Return existing parsed schema
47
+
48
+
49
@dataclass
class CycleInfo:
    """Description of a single detected cycle."""

    schema_name: str  # schema at which the cycle was re-entered
    cycle_path: List[str]  # names along the loop, closed back at schema_name
    cycle_type: CycleType  # STRUCTURAL or SELF_REFERENCE
    is_direct_self_reference: bool  # True when the schema references itself directly
    depth_when_detected: int  # stack depth at the moment of detection
58
+
59
+
60
@dataclass
class CycleDetectionResult:
    """Outcome of one cycle-detection check."""

    is_cycle: bool  # whether a cycle or depth limit was hit
    cycle_type: CycleType | None  # kind of cycle, when known
    action: CycleAction  # what the caller should do next
    cycle_info: CycleInfo | None = None  # details of the detected cycle, if any
    placeholder_schema: IRSchema | None = None  # pre-built placeholder to return, if any
69
+
70
+
71
@dataclass
class UnifiedCycleContext:
    """Single context shared by every cycle-detection mechanism."""

    # Core tracking
    schema_stack: List[str] = field(default_factory=list)  # schemas currently being parsed, in entry order
    schema_states: dict[str, SchemaState] = field(default_factory=dict)
    parsed_schemas: dict[str, IRSchema] = field(default_factory=dict)
    recursion_depth: int = 0

    # Detection results
    detected_cycles: List[CycleInfo] = field(default_factory=list)
    depth_exceeded_schemas: Set[str] = field(default_factory=set)
    cycle_detected: bool = False  # Global flag for backward compatibility

    # Configuration
    max_depth: int = 150  # default limit; overridable via the PYOPENAPI_MAX_DEPTH env var
    allow_self_reference: bool = False
89
+
90
+
91
def analyze_cycle(schema_name: str, schema_stack: List[str]) -> CycleInfo:
    """Build a CycleInfo describing the loop that re-enters *schema_name*.

    The cycle path is the slice of the stack from the first occurrence of
    *schema_name* to the top, closed by appending *schema_name* once more.
    """
    if schema_name in schema_stack:
        first = schema_stack.index(schema_name)
        cycle_path = [*schema_stack[first:], schema_name]
    else:
        # Defensive: callers should only pass stacked names; degrade gracefully.
        cycle_path = [schema_name, schema_name]

    direct = len(cycle_path) == 2 and cycle_path[0] == cycle_path[1]

    return CycleInfo(
        schema_name=schema_name,
        cycle_path=cycle_path,
        cycle_type=CycleType.SELF_REFERENCE if direct else CycleType.STRUCTURAL,
        is_direct_self_reference=direct,
        depth_when_detected=len(schema_stack),
    )
111
+
112
+
113
def create_cycle_placeholder(schema_name: str, cycle_info: CycleInfo) -> IRSchema:
    """Build a placeholder IRSchema standing in for a schema caught in a cycle."""
    path_text = " -> ".join(cycle_info.cycle_path)
    return IRSchema(
        name=NameSanitizer.sanitize_class_name(schema_name),
        type="object",
        description=f"[Circular reference detected: {path_text}]",
        _from_unresolved_ref=True,
        _circular_ref_path=path_text,
        _is_circular_ref=True,
    )
126
+
127
+
128
def create_self_ref_placeholder(schema_name: str, cycle_info: CycleInfo) -> IRSchema:
    """Build a stub IRSchema for a schema that is allowed to reference itself.

    Note: *cycle_info* is accepted for signature symmetry with the other
    placeholder factories but is not consulted here.
    """
    return IRSchema(
        name=NameSanitizer.sanitize_class_name(schema_name),
        type="object",
        description=f"[Self-referencing schema: {schema_name}]",
        _is_self_referential_stub=True,
    )
138
+
139
+
140
def create_depth_placeholder(schema_name: str, depth: int) -> IRSchema:
    """Build a placeholder IRSchema for a schema abandoned at the recursion limit."""
    message = f"[Maximum recursion depth ({depth}) exceeded for '{schema_name}']"

    # Route the warning through cycle_helpers' logger so it appears on the
    # same logging channel the rest of cycle handling uses.
    from .cycle_helpers import logger as cycle_helpers_logger

    cycle_helpers_logger.warning(message)

    return IRSchema(
        name=NameSanitizer.sanitize_class_name(schema_name),
        type="object",
        description=message,
        _max_depth_exceeded_marker=True,
    )
156
+
157
+
158
def unified_cycle_check(schema_name: str | None, context: UnifiedCycleContext) -> CycleDetectionResult:
    """Unified cycle detection that handles all cases.

    Checks, in order: completed schemas (reuse), existing placeholders (reuse),
    the recursion-depth limit, and structural/self-reference cycles via the
    parsing stack. Mutates *context* (states, placeholders, flags) as needed.

    Args:
        schema_name: Name of the schema about to be parsed, or None for
            anonymous schemas (which can never cycle by name).
        context: Shared cycle-detection state.

    Returns:
        A CycleDetectionResult telling the caller how to proceed.
    """
    if schema_name is None:
        return CycleDetectionResult(False, None, CycleAction.CONTINUE_PARSING)

    # Check current state
    current_state = context.schema_states.get(schema_name, SchemaState.NOT_STARTED)

    # 1. If already completed, reuse (no cycle)
    if current_state == SchemaState.COMPLETED:
        return CycleDetectionResult(False, None, CycleAction.RETURN_EXISTING)

    # 2. If already a placeholder, reuse it
    if current_state in (
        SchemaState.PLACEHOLDER_CYCLE,
        SchemaState.PLACEHOLDER_DEPTH,
        SchemaState.PLACEHOLDER_SELF_REF,
    ):
        return CycleDetectionResult(True, None, CycleAction.RETURN_PLACEHOLDER)

    # 3. Check depth limit BEFORE checking cycles. The environment variable is
    #    read on every call so the limit can be adjusted dynamically.
    import os

    max_depth = int(os.environ.get("PYOPENAPI_MAX_DEPTH", context.max_depth))
    if context.recursion_depth > max_depth:
        context.depth_exceeded_schemas.add(schema_name)
        context.schema_states[schema_name] = SchemaState.PLACEHOLDER_DEPTH
        context.cycle_detected = True  # Max depth exceeded is considered a form of cycle detection
        placeholder = create_depth_placeholder(schema_name, max_depth)
        context.parsed_schemas[schema_name] = placeholder
        return CycleDetectionResult(
            True, CycleType.MAX_DEPTH, CycleAction.CREATE_PLACEHOLDER, placeholder_schema=placeholder
        )

    # 4. Check for structural cycle
    if schema_name in context.schema_stack:
        cycle_info = analyze_cycle(schema_name, context.schema_stack)
        context.cycle_detected = True

        # For cycles, create a placeholder for the re-entrant reference, not the
        # original schema. This allows the original schema parsing to complete
        # normally; the re-entrant reference gets a circular placeholder.
        # (The original code also built an unused `cycle_ref_key` here — removed
        # as dead code.)

        # Determine if cycle is allowed
        if context.allow_self_reference and cycle_info.is_direct_self_reference:
            placeholder = create_self_ref_placeholder(schema_name, cycle_info)
        else:
            context.detected_cycles.append(cycle_info)
            placeholder = create_cycle_placeholder(schema_name, cycle_info)

        # Determine storage policy based on cycle characteristics.
        # Synthetic schemas are array-item ("...Item") or property ("...Property") schemas.
        is_synthetic_schema = schema_name and ("Item" in schema_name or "Property" in schema_name)

        # Check for specific known patterns
        cycle_path_str = " -> ".join(cycle_info.cycle_path)
        is_direct_array_self_ref = (
            "Children" in cycle_path_str
            and "ChildrenItem" in cycle_path_str
            and cycle_info.cycle_path[0] == cycle_info.cycle_path[-1]
        )
        is_nested_property_self_ref = (
            any(
                name.startswith(schema_name) and name != schema_name and not name.endswith("Item")
                for name in cycle_info.cycle_path
            )
            and cycle_info.cycle_path[0] == cycle_info.cycle_path[-1]
        )

        should_store_placeholder = (
            is_synthetic_schema
            or cycle_info.is_direct_self_reference
            or is_direct_array_self_ref
            or is_nested_property_self_ref
        )

        if should_store_placeholder:
            context.parsed_schemas[schema_name] = placeholder
            # Mark schema state appropriately
            if context.allow_self_reference and cycle_info.is_direct_self_reference:
                context.schema_states[schema_name] = SchemaState.PLACEHOLDER_SELF_REF
            else:
                context.schema_states[schema_name] = SchemaState.PLACEHOLDER_CYCLE

        # Don't mark the original schema as a placeholder - just return the
        # placeholder for this reference
        return CycleDetectionResult(
            True,
            (
                cycle_info.cycle_type
                if not (context.allow_self_reference and cycle_info.is_direct_self_reference)
                else CycleType.SELF_REFERENCE
            ),
            CycleAction.CREATE_PLACEHOLDER,
            cycle_info=cycle_info,
            placeholder_schema=placeholder,
        )

    # 5. No cycle detected - proceed with parsing
    context.schema_states[schema_name] = SchemaState.IN_PROGRESS
    return CycleDetectionResult(False, None, CycleAction.CONTINUE_PARSING)
263
+
264
+
265
def unified_enter_schema(schema_name: str | None, context: UnifiedCycleContext) -> CycleDetectionResult:
    """Enter a schema: bump the recursion depth, run the cycle check, and push
    the stack only when parsing will actually proceed."""
    context.recursion_depth += 1

    outcome = unified_cycle_check(schema_name, context)

    # The stack tracks only schemas we genuinely descend into.
    if schema_name and outcome.action == CycleAction.CONTINUE_PARSING:
        context.schema_stack.append(schema_name)

    return outcome
276
+
277
+
278
def unified_exit_schema(schema_name: str | None, context: UnifiedCycleContext) -> None:
    """Exit a schema: unwind depth and stack, and promote IN_PROGRESS to COMPLETED.

    Placeholder states are deliberately left untouched.
    """
    if context.recursion_depth > 0:
        context.recursion_depth -= 1

    if schema_name:
        if schema_name in context.schema_stack:
            context.schema_stack.remove(schema_name)
        if context.schema_states.get(schema_name) == SchemaState.IN_PROGRESS:
            context.schema_states[schema_name] = SchemaState.COMPLETED
289
+
290
+
291
def get_schema_or_placeholder(schema_name: str, context: UnifiedCycleContext) -> IRSchema | None:
    """Return the schema (or placeholder) registered under *schema_name*, or None."""
    registry = context.parsed_schemas
    if schema_name in registry:
        return registry[schema_name]
    return None
@@ -0,0 +1,260 @@
1
+ import subprocess # nosec B404 - Required for running code formatters (Black, Ruff) and mypy
2
+ import sys
3
+ from pathlib import Path
4
+ from typing import List, Union
5
+
6
+ SUCCESS_LINE = "Success: no issues found in 1 source file"
7
+
8
+
9
+ def _print_filtered_stdout(stdout: str) -> None:
10
+ lines = [line for line in stdout.splitlines() if line.strip() and line.strip() != SUCCESS_LINE]
11
+ if lines:
12
+ print("\n".join(lines))
13
+
14
+
15
class PostprocessManager:
    """
    Handles post-processing of generated Python files: import cleanup, formatting, and type checking.
    Can be used programmatically or as a script.
    """

    def __init__(self, project_root: str):
        # Upper bound for package-root discovery in run()
        self.project_root = project_root

    def _run_ruff(self, ruff_args: List[str], paths: List[str]) -> bool:
        """Run ``python -m ruff`` with *ruff_args* on *paths*.

        Prints filtered stdout and raw stderr when Ruff reports a problem.

        Returns:
            True when issues were reported, False otherwise (or when *paths* is empty).
        """
        if not paths:
            return False
        result = subprocess.run(  # nosec B603 - Controlled subprocess with hardcoded command
            [sys.executable, "-m", "ruff", *ruff_args, *paths],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
        if result.returncode != 0 or result.stderr:
            if result.stdout:
                _print_filtered_stdout(result.stdout)
            if result.stderr:
                print(result.stderr, file=sys.stderr)
            return True
        return False

    def run(self, targets: List[Union[str, Path]]) -> None:
        """
        Run Ruff checks on individual files, then run Mypy on the package root.
        """
        if not targets:
            return

        # Ensure all targets are Path objects
        target_paths = [Path(t) for t in targets]

        # OPTIMISED: Run Ruff once on all files instead of per-file
        python_files = [p for p in target_paths if p.is_file() and p.suffix == ".py"]

        if python_files:
            # Bulk invocations are much faster than one subprocess per file
            self.remove_unused_imports_bulk(python_files)
            self.sort_imports_bulk(python_files)
            self.format_code_bulk(python_files)

        # Determine the package root directory(s) for Mypy: for a file, walk up
        # while ancestors still contain __init__.py (stopping at project_root);
        # the last such directory is the package root.
        package_roots = set()
        for target_path in target_paths:
            if target_path.is_file():
                current = target_path.parent
                package_root = current
                while current != Path(self.project_root) and (current / "__init__.py").exists():
                    package_root = current
                    current = current.parent
                package_roots.add(package_root)
            elif target_path.is_dir():
                # A directory passed directly is assumed to be a package root
                package_roots.add(target_path)

        # Run Mypy on each identified package root
        # TEMPORARILY DISABLED: Mypy is slow on large specs, disabled for faster iteration
        # if package_roots:
        #     print(f"Running Mypy on package root(s): {package_roots}")
        #     for root_dir in package_roots:
        #         print(f"Running mypy on {root_dir}...")
        #         self.type_check(root_dir)

    def remove_unused_imports_bulk(self, targets: List[Path]) -> None:
        """Remove unused imports (Ruff F401) from multiple targets in one pass."""
        self._run_ruff(["check", "--select=F401", "--fix"], [str(t) for t in targets])

    def sort_imports_bulk(self, targets: List[Path]) -> None:
        """Sort imports (Ruff I) in multiple targets in one pass."""
        self._run_ruff(["check", "--select=I", "--fix"], [str(t) for t in targets])

    def format_code_bulk(self, targets: List[Path]) -> None:
        """Format multiple targets with Ruff in one pass."""
        self._run_ruff(["format"], [str(t) for t in targets])

    def remove_unused_imports(self, target: Union[str, Path]) -> None:
        """Remove unused imports from the target using Ruff."""
        self._run_ruff(["check", "--select=F401", "--fix"], [str(target)])

    def sort_imports(self, target: Union[str, Path]) -> None:
        """Sort imports in the target using Ruff."""
        self._run_ruff(["check", "--select=I", "--fix"], [str(target)])

    def format_code(self, target: Union[str, Path]) -> None:
        """Format code in the target using Ruff, reporting when issues were found."""
        if self._run_ruff(["format"], [str(target)]):
            print(f"Formatting found and fixed issues in {target}.", file=sys.stderr)

    def type_check(self, target_dir: Path) -> None:
        """Type check the target directory using mypy (strict mode).

        Retries once with a throwaway cache directory when stderr suggests a
        corrupted incremental cache; exits the process on a genuine failure.
        """
        if not target_dir.is_dir():
            print(f"Skipping Mypy on non-directory: {target_dir}", file=sys.stderr)
            return

        print(f"Running mypy on {target_dir}...")
        # Find all Python files in the target directory
        python_files = list(target_dir.rglob("*.py"))
        if not python_files:
            print(f"No Python files found in {target_dir}, skipping type check.")
            return

        # Try mypy with cache cleanup on failure
        for attempt in range(2):
            cmd = [sys.executable, "-m", "mypy", "--strict"]
            if attempt == 1:
                # Second attempt: use a fresh cache directory
                cmd.append("--cache-dir=/tmp/mypy_cache_temp")
            cmd.extend(str(f) for f in python_files)

            result = subprocess.run(  # nosec B603 - Controlled subprocess with hardcoded command
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )

            # Heuristic patterns for mypy incremental-cache corruption
            cache_error_patterns = ["KeyError: 'setter_type'", "KeyError:", "deserialize"]
            is_cache_error = any(pattern in result.stderr for pattern in cache_error_patterns)

            if result.returncode == 0:
                # Success
                return
            elif attempt == 0 and is_cache_error:
                # Retry with cache cleanup (plain string: original f-string had no placeholders)
                print("Mypy cache error detected, retrying with fresh cache...", file=sys.stderr)
                continue
            else:
                # Report the error
                if result.stdout:
                    print(result.stdout)
                if result.stderr:
                    print(result.stderr, file=sys.stderr)
                print(f"Type checking failed for {target_dir}. Please fix the above issues.", file=sys.stderr)
                sys.exit(result.returncode)
252
+
253
+
254
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Postprocess generated Python files/directories.")
    parser.add_argument("targets", nargs="+", help="Files or directories to postprocess.")
    # Bug fix: the original read ``args.project_root`` without ever declaring
    # the argument, so every CLI invocation raised AttributeError. Declare it
    # with a backward-compatible default (current directory).
    parser.add_argument(
        "--project-root",
        dest="project_root",
        default=".",
        help="Project root used to bound package-root discovery (default: current directory).",
    )
    args = parser.parse_args()
    PostprocessManager(args.project_root).run(args.targets)
@@ -0,0 +1,148 @@
1
+ """Centralised OpenAPI specification loading from file paths or URLs.
2
+
3
+ This module provides utilities for loading OpenAPI specifications from both
4
+ local file paths and HTTP(S) URLs. It handles content parsing (JSON/YAML)
5
+ and provides meaningful error messages for common failure scenarios.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ import httpx
15
+ import yaml
16
+
17
+ from pyopenapi_gen.generator.exceptions import GenerationError
18
+
19
+
20
def is_url(path_or_url: str) -> bool:
    """Check if the input looks like an HTTP(S) URL.

    Args:
        path_or_url: String that may be a file path or URL.

    Returns:
        True if the string starts with ``http://`` or ``https://``, False otherwise.
    """
    for scheme_prefix in ("http://", "https://"):
        if path_or_url.startswith(scheme_prefix):
            return True
    return False
30
+
31
+
32
def fetch_spec(path_or_url: str, timeout: float = 30.0) -> dict[str, Any]:
    """Load an OpenAPI specification from a file path or URL.

    Supports both local file paths and HTTP(S) URLs. For URLs, the content
    type is inferred from the Content-Type header or URL extension.

    Args:
        path_or_url: Path to a local file or HTTP(S) URL to fetch.
        timeout: Timeout in seconds for HTTP requests (default: 30.0).

    Returns:
        Parsed OpenAPI specification as a dictionary.

    Raises:
        GenerationError: If loading or parsing fails.
    """
    if not is_url(path_or_url):
        return _load_from_file(path_or_url)
    return _fetch_from_url(path_or_url, timeout)
51
+
52
+
53
def _fetch_from_url(url: str, timeout: float) -> dict[str, Any]:
    """Fetch and parse an OpenAPI spec from a URL.

    Args:
        url: HTTP(S) URL to fetch.
        timeout: Timeout in seconds for the request.

    Returns:
        Parsed specification dictionary.

    Raises:
        GenerationError: On network errors, HTTP errors, or parse failures.
    """
    try:
        response = httpx.get(url, timeout=timeout, follow_redirects=True)
        response.raise_for_status()
    # Fix: chain the underlying httpx exception (`from e`) so tracebacks keep
    # the root cause instead of the implicit "during handling of" noise (B904).
    except httpx.TimeoutException as e:
        raise GenerationError(f"Failed to fetch spec from URL: connection timed out after {timeout}s") from e
    except httpx.HTTPStatusError as e:
        raise GenerationError(f"Failed to fetch spec from URL: HTTP {e.response.status_code}") from e
    except httpx.RequestError as e:
        raise GenerationError(f"Failed to fetch spec from URL: {e}") from e

    content = response.text
    content_type = response.headers.get("content-type", "")

    return _parse_content(content, content_type, url)
80
+
81
+
82
def _load_from_file(path: str) -> dict[str, Any]:
    """Load and parse an OpenAPI spec from a local file.

    Args:
        path: Path to the local file.

    Returns:
        Parsed specification dictionary.

    Raises:
        GenerationError: If the file doesn't exist, isn't a file, can't be
            read, or parsing fails.
    """
    file_path = Path(path)

    if not file_path.exists():
        raise GenerationError(f"Specification file not found at {path}")

    if not file_path.is_file():
        raise GenerationError(f"Specified path {path} is not a file.")

    # Fix: JSON/YAML documents are UTF-8 by specification; a bare read_text()
    # uses the locale encoding and can mis-decode on non-UTF-8 systems (PEP 597).
    # Also wrap read errors so callers only ever see GenerationError, per the
    # module's documented contract.
    try:
        content = file_path.read_text(encoding="utf-8")
    except OSError as e:
        raise GenerationError(f"Failed to read spec file {path}: {e}") from e

    extension = file_path.suffix.lower()

    # Determine format from extension; the YAML branch also handles JSON content
    if extension == ".json":
        content_type = "application/json"
    else:
        content_type = "application/yaml"

    return _parse_content(content, content_type, path)
112
+
113
+
114
def _parse_content(content: str, content_type: str, source: str) -> dict[str, Any]:
    """Parse raw spec text as JSON or YAML.

    Args:
        content: Raw content string.
        content_type: MIME type hint (may be empty).
        source: Source path/URL for error messages.

    Returns:
        Parsed dictionary.

    Raises:
        GenerationError: If parsing fails or the result is not a dictionary.
    """
    looks_like_json = "json" in content_type.lower()

    if looks_like_json:
        try:
            data = json.loads(content)
        except json.JSONDecodeError as e:
            raise GenerationError(f"Failed to parse spec: invalid JSON content - {e}")
    else:
        # YAML is a superset of JSON, so this branch handles both
        try:
            data = yaml.safe_load(content)
        except yaml.YAMLError as e:
            # The content-type hint may have been misleading; try JSON before giving up
            try:
                data = json.loads(content)
            except json.JSONDecodeError:
                raise GenerationError(f"Failed to parse spec: invalid YAML content - {e}")

    if isinstance(data, dict):
        return data
    raise GenerationError(f"Loaded spec from {source} is not a dictionary (got {type(data).__name__}).")