pyopenapi-gen 2.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. pyopenapi_gen/__init__.py +224 -0
  2. pyopenapi_gen/__main__.py +6 -0
  3. pyopenapi_gen/cli.py +62 -0
  4. pyopenapi_gen/context/CLAUDE.md +284 -0
  5. pyopenapi_gen/context/file_manager.py +52 -0
  6. pyopenapi_gen/context/import_collector.py +382 -0
  7. pyopenapi_gen/context/render_context.py +726 -0
  8. pyopenapi_gen/core/CLAUDE.md +224 -0
  9. pyopenapi_gen/core/__init__.py +0 -0
  10. pyopenapi_gen/core/auth/base.py +22 -0
  11. pyopenapi_gen/core/auth/plugins.py +89 -0
  12. pyopenapi_gen/core/cattrs_converter.py +810 -0
  13. pyopenapi_gen/core/exceptions.py +20 -0
  14. pyopenapi_gen/core/http_status_codes.py +218 -0
  15. pyopenapi_gen/core/http_transport.py +222 -0
  16. pyopenapi_gen/core/loader/__init__.py +12 -0
  17. pyopenapi_gen/core/loader/loader.py +174 -0
  18. pyopenapi_gen/core/loader/operations/__init__.py +12 -0
  19. pyopenapi_gen/core/loader/operations/parser.py +161 -0
  20. pyopenapi_gen/core/loader/operations/post_processor.py +62 -0
  21. pyopenapi_gen/core/loader/operations/request_body.py +90 -0
  22. pyopenapi_gen/core/loader/parameters/__init__.py +10 -0
  23. pyopenapi_gen/core/loader/parameters/parser.py +186 -0
  24. pyopenapi_gen/core/loader/responses/__init__.py +10 -0
  25. pyopenapi_gen/core/loader/responses/parser.py +111 -0
  26. pyopenapi_gen/core/loader/schemas/__init__.py +11 -0
  27. pyopenapi_gen/core/loader/schemas/extractor.py +275 -0
  28. pyopenapi_gen/core/pagination.py +64 -0
  29. pyopenapi_gen/core/parsing/__init__.py +13 -0
  30. pyopenapi_gen/core/parsing/common/__init__.py +1 -0
  31. pyopenapi_gen/core/parsing/common/ref_resolution/__init__.py +9 -0
  32. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/__init__.py +0 -0
  33. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/cyclic_properties.py +66 -0
  34. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/direct_cycle.py +33 -0
  35. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/existing_schema.py +22 -0
  36. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/list_response.py +54 -0
  37. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/missing_ref.py +52 -0
  38. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/new_schema.py +50 -0
  39. pyopenapi_gen/core/parsing/common/ref_resolution/helpers/stripped_suffix.py +51 -0
  40. pyopenapi_gen/core/parsing/common/ref_resolution/resolve_schema_ref.py +86 -0
  41. pyopenapi_gen/core/parsing/common/type_parser.py +73 -0
  42. pyopenapi_gen/core/parsing/context.py +187 -0
  43. pyopenapi_gen/core/parsing/cycle_helpers.py +126 -0
  44. pyopenapi_gen/core/parsing/keywords/__init__.py +1 -0
  45. pyopenapi_gen/core/parsing/keywords/all_of_parser.py +81 -0
  46. pyopenapi_gen/core/parsing/keywords/any_of_parser.py +84 -0
  47. pyopenapi_gen/core/parsing/keywords/array_items_parser.py +72 -0
  48. pyopenapi_gen/core/parsing/keywords/one_of_parser.py +77 -0
  49. pyopenapi_gen/core/parsing/keywords/properties_parser.py +98 -0
  50. pyopenapi_gen/core/parsing/schema_finalizer.py +169 -0
  51. pyopenapi_gen/core/parsing/schema_parser.py +804 -0
  52. pyopenapi_gen/core/parsing/transformers/__init__.py +0 -0
  53. pyopenapi_gen/core/parsing/transformers/inline_enum_extractor.py +285 -0
  54. pyopenapi_gen/core/parsing/transformers/inline_object_promoter.py +120 -0
  55. pyopenapi_gen/core/parsing/unified_cycle_detection.py +293 -0
  56. pyopenapi_gen/core/postprocess_manager.py +260 -0
  57. pyopenapi_gen/core/spec_fetcher.py +148 -0
  58. pyopenapi_gen/core/streaming_helpers.py +84 -0
  59. pyopenapi_gen/core/telemetry.py +69 -0
  60. pyopenapi_gen/core/utils.py +456 -0
  61. pyopenapi_gen/core/warning_collector.py +83 -0
  62. pyopenapi_gen/core/writers/code_writer.py +135 -0
  63. pyopenapi_gen/core/writers/documentation_writer.py +222 -0
  64. pyopenapi_gen/core/writers/line_writer.py +217 -0
  65. pyopenapi_gen/core/writers/python_construct_renderer.py +321 -0
  66. pyopenapi_gen/core_package_template/README.md +21 -0
  67. pyopenapi_gen/emit/models_emitter.py +143 -0
  68. pyopenapi_gen/emitters/CLAUDE.md +286 -0
  69. pyopenapi_gen/emitters/client_emitter.py +51 -0
  70. pyopenapi_gen/emitters/core_emitter.py +181 -0
  71. pyopenapi_gen/emitters/docs_emitter.py +44 -0
  72. pyopenapi_gen/emitters/endpoints_emitter.py +247 -0
  73. pyopenapi_gen/emitters/exceptions_emitter.py +187 -0
  74. pyopenapi_gen/emitters/mocks_emitter.py +185 -0
  75. pyopenapi_gen/emitters/models_emitter.py +426 -0
  76. pyopenapi_gen/generator/CLAUDE.md +352 -0
  77. pyopenapi_gen/generator/client_generator.py +567 -0
  78. pyopenapi_gen/generator/exceptions.py +7 -0
  79. pyopenapi_gen/helpers/CLAUDE.md +325 -0
  80. pyopenapi_gen/helpers/__init__.py +1 -0
  81. pyopenapi_gen/helpers/endpoint_utils.py +532 -0
  82. pyopenapi_gen/helpers/type_cleaner.py +334 -0
  83. pyopenapi_gen/helpers/type_helper.py +112 -0
  84. pyopenapi_gen/helpers/type_resolution/__init__.py +1 -0
  85. pyopenapi_gen/helpers/type_resolution/array_resolver.py +57 -0
  86. pyopenapi_gen/helpers/type_resolution/composition_resolver.py +79 -0
  87. pyopenapi_gen/helpers/type_resolution/finalizer.py +105 -0
  88. pyopenapi_gen/helpers/type_resolution/named_resolver.py +172 -0
  89. pyopenapi_gen/helpers/type_resolution/object_resolver.py +216 -0
  90. pyopenapi_gen/helpers/type_resolution/primitive_resolver.py +109 -0
  91. pyopenapi_gen/helpers/type_resolution/resolver.py +47 -0
  92. pyopenapi_gen/helpers/url_utils.py +14 -0
  93. pyopenapi_gen/http_types.py +20 -0
  94. pyopenapi_gen/ir.py +165 -0
  95. pyopenapi_gen/py.typed +1 -0
  96. pyopenapi_gen/types/CLAUDE.md +140 -0
  97. pyopenapi_gen/types/__init__.py +11 -0
  98. pyopenapi_gen/types/contracts/__init__.py +13 -0
  99. pyopenapi_gen/types/contracts/protocols.py +106 -0
  100. pyopenapi_gen/types/contracts/types.py +28 -0
  101. pyopenapi_gen/types/resolvers/__init__.py +7 -0
  102. pyopenapi_gen/types/resolvers/reference_resolver.py +71 -0
  103. pyopenapi_gen/types/resolvers/response_resolver.py +177 -0
  104. pyopenapi_gen/types/resolvers/schema_resolver.py +498 -0
  105. pyopenapi_gen/types/services/__init__.py +5 -0
  106. pyopenapi_gen/types/services/type_service.py +165 -0
  107. pyopenapi_gen/types/strategies/__init__.py +5 -0
  108. pyopenapi_gen/types/strategies/response_strategy.py +310 -0
  109. pyopenapi_gen/visit/CLAUDE.md +272 -0
  110. pyopenapi_gen/visit/client_visitor.py +477 -0
  111. pyopenapi_gen/visit/docs_visitor.py +38 -0
  112. pyopenapi_gen/visit/endpoint/__init__.py +1 -0
  113. pyopenapi_gen/visit/endpoint/endpoint_visitor.py +292 -0
  114. pyopenapi_gen/visit/endpoint/generators/__init__.py +1 -0
  115. pyopenapi_gen/visit/endpoint/generators/docstring_generator.py +123 -0
  116. pyopenapi_gen/visit/endpoint/generators/endpoint_method_generator.py +222 -0
  117. pyopenapi_gen/visit/endpoint/generators/mock_generator.py +140 -0
  118. pyopenapi_gen/visit/endpoint/generators/overload_generator.py +252 -0
  119. pyopenapi_gen/visit/endpoint/generators/request_generator.py +103 -0
  120. pyopenapi_gen/visit/endpoint/generators/response_handler_generator.py +705 -0
  121. pyopenapi_gen/visit/endpoint/generators/signature_generator.py +83 -0
  122. pyopenapi_gen/visit/endpoint/generators/url_args_generator.py +207 -0
  123. pyopenapi_gen/visit/endpoint/processors/__init__.py +1 -0
  124. pyopenapi_gen/visit/endpoint/processors/import_analyzer.py +78 -0
  125. pyopenapi_gen/visit/endpoint/processors/parameter_processor.py +171 -0
  126. pyopenapi_gen/visit/exception_visitor.py +90 -0
  127. pyopenapi_gen/visit/model/__init__.py +0 -0
  128. pyopenapi_gen/visit/model/alias_generator.py +93 -0
  129. pyopenapi_gen/visit/model/dataclass_generator.py +553 -0
  130. pyopenapi_gen/visit/model/enum_generator.py +212 -0
  131. pyopenapi_gen/visit/model/model_visitor.py +198 -0
  132. pyopenapi_gen/visit/visitor.py +97 -0
  133. pyopenapi_gen-2.7.2.dist-info/METADATA +1169 -0
  134. pyopenapi_gen-2.7.2.dist-info/RECORD +137 -0
  135. pyopenapi_gen-2.7.2.dist-info/WHEEL +4 -0
  136. pyopenapi_gen-2.7.2.dist-info/entry_points.txt +2 -0
  137. pyopenapi_gen-2.7.2.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,426 @@
1
+ import logging
2
+ from pathlib import Path
3
+ from typing import List, Set
4
+
5
+ from pyopenapi_gen import IRSchema, IRSpec
6
+ from pyopenapi_gen.context.render_context import RenderContext
7
+ from pyopenapi_gen.core.loader.schemas.extractor import extract_inline_array_items, extract_inline_enums
8
+ from pyopenapi_gen.core.utils import NameSanitizer
9
+ from pyopenapi_gen.core.writers.code_writer import CodeWriter
10
+ from pyopenapi_gen.visit.model.model_visitor import ModelVisitor
11
+
12
+ # Removed OPENAPI_TO_PYTHON_TYPES, FORMAT_TYPE_MAPPING, and MODEL_TEMPLATE constants
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
class ModelsEmitter:
    """
    Orchestrates the generation of model files (dataclasses, enums, type aliases).

    Uses a ModelVisitor to render code for each schema and writes it to a file.
    Handles creation of __init__.py and py.typed files.
    """

    def __init__(self, context: RenderContext, parsed_schemas: dict[str, IRSchema]):
        """Keep a handle on the render context and the schemas to emit.

        The schema dict is held by reference: ``emit()`` later fills in each
        schema's ``generation_name`` and ``final_module_stem`` in place.
        """
        self.context: RenderContext = context
        self.parsed_schemas: dict[str, IRSchema] = parsed_schemas
        # Convenience aliases used throughout file generation.
        self.import_collector = context.import_collector
        self.writer = CodeWriter()
+
33
+ def _generate_model_file(self, schema_ir: IRSchema, models_dir: Path) -> str | None:
34
+ """Generates a single Python file for a given IRSchema."""
35
+ if not schema_ir.name: # Original name, used for logging/initial identification
36
+ logger.warning(f"Skipping model generation for schema without an original name: {schema_ir}")
37
+ return None
38
+
39
+ # logger.debug(
40
+ # f"_generate_model_file processing schema: original_name='{schema_ir.name}', "
41
+ # f"generation_name='{schema_ir.generation_name}', final_module_stem='{schema_ir.final_module_stem}'"
42
+ # )
43
+
44
+ # Assert that de-collided names have been set by the emit() method's preprocessing.
45
+ if schema_ir.generation_name is None:
46
+ raise RuntimeError(f"Schema '{schema_ir.name}' must have generation_name set before file generation.")
47
+ if schema_ir.final_module_stem is None:
48
+ raise RuntimeError(f"Schema '{schema_ir.name}' must have final_module_stem set before file generation.")
49
+
50
+ file_path = models_dir / f"{schema_ir.final_module_stem}.py"
51
+
52
+ self.context.set_current_file(str(file_path))
53
+
54
+ # Add support for handling arrays properly by ensuring items schema is processed
55
+ # This part might need to ensure that items_schema also has its generation_name/final_module_stem set
56
+ # if it's being recursively generated here. The main emit loop should handle all schemas.
57
+ if schema_ir.type == "array" and schema_ir.items is not None:
58
+ items_schema = schema_ir.items
59
+ if items_schema.name and items_schema.type == "object" and items_schema.properties:
60
+ if (
61
+ items_schema.name in self.parsed_schemas and items_schema.final_module_stem
62
+ ): # Check if it's a managed schema
63
+ items_file_path = models_dir / f"{items_schema.final_module_stem}.py"
64
+ if not items_file_path.exists():
65
+ # This recursive call might be problematic if items_schema wasn't fully preprocessed.
66
+ # The main emit loop is preferred for driving generation.
67
+ # For now, assuming items_schema has its names set if it's a distinct schema.
68
+ # logger.debug(f"Potentially generating item schema {items_schema.name} recursively.")
69
+ # self._generate_model_file(items_schema, models_dir) # Re-evaluating recursive call here.
70
+ # Better to rely on main loop processing all schemas.
71
+ pass
72
+
73
+ # ModelVisitor should use schema_ir.generation_name for the class name.
74
+ # We'll need to verify ModelVisitor's behavior.
75
+ # For now, ModelVisitor.visit uses schema.name as base_name_for_construct if not schema.generation_name.
76
+ # If schema.generation_name is set, it should be preferred. Let's assume ModelVisitor handles this,
77
+ # or we ensure schema.name is updated to schema.generation_name before visitor.
78
+ # The IRSchema.__post_init__ already sanitizes schema.name.
79
+ # The ModelVisitor's `visit_IRSchema` uses schema.name for `base_name_for_construct`.
80
+ # So, `schema.generation_name` should be used by the visitor.
81
+ # For now, the visitor logic uses schema.name. We must ensure that the `generation_name` (decollided)
82
+ # is what the visitor uses for the class definition.
83
+ # A temporary workaround could be:
84
+ # original_ir_name = schema_ir.name
85
+ # schema_ir.name = schema_ir.generation_name # Temporarily set for visitor
86
+ visitor = ModelVisitor(schemas=self.parsed_schemas) # Pass all schemas for reference
87
+ rendered_model_str = visitor.visit(schema_ir, self.context)
88
+ # schema_ir.name = original_ir_name # Restore if changed
89
+
90
+ imports_str = self.context.render_imports()
91
+ file_content = f"{imports_str}\n\n{rendered_model_str}"
92
+
93
+ try:
94
+ # Ensure parent directory exists with more defensive handling
95
+ file_path.parent.mkdir(parents=True, exist_ok=True)
96
+
97
+ # Verify the directory was actually created before writing
98
+ if not file_path.parent.exists():
99
+ logger.error(f"Failed to create directory {file_path.parent}")
100
+ return None
101
+
102
+ # Write with atomic operation to prevent partial writes
103
+ temp_file = file_path.with_suffix(".tmp")
104
+ temp_file.write_text(file_content, encoding="utf-8")
105
+ temp_file.rename(file_path)
106
+
107
+ # Verify the file was actually written
108
+ if not file_path.exists():
109
+ logger.error(f"File {file_path} was not created successfully")
110
+ return None
111
+
112
+ logger.debug(f"Successfully created model file: {file_path}")
113
+ return str(file_path)
114
+ except Exception as e:
115
+ logger.error(f"Error writing model file {file_path}: {e}")
116
+ import traceback
117
+
118
+ logger.error(f"Traceback: {traceback.format_exc()}")
119
+ return None
120
+
121
+ def _generate_init_py_content(self) -> str: # Removed generated_files_paths, models_dir args
122
+ """Generates the content for models/__init__.py."""
123
+ init_writer = CodeWriter()
124
+ init_writer.write_line("from typing import List")
125
+ init_writer.write_line("")
126
+
127
+ all_class_names_to_export: Set[str] = set()
128
+
129
+ # Iterate over the schemas that were processed for name generation
130
+ # to ensure we use the final, de-collided names.
131
+ # Sort by original schema name for deterministic __init__.py content.
132
+ sorted_schemas_for_init = sorted(
133
+ [s for s in self.parsed_schemas.values() if s.name and s.generation_name and s.final_module_stem],
134
+ key=lambda s: s.name, # type: ignore
135
+ )
136
+
137
+ for s_schema in sorted_schemas_for_init:
138
+ # These should have been set in the emit() preprocessing step.
139
+ if s_schema.generation_name is None:
140
+ raise RuntimeError(f"Schema '{s_schema.name}' missing generation_name in __init__ generation.")
141
+ if s_schema.final_module_stem is None:
142
+ raise RuntimeError(f"Schema '{s_schema.name}' missing final_module_stem in __init__ generation.")
143
+
144
+ if s_schema._from_unresolved_ref: # Check this flag if it's relevant
145
+ # logger.debug(
146
+ # f"Skipping schema '{s_schema.generation_name}' in __init__ as it's an unresolved reference."
147
+ # )
148
+ continue
149
+
150
+ class_name_to_import = s_schema.generation_name
151
+ module_name_to_import_from = s_schema.final_module_stem
152
+
153
+ if module_name_to_import_from == "__init__":
154
+ logger.warning(
155
+ f"Skipping import for schema class '{class_name_to_import}' as its module name became __init__."
156
+ )
157
+ continue
158
+
159
+ init_writer.write_line(f"from .{module_name_to_import_from} import {class_name_to_import}")
160
+ all_class_names_to_export.add(class_name_to_import)
161
+
162
+ init_writer.write_line("")
163
+ init_writer.write_line("__all__: List[str] = [")
164
+ for name_to_export in sorted(list(all_class_names_to_export)):
165
+ init_writer.write_line(f" '{name_to_export}',")
166
+ init_writer.write_line("]")
167
+
168
+ generated_content = init_writer.get_code()
169
+ return generated_content
170
+
171
    def emit(self, spec: IRSpec, output_root: str) -> dict[str, List[str]]:
        """Emits all model files derived from IR schemas.

        Contracts:
            Preconditions:
                - spec is a valid IRSpec
                - output_root is a valid directory path
            Postconditions:
                - All schema models are emitted to {output_root}/models/
                - All models are properly formatted and type-annotated
                - Returns a dict mapping "models" to the list of generated file paths
        """
        if not isinstance(spec, IRSpec):
            raise TypeError("spec must be an IRSpec")
        if not output_root:
            raise ValueError("output_root must be a non-empty string")

        output_dir = Path(output_root.rstrip("/"))
        models_dir = output_dir / "models"
        models_dir.mkdir(parents=True, exist_ok=True)

        init_path = models_dir / "__init__.py"
        # Placeholder __init__.py; overwritten at the end with real imports.
        if not init_path.exists():
            init_path.write_text('"""Models generated from the OpenAPI specification."""\n')

        # 1. Extract inline schemas first. The extractors take the schema dict
        # and return a NEW dict containing the originals plus any promoted
        # (previously anonymous) schemas, so the result becomes the single
        # source of truth for what gets generated.
        schemas_after_item_extraction = extract_inline_array_items(self.parsed_schemas)
        all_schemas_for_generation = extract_inline_enums(schemas_after_item_extraction)

        # Both this emitter and the RenderContext must see the complete,
        # post-extraction schema set (used later by _generate_init_py_content
        # and for type resolution during rendering).
        self.parsed_schemas = all_schemas_for_generation
        self.context.parsed_schemas = all_schemas_for_generation  # Correctly update the attribute

        # --- Name de-collision pre-processing ---
        # Ensure every file-generating schema gets a unique class name
        # (generation_name) and a unique module stem (final_module_stem).

        assigned_class_names: Set[str] = set()
        assigned_module_stems: Set[str] = set()

        # Conservative filter: drop only the most obvious primitive-alias
        # artifacts of parsing, to reduce clutter without breaking anything.
        def should_generate_file(schema: IRSchema) -> bool:
            """Determine if a schema should get its own generated file."""
            if not schema.name or not schema.name.strip():
                return False

            # A bare primitive alias, with a very common property-like name,
            # whose name ends in "_" (indicating sanitization) is treated as
            # a parsing artifact and skipped.
            is_basic_primitive_artifact = (
                schema.type in ["string", "integer", "number", "boolean"]
                and not schema.enum
                and not schema.properties
                and not schema.any_of
                and not schema.one_of
                and not schema.all_of
                and not schema.description
                and
                # Only filter very common property names that are likely artifacts
                schema.name.lower() in ["id", "name", "text", "content", "value", "type", "status"]
                and
                # And only if the schema name ends with underscore (indicating sanitization)
                schema.name.endswith("_")
            )

            if is_basic_primitive_artifact:
                return False

            return True

        # Keep only schemas that should produce a file.
        filtered_schemas_for_generation = {
            k: v for k, v in all_schemas_for_generation.items() if should_generate_file(v)
        }

        all_schemas_for_generation = filtered_schemas_for_generation

        # Sort by original name so collision suffixes are deterministic.
        schemas_to_name_decollision = sorted(
            [s for s in all_schemas_for_generation.values()],
            key=lambda s: s.name,  # type: ignore
        )

        for schema_for_naming in schemas_to_name_decollision:
            original_schema_name = schema_for_naming.name
            if not original_schema_name:
                continue  # Should be filtered

            # 1. Determine unique class name (schema_for_naming.generation_name)
            base_class_name = NameSanitizer.sanitize_class_name(original_schema_name)
            final_class_name = base_class_name
            class_suffix = 1
            while final_class_name in assigned_class_names:
                class_suffix += 1
                # Reserved names already carry a trailing underscore:
                # prefer "Email2" over "Email_2".
                if base_class_name.endswith("_"):
                    final_class_name = f"{base_class_name[:-1]}{class_suffix}"
                else:
                    final_class_name = f"{base_class_name}{class_suffix}"
            assigned_class_names.add(final_class_name)
            schema_for_naming.generation_name = final_class_name

            # 2. Determine unique module stem (schema_for_naming.final_module_stem)
            base_module_stem = NameSanitizer.sanitize_module_name(original_schema_name)
            final_module_stem = base_module_stem
            module_suffix = 1

            if final_module_stem in assigned_module_stems:
                module_suffix = 2
                final_module_stem = f"{base_module_stem}_{module_suffix}"
                while final_module_stem in assigned_module_stems:
                    module_suffix += 1
                    final_module_stem = f"{base_module_stem}_{module_suffix}"

            assigned_module_stems.add(final_module_stem)
            schema_for_naming.final_module_stem = final_module_stem
        # --- End of Name de-collision ---

        generated_files = []
        # Iterate over the keys of all_schemas_for_generation — the definitive list.
        all_schema_keys_to_emit = list(all_schemas_for_generation.keys())
        processed_schema_original_keys: set[str] = set()

        # Bounded multi-round loop: every schema is marked processed whether it
        # succeeds or fails, so the bound is only a belt-and-braces guard
        # against an infinite loop.
        max_processing_rounds = len(all_schema_keys_to_emit) + 5
        rounds = 0

        while len(processed_schema_original_keys) < len(all_schema_keys_to_emit) and rounds < max_processing_rounds:
            rounds += 1
            something_processed_this_round = False

            for schema_key in all_schema_keys_to_emit:
                if schema_key in processed_schema_original_keys:
                    continue

                # Fetch via all_schemas_for_generation so newly promoted and
                # renamed schemas are the ones actually emitted.
                current_schema_ir_obj: IRSchema | None = all_schemas_for_generation.get(schema_key)

                if not current_schema_ir_obj:
                    logger.warning(f"Schema key '{schema_key}' from all_schemas_for_generation not found. Skipping.")
                    processed_schema_original_keys.add(schema_key)
                    something_processed_this_round = True
                    continue

                schema_ir: IRSchema = current_schema_ir_obj

                # Unnamed schemas never get their own file.
                if not schema_ir.name:
                    processed_schema_original_keys.add(schema_key)
                    something_processed_this_round = True
                    continue

                if not schema_ir.generation_name or not schema_ir.final_module_stem:
                    logger.error(
                        f"Schema '{schema_ir.name}' (original key '{schema_key}') is missing de-collided names. "
                        f"GenName: {schema_ir.generation_name}, "
                        f"ModStem: {schema_ir.final_module_stem}. Skipping file gen. IR: {schema_ir}"
                    )
                    processed_schema_original_keys.add(schema_key)
                    something_processed_this_round = True
                    continue

                file_path_str = self._generate_model_file(schema_ir, models_dir)

                if file_path_str is not None:
                    generated_files.append(file_path_str)
                    processed_schema_original_keys.add(schema_key)
                    something_processed_this_round = True
                # A None result means a write error occurred; still mark the
                # schema processed so the loop terminates.
                elif schema_ir.name:
                    processed_schema_original_keys.add(schema_key)
                    something_processed_this_round = True

            # Stalled round: nothing progressed but work remains — force-mark
            # leftovers as processed and bail out rather than spin forever.
            if not something_processed_this_round and len(processed_schema_original_keys) < len(
                all_schema_keys_to_emit
            ):
                logger.warning(
                    f"ModelsEmitter: No schemas processed in round {rounds}, but not all schemas are done. "
                    f"Processed: {len(processed_schema_original_keys)}/{len(all_schema_keys_to_emit)}. "
                    f"Remaining: {set(all_schema_keys_to_emit) - processed_schema_original_keys}. "
                    f"Breaking to avoid infinite loop."
                )
                for schema_key_rem in set(all_schema_keys_to_emit) - processed_schema_original_keys:
                    s_rem = all_schemas_for_generation.get(schema_key_rem)
                    logger.error(
                        f"Force marking remaining schema "
                        f"'{s_rem.name if s_rem else schema_key_rem}' as processed due to loop break."
                    )
                    processed_schema_original_keys.add(schema_key_rem)
                break

        if rounds >= max_processing_rounds:
            logger.error(
                f"ModelsEmitter: Exceeded max processing rounds ({max_processing_rounds}). "
                f"Processed: {len(processed_schema_original_keys)}/{len(all_schema_keys_to_emit)}. "
                f"Remaining: {set(all_schema_keys_to_emit) - processed_schema_original_keys}."
            )

        # Rewrite __init__.py with the real imports now that names are final.
        init_content = self._generate_init_py_content()
        init_path.write_text(init_content, encoding="utf-8")
        # py.typed file to indicate type information is available
        (models_dir / "py.typed").write_text("")  # Ensure empty content, encoding defaults to utf-8

        return {"models": generated_files}