pyopenapi-gen 0.8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyopenapi_gen/__init__.py +114 -0
- pyopenapi_gen/__main__.py +6 -0
- pyopenapi_gen/cli.py +86 -0
- pyopenapi_gen/context/file_manager.py +52 -0
- pyopenapi_gen/context/import_collector.py +382 -0
- pyopenapi_gen/context/render_context.py +630 -0
- pyopenapi_gen/core/__init__.py +0 -0
- pyopenapi_gen/core/auth/base.py +22 -0
- pyopenapi_gen/core/auth/plugins.py +89 -0
- pyopenapi_gen/core/exceptions.py +25 -0
- pyopenapi_gen/core/http_transport.py +219 -0
- pyopenapi_gen/core/loader/__init__.py +12 -0
- pyopenapi_gen/core/loader/loader.py +158 -0
- pyopenapi_gen/core/loader/operations/__init__.py +12 -0
- pyopenapi_gen/core/loader/operations/parser.py +155 -0
- pyopenapi_gen/core/loader/operations/post_processor.py +60 -0
- pyopenapi_gen/core/loader/operations/request_body.py +85 -0
- pyopenapi_gen/core/loader/parameters/__init__.py +10 -0
- pyopenapi_gen/core/loader/parameters/parser.py +121 -0
- pyopenapi_gen/core/loader/responses/__init__.py +10 -0
- pyopenapi_gen/core/loader/responses/parser.py +104 -0
- pyopenapi_gen/core/loader/schemas/__init__.py +11 -0
- pyopenapi_gen/core/loader/schemas/extractor.py +184 -0
- pyopenapi_gen/core/pagination.py +64 -0
- pyopenapi_gen/core/parsing/__init__.py +13 -0
- pyopenapi_gen/core/parsing/common/__init__.py +1 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/__init__.py +9 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/__init__.py +0 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/cyclic_properties.py +66 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/direct_cycle.py +33 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/existing_schema.py +22 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/list_response.py +54 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/missing_ref.py +52 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/new_schema.py +50 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/helpers/stripped_suffix.py +51 -0
- pyopenapi_gen/core/parsing/common/ref_resolution/resolve_schema_ref.py +86 -0
- pyopenapi_gen/core/parsing/common/type_parser.py +74 -0
- pyopenapi_gen/core/parsing/context.py +184 -0
- pyopenapi_gen/core/parsing/cycle_helpers.py +123 -0
- pyopenapi_gen/core/parsing/keywords/__init__.py +1 -0
- pyopenapi_gen/core/parsing/keywords/all_of_parser.py +77 -0
- pyopenapi_gen/core/parsing/keywords/any_of_parser.py +79 -0
- pyopenapi_gen/core/parsing/keywords/array_items_parser.py +69 -0
- pyopenapi_gen/core/parsing/keywords/one_of_parser.py +72 -0
- pyopenapi_gen/core/parsing/keywords/properties_parser.py +98 -0
- pyopenapi_gen/core/parsing/schema_finalizer.py +166 -0
- pyopenapi_gen/core/parsing/schema_parser.py +610 -0
- pyopenapi_gen/core/parsing/transformers/__init__.py +0 -0
- pyopenapi_gen/core/parsing/transformers/inline_enum_extractor.py +285 -0
- pyopenapi_gen/core/parsing/transformers/inline_object_promoter.py +117 -0
- pyopenapi_gen/core/parsing/unified_cycle_detection.py +293 -0
- pyopenapi_gen/core/postprocess_manager.py +161 -0
- pyopenapi_gen/core/schemas.py +40 -0
- pyopenapi_gen/core/streaming_helpers.py +86 -0
- pyopenapi_gen/core/telemetry.py +67 -0
- pyopenapi_gen/core/utils.py +409 -0
- pyopenapi_gen/core/warning_collector.py +83 -0
- pyopenapi_gen/core/writers/code_writer.py +135 -0
- pyopenapi_gen/core/writers/documentation_writer.py +222 -0
- pyopenapi_gen/core/writers/line_writer.py +217 -0
- pyopenapi_gen/core/writers/python_construct_renderer.py +274 -0
- pyopenapi_gen/core_package_template/README.md +21 -0
- pyopenapi_gen/emit/models_emitter.py +143 -0
- pyopenapi_gen/emitters/client_emitter.py +51 -0
- pyopenapi_gen/emitters/core_emitter.py +181 -0
- pyopenapi_gen/emitters/docs_emitter.py +44 -0
- pyopenapi_gen/emitters/endpoints_emitter.py +223 -0
- pyopenapi_gen/emitters/exceptions_emitter.py +52 -0
- pyopenapi_gen/emitters/models_emitter.py +428 -0
- pyopenapi_gen/generator/client_generator.py +562 -0
- pyopenapi_gen/helpers/__init__.py +1 -0
- pyopenapi_gen/helpers/endpoint_utils.py +552 -0
- pyopenapi_gen/helpers/type_cleaner.py +341 -0
- pyopenapi_gen/helpers/type_helper.py +112 -0
- pyopenapi_gen/helpers/type_resolution/__init__.py +1 -0
- pyopenapi_gen/helpers/type_resolution/array_resolver.py +57 -0
- pyopenapi_gen/helpers/type_resolution/composition_resolver.py +79 -0
- pyopenapi_gen/helpers/type_resolution/finalizer.py +89 -0
- pyopenapi_gen/helpers/type_resolution/named_resolver.py +174 -0
- pyopenapi_gen/helpers/type_resolution/object_resolver.py +212 -0
- pyopenapi_gen/helpers/type_resolution/primitive_resolver.py +57 -0
- pyopenapi_gen/helpers/type_resolution/resolver.py +48 -0
- pyopenapi_gen/helpers/url_utils.py +14 -0
- pyopenapi_gen/http_types.py +20 -0
- pyopenapi_gen/ir.py +167 -0
- pyopenapi_gen/py.typed +1 -0
- pyopenapi_gen/types/__init__.py +11 -0
- pyopenapi_gen/types/contracts/__init__.py +13 -0
- pyopenapi_gen/types/contracts/protocols.py +106 -0
- pyopenapi_gen/types/contracts/types.py +30 -0
- pyopenapi_gen/types/resolvers/__init__.py +7 -0
- pyopenapi_gen/types/resolvers/reference_resolver.py +71 -0
- pyopenapi_gen/types/resolvers/response_resolver.py +203 -0
- pyopenapi_gen/types/resolvers/schema_resolver.py +367 -0
- pyopenapi_gen/types/services/__init__.py +5 -0
- pyopenapi_gen/types/services/type_service.py +133 -0
- pyopenapi_gen/visit/client_visitor.py +228 -0
- pyopenapi_gen/visit/docs_visitor.py +38 -0
- pyopenapi_gen/visit/endpoint/__init__.py +1 -0
- pyopenapi_gen/visit/endpoint/endpoint_visitor.py +103 -0
- pyopenapi_gen/visit/endpoint/generators/__init__.py +1 -0
- pyopenapi_gen/visit/endpoint/generators/docstring_generator.py +121 -0
- pyopenapi_gen/visit/endpoint/generators/endpoint_method_generator.py +87 -0
- pyopenapi_gen/visit/endpoint/generators/request_generator.py +103 -0
- pyopenapi_gen/visit/endpoint/generators/response_handler_generator.py +497 -0
- pyopenapi_gen/visit/endpoint/generators/signature_generator.py +88 -0
- pyopenapi_gen/visit/endpoint/generators/url_args_generator.py +183 -0
- pyopenapi_gen/visit/endpoint/processors/__init__.py +1 -0
- pyopenapi_gen/visit/endpoint/processors/import_analyzer.py +76 -0
- pyopenapi_gen/visit/endpoint/processors/parameter_processor.py +171 -0
- pyopenapi_gen/visit/exception_visitor.py +52 -0
- pyopenapi_gen/visit/model/__init__.py +0 -0
- pyopenapi_gen/visit/model/alias_generator.py +89 -0
- pyopenapi_gen/visit/model/dataclass_generator.py +197 -0
- pyopenapi_gen/visit/model/enum_generator.py +200 -0
- pyopenapi_gen/visit/model/model_visitor.py +197 -0
- pyopenapi_gen/visit/visitor.py +97 -0
- pyopenapi_gen-0.8.3.dist-info/METADATA +224 -0
- pyopenapi_gen-0.8.3.dist-info/RECORD +122 -0
- pyopenapi_gen-0.8.3.dist-info/WHEEL +4 -0
- pyopenapi_gen-0.8.3.dist-info/entry_points.txt +2 -0
- pyopenapi_gen-0.8.3.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,428 @@
|
|
1
|
+
import logging
|
2
|
+
from pathlib import Path
|
3
|
+
from typing import Dict, List, Optional, Set
|
4
|
+
|
5
|
+
from pyopenapi_gen import IRSchema, IRSpec
|
6
|
+
from pyopenapi_gen.context.render_context import RenderContext
|
7
|
+
from pyopenapi_gen.core.loader.schemas.extractor import extract_inline_array_items, extract_inline_enums
|
8
|
+
from pyopenapi_gen.core.utils import NameSanitizer
|
9
|
+
from pyopenapi_gen.core.writers.code_writer import CodeWriter
|
10
|
+
from pyopenapi_gen.visit.model.model_visitor import ModelVisitor
|
11
|
+
|
12
|
+
# Removed OPENAPI_TO_PYTHON_TYPES, FORMAT_TYPE_MAPPING, and MODEL_TEMPLATE constants
|
13
|
+
|
14
|
+
logger = logging.getLogger(__name__)
|
15
|
+
|
16
|
+
|
17
|
+
class ModelsEmitter:
    """
    Orchestrates the generation of model files (dataclasses, enums, type aliases).

    Uses a ModelVisitor to render code for each schema and writes one module
    per schema into the target ``models/`` directory. Also handles creation of
    the package ``__init__.py`` (re-export lines plus ``__all__``) and the
    ``py.typed`` marker file.
    """
def __init__(self, context: RenderContext, parsed_schemas: Dict[str, IRSchema]):
    """Initialise the emitter with a render context and the parsed schemas.

    The schema mapping is held by reference: ``emit()`` later fills in each
    schema's ``generation_name`` and ``final_module_stem`` in place.
    """
    self.context: RenderContext = context
    self.parsed_schemas: Dict[str, IRSchema] = parsed_schemas
    self.writer = CodeWriter()
    # Convenience alias; the collector itself lives on the render context.
    self.import_collector = context.import_collector
|
32
|
+
|
33
|
+
def _generate_model_file(self, schema_ir: IRSchema, models_dir: Path) -> Optional[str]:
    """Generate a single Python module file for one IRSchema.

    Args:
        schema_ir: The schema to render. Must already have its de-collided
            ``generation_name`` and ``final_module_stem`` assigned by the
            preprocessing pass in ``emit()``.
        models_dir: The target ``models/`` directory.

    Returns:
        The written file path as a string, or ``None`` if the schema was
        skipped (no name) or writing failed.
    """
    if not schema_ir.name:  # Original name, used for logging/initial identification
        logger.warning(f"Skipping model generation for schema without an original name: {schema_ir}")
        return None

    # De-collided names must have been assigned by emit()'s preprocessing step.
    assert (
        schema_ir.generation_name is not None
    ), f"Schema '{schema_ir.name}' must have generation_name set before file generation."
    assert (
        schema_ir.final_module_stem is not None
    ), f"Schema '{schema_ir.name}' must have final_module_stem set before file generation."

    file_path = models_dir / f"{schema_ir.final_module_stem}.py"
    self.context.set_current_file(str(file_path))

    # NOTE: named array-item schemas are NOT generated recursively from here.
    # The main emit() loop drives generation for every managed schema, so a
    # recursive call for schema_ir.items would risk emitting a schema whose
    # names were never preprocessed.

    # The visitor renders the dataclass/enum/alias; all parsed schemas are
    # passed so cross-schema references can be resolved.
    visitor = ModelVisitor(schemas=self.parsed_schemas)
    rendered_model_str = visitor.visit(schema_ir, self.context)

    imports_str = self.context.render_imports()
    file_content = f"{imports_str}\n\n{rendered_model_str}"

    try:
        # Ensure parent directory exists with defensive handling.
        file_path.parent.mkdir(parents=True, exist_ok=True)

        if not file_path.parent.exists():
            logger.error(f"Failed to create directory {file_path.parent}")
            return None

        # Atomic write: write a temp file, then replace the target.
        # Path.replace() (unlike rename()) also succeeds on Windows when the
        # destination already exists, e.g. when regenerating a client.
        temp_file = file_path.with_suffix(".tmp")
        temp_file.write_text(file_content, encoding="utf-8")
        temp_file.replace(file_path)

        # Verify the file was actually written.
        if not file_path.exists():
            logger.error(f"File {file_path} was not created successfully")
            return None

        logger.debug(f"Successfully created model file: {file_path}")
        return str(file_path)
    except Exception as e:
        logger.error(f"Error writing model file {file_path}: {e}")
        import traceback

        logger.error(f"Traceback: {traceback.format_exc()}")
        return None
|
122
|
+
|
123
|
+
def _generate_init_py_content(self) -> str:
    """Build the source text of ``models/__init__.py``.

    Emits one ``from .<module> import <Class>`` line per generated schema,
    sorted by original schema name for deterministic output, followed by an
    ``__all__`` list of every exported class name.
    """
    writer = CodeWriter()
    writer.write_line("from typing import List")
    writer.write_line("")

    exported_names: Set[str] = set()

    # Only schemas that went through the name de-collision pass are
    # exportable; sort on the original name so the file is deterministic.
    candidates = [
        s for s in self.parsed_schemas.values() if s.name and s.generation_name and s.final_module_stem
    ]
    candidates.sort(key=lambda s: s.name)  # type: ignore

    for schema in candidates:
        # Both names must have been set by emit()'s preprocessing step.
        assert (
            schema.generation_name is not None
        ), f"Schema '{schema.name}' missing generation_name in __init__ generation."
        assert (
            schema.final_module_stem is not None
        ), f"Schema '{schema.name}' missing final_module_stem in __init__ generation."

        # Unresolved references have no concrete module to import from.
        if schema._from_unresolved_ref:
            continue

        class_name = schema.generation_name
        module_stem = schema.final_module_stem

        # A module literally named __init__ would collide with this file.
        if module_stem == "__init__":
            logger.warning(
                f"Skipping import for schema class '{class_name}' as its module name became __init__."
            )
            continue

        writer.write_line(f"from .{module_stem} import {class_name}")
        exported_names.add(class_name)

    writer.write_line("")
    writer.write_line("__all__: List[str] = [")
    for exported in sorted(exported_names):
        writer.write_line(f"    '{exported}',")
    writer.write_line("]")

    return writer.get_code()
|
174
|
+
|
175
|
+
def emit(self, spec: IRSpec, output_root: str) -> Dict[str, List[str]]:
    """Emits all model files derived from IR schemas.

    Pipeline: (1) extract inline array-item and enum schemas into named
    top-level schemas, (2) filter out trivial primitive artifacts, (3) assign
    unique class names and module stems (de-collision), (4) write one module
    per schema, then (5) write ``__init__.py`` and ``py.typed``.

    Contracts:
        Preconditions:
            - spec is a valid IRSpec
            - output_root is a valid directory path
        Postconditions:
            - All schema models are emitted to {output_root}/models/
            - All models are properly formatted and type-annotated
            - Returns a list of file paths generated
    """
    assert isinstance(spec, IRSpec), "spec must be an IRSpec"
    assert output_root, "output_root must be a non-empty string"

    output_dir = Path(output_root.rstrip("/"))
    models_dir = output_dir / "models"
    models_dir.mkdir(parents=True, exist_ok=True)

    init_path = models_dir / "__init__.py"
    # Placeholder __init__.py; overwritten below with the real import lines.
    if not init_path.exists():
        init_path.write_text('"""Models generated from the OpenAPI specification."""\n')

    # 1. Extract inline schemas first. Both extractors take the schema dict
    # and return a NEW dict containing the originals plus any promoted
    # (previously anonymous) schemas, so the extraction result becomes the
    # source of truth for what gets generated.
    schemas_after_item_extraction = extract_inline_array_items(self.parsed_schemas)
    all_schemas_for_generation = extract_inline_enums(schemas_after_item_extraction)

    # Keep self and the RenderContext pointing at the complete, up-to-date
    # schema collection: later steps (__init__ generation, type resolution)
    # iterate over exactly this mapping.
    self.parsed_schemas = all_schemas_for_generation
    self.context.parsed_schemas = all_schemas_for_generation  # Correctly update the attribute

    # --- Name de-collision pre-processing ---
    # Ensure every schema that will generate a file has a unique class name
    # (generation_name) and a unique module stem (final_module_stem).
    assigned_class_names: Set[str] = set()
    assigned_module_stems: Set[str] = set()

    # Filter out only the most basic primitive schemas to reduce clutter.
    # Deliberately conservative to avoid breaking existing functionality.
    def should_generate_file(schema: IRSchema) -> bool:
        """Determine if a schema should get its own generated file."""
        if not schema.name or not schema.name.strip():
            return False

        # Only filter bare primitive type aliases whose names are common
        # property names AND end with "_" (a marker of sanitization) —
        # i.e. clear artifacts of the parsing process.
        is_basic_primitive_artifact = (
            schema.type in ["string", "integer", "number", "boolean"]
            and not schema.enum
            and not schema.properties
            and not schema.any_of
            and not schema.one_of
            and not schema.all_of
            and not schema.description
            and
            # Only filter very common property names that are likely artifacts
            schema.name.lower() in ["id", "name", "text", "content", "value", "type", "status"]
            and
            # And only if the schema name ends with underscore (indicating sanitization)
            schema.name.endswith("_")
        )

        if is_basic_primitive_artifact:
            return False

        return True

    # Restrict generation to schemas that pass the artifact filter.
    filtered_schemas_for_generation = {
        k: v for k, v in all_schemas_for_generation.items() if should_generate_file(v)
    }

    all_schemas_for_generation = filtered_schemas_for_generation

    # Sort by original name so collision suffixes are assigned deterministically.
    schemas_to_name_decollision = sorted(
        [s for s in all_schemas_for_generation.values()],
        key=lambda s: s.name,  # type: ignore
    )

    for schema_for_naming in schemas_to_name_decollision:  # Use the comprehensive list
        original_schema_name = schema_for_naming.name
        if not original_schema_name:
            continue  # Should be filtered

        # 1. Determine unique class name (schema_for_naming.generation_name)
        base_class_name = NameSanitizer.sanitize_class_name(original_schema_name)
        final_class_name = base_class_name
        class_suffix = 1
        while final_class_name in assigned_class_names:
            class_suffix += 1
            # Reserved names already carry a trailing underscore; fold the
            # numeric suffix into it ("Email2", not "Email_2").
            if base_class_name.endswith("_"):
                final_class_name = f"{base_class_name[:-1]}{class_suffix}"
            else:
                final_class_name = f"{base_class_name}{class_suffix}"
        assigned_class_names.add(final_class_name)
        schema_for_naming.generation_name = final_class_name

        # 2. Determine unique module stem (schema_for_naming.final_module_stem)
        base_module_stem = NameSanitizer.sanitize_module_name(original_schema_name)
        final_module_stem = base_module_stem
        module_suffix = 1

        if final_module_stem in assigned_module_stems:
            module_suffix = 2
            final_module_stem = f"{base_module_stem}_{module_suffix}"
            while final_module_stem in assigned_module_stems:
                module_suffix += 1
                final_module_stem = f"{base_module_stem}_{module_suffix}"

        assigned_module_stems.add(final_module_stem)
        schema_for_naming.final_module_stem = final_module_stem
    # --- End of Name de-collision ---

    generated_files = []
    # Iterate using the keys from `all_schemas_for_generation` as the
    # definitive list of schemas to emit.
    all_schema_keys_to_emit = list(all_schemas_for_generation.keys())
    processed_schema_original_keys: set[str] = set()

    # Bounded multi-round loop: every schema is marked processed whether
    # generation succeeds or fails, so the bound is a safety net against
    # an infinite loop, not an expected path.
    max_processing_rounds = len(all_schema_keys_to_emit) + 5
    rounds = 0

    while len(processed_schema_original_keys) < len(all_schema_keys_to_emit) and rounds < max_processing_rounds:
        rounds += 1
        something_processed_this_round = False

        for schema_key in all_schema_keys_to_emit:
            if schema_key in processed_schema_original_keys:
                continue

            # Fetch by key so we work with the (possibly newly created and
            # renamed) schema objects from the extraction step.
            current_schema_ir_obj: Optional[IRSchema] = all_schemas_for_generation.get(schema_key)

            if not current_schema_ir_obj:
                logger.warning(f"Schema key '{schema_key}' from all_schemas_for_generation not found. Skipping.")
                processed_schema_original_keys.add(schema_key)
                something_processed_this_round = True
                continue

            schema_ir: IRSchema = current_schema_ir_obj

            # Unnamed schemas never get their own file.
            if not schema_ir.name:
                processed_schema_original_keys.add(schema_key)
                something_processed_this_round = True
                continue

            if not schema_ir.generation_name or not schema_ir.final_module_stem:
                logger.error(
                    f"Schema '{schema_ir.name}' (original key '{schema_key}') is missing de-collided names. "
                    f"GenName: {schema_ir.generation_name}, "
                    f"ModStem: {schema_ir.final_module_stem}. Skipping file gen. IR: {schema_ir}"
                )
                processed_schema_original_keys.add(schema_key)
                something_processed_this_round = True
                continue

            file_path_str = self._generate_model_file(schema_ir, models_dir)

            if file_path_str is not None:
                generated_files.append(file_path_str)
                processed_schema_original_keys.add(schema_key)
                something_processed_this_round = True
            # If file_path_str is None, an error occurred, but we still mark
            # the schema as processed to avoid an infinite loop.
            elif schema_ir.name:  # Only mark as processed if it was a schema we attempted to generate
                processed_schema_original_keys.add(schema_key)
                something_processed_this_round = True  # Also count this as processed for loop termination

        if not something_processed_this_round and len(processed_schema_original_keys) < len(
            all_schema_keys_to_emit
        ):
            logger.warning(
                f"ModelsEmitter: No schemas processed in round {rounds}, but not all schemas are done. "
                f"Processed: {len(processed_schema_original_keys)}/{len(all_schema_keys_to_emit)}. "
                f"Remaining: {set(all_schema_keys_to_emit) - processed_schema_original_keys}. "
                f"Breaking to avoid infinite loop."
            )
            # Force-mark any untouched schemas as processed so the outer
            # condition cannot spin forever.
            for schema_key_rem in set(all_schema_keys_to_emit) - processed_schema_original_keys:
                s_rem = all_schemas_for_generation.get(schema_key_rem)
                logger.error(
                    f"Force marking remaining schema "
                    f"'{s_rem.name if s_rem else schema_key_rem}' as processed due to loop break."
                )
                processed_schema_original_keys.add(schema_key_rem)
            break

    if rounds >= max_processing_rounds:
        logger.error(
            f"ModelsEmitter: Exceeded max processing rounds ({max_processing_rounds}). "
            f"Processed: {len(processed_schema_original_keys)}/{len(all_schema_keys_to_emit)}. "
            f"Remaining: {set(all_schema_keys_to_emit) - processed_schema_original_keys}."
        )

    # Overwrite the placeholder __init__.py with the real re-export content.
    init_content = self._generate_init_py_content()
    init_path.write_text(init_content, encoding="utf-8")
    # py.typed file to indicate type information is available (PEP 561).
    (models_dir / "py.typed").write_text("")  # Ensure empty content, encoding defaults to utf-8

    return {"models": generated_files}
|