pyopenapi-gen 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. pyopenapi_gen/cli.py +5 -22
  2. pyopenapi_gen/context/import_collector.py +8 -8
  3. pyopenapi_gen/core/loader/operations/parser.py +1 -1
  4. pyopenapi_gen/core/parsing/context.py +2 -1
  5. pyopenapi_gen/core/parsing/cycle_helpers.py +1 -1
  6. pyopenapi_gen/core/parsing/keywords/properties_parser.py +4 -4
  7. pyopenapi_gen/core/parsing/schema_parser.py +4 -4
  8. pyopenapi_gen/core/parsing/transformers/inline_enum_extractor.py +1 -1
  9. pyopenapi_gen/core/postprocess_manager.py +39 -13
  10. pyopenapi_gen/core/schemas.py +101 -16
  11. pyopenapi_gen/core/writers/python_construct_renderer.py +57 -9
  12. pyopenapi_gen/emitters/endpoints_emitter.py +1 -1
  13. pyopenapi_gen/helpers/endpoint_utils.py +4 -22
  14. pyopenapi_gen/helpers/type_cleaner.py +1 -1
  15. pyopenapi_gen/helpers/type_resolution/composition_resolver.py +1 -1
  16. pyopenapi_gen/helpers/type_resolution/finalizer.py +1 -1
  17. pyopenapi_gen/types/contracts/types.py +0 -1
  18. pyopenapi_gen/types/resolvers/response_resolver.py +5 -33
  19. pyopenapi_gen/types/resolvers/schema_resolver.py +2 -2
  20. pyopenapi_gen/types/services/type_service.py +0 -18
  21. pyopenapi_gen/types/strategies/__init__.py +5 -0
  22. pyopenapi_gen/types/strategies/response_strategy.py +187 -0
  23. pyopenapi_gen/visit/endpoint/endpoint_visitor.py +1 -20
  24. pyopenapi_gen/visit/endpoint/generators/docstring_generator.py +5 -3
  25. pyopenapi_gen/visit/endpoint/generators/endpoint_method_generator.py +12 -6
  26. pyopenapi_gen/visit/endpoint/generators/response_handler_generator.py +352 -343
  27. pyopenapi_gen/visit/endpoint/generators/signature_generator.py +7 -4
  28. pyopenapi_gen/visit/endpoint/processors/import_analyzer.py +4 -2
  29. pyopenapi_gen/visit/endpoint/processors/parameter_processor.py +1 -1
  30. pyopenapi_gen/visit/model/dataclass_generator.py +32 -1
  31. pyopenapi_gen-0.8.5.dist-info/METADATA +383 -0
  32. {pyopenapi_gen-0.8.3.dist-info → pyopenapi_gen-0.8.5.dist-info}/RECORD +35 -33
  33. pyopenapi_gen-0.8.3.dist-info/METADATA +0 -224
  34. {pyopenapi_gen-0.8.3.dist-info → pyopenapi_gen-0.8.5.dist-info}/WHEEL +0 -0
  35. {pyopenapi_gen-0.8.3.dist-info → pyopenapi_gen-0.8.5.dist-info}/entry_points.txt +0 -0
  36. {pyopenapi_gen-0.8.3.dist-info → pyopenapi_gen-0.8.5.dist-info}/licenses/LICENSE +0 -0
pyopenapi_gen/cli.py CHANGED
@@ -6,26 +6,6 @@ import yaml
 
 from .generator.client_generator import ClientGenerator, GenerationError
 
-app = typer.Typer(invoke_without_command=True)
-
-
-@app.callback()
-def main(ctx: typer.Context) -> None:
-    """
-    PyOpenAPI Generator CLI.
-    """
-    if ctx.invoked_subcommand is None:
-        # Show basic help without using ctx.get_help() to avoid Click compatibility issues
-        typer.echo("PyOpenAPI Generator CLI")
-        typer.echo("")
-        typer.echo("Usage: pyopenapi-gen [OPTIONS] COMMAND [ARGS]...")
-        typer.echo("")
-        typer.echo("Commands:")
-        typer.echo("  gen  Generate a Python OpenAPI client from a spec file or URL")
-        typer.echo("")
-        typer.echo("Run 'pyopenapi-gen gen --help' for more information on the gen command.")
-        raise typer.Exit(code=0)
-
 
 def _load_spec(path_or_url: str) -> Union[Dict[str, Any], Any]:
     """Load a spec from a file path or URL."""
@@ -35,8 +15,7 @@ def _load_spec(path_or_url: str) -> Union[Dict[str, Any], Any]:
         raise typer.Exit(code=1)
 
 
-@app.command()
-def gen(
+def main(
     spec: str = typer.Argument(..., help="Path or URL to OpenAPI spec"),
     project_root: Path = typer.Option(
         ...,
@@ -82,5 +61,9 @@ def gen(
         raise typer.Exit(code=1)
 
 
+app = typer.Typer(help="PyOpenAPI Generator CLI - Generate Python clients from OpenAPI specs.")
+app.command()(main)
+
+
 if __name__ == "__main__":
     app()
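
Note: the net effect is that `main` is now the app's only command, registered via `app.command()(main)` instead of a decorator, and the old callback's hand-rolled help text is gone. A minimal sketch of the pattern (simplified argument list; the real `main` takes the full option set shown above):

    import typer

    def main(spec: str = typer.Argument(..., help="Path or URL to OpenAPI spec")) -> None:
        typer.echo(f"Generating client from {spec}")

    app = typer.Typer(help="PyOpenAPI Generator CLI - Generate Python clients from OpenAPI specs.")
    app.command()(main)  # same effect as stacking @app.command() on main

    if __name__ == "__main__":
        app()

Because a Typer app with a single command and no callback invokes that command directly, the spec can now be passed without a `gen` subcommand.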
pyopenapi_gen/context/import_collector.py CHANGED
@@ -274,10 +274,10 @@ class ImportCollector:
             is_core_module_to_be_absolute = True
 
         if is_core_module_to_be_absolute:
-            import_statement = f"from {module_name} import {', '.join(names)}"
+            import_statement = f"from {module_name} import {", ".join(names)}"
             standard_import_lines.append(import_statement)
         elif is_stdlib_module:
-            import_statement = f"from {module_name} import {', '.join(names)}"
+            import_statement = f"from {module_name} import {", ".join(names)}"
             standard_import_lines.append(import_statement)
         elif (
             current_module_dot_path_to_use
@@ -286,13 +286,13 @@ class ImportCollector:
         ):
             try:
                 relative_module = make_relative_import(current_module_dot_path_to_use, module_name)
-                import_statement = f"from {relative_module} import {', '.join(names)}"
+                import_statement = f"from {relative_module} import {", ".join(names)}"
                 standard_import_lines.append(import_statement)
             except ValueError as e:
-                import_statement = f"from {module_name} import {', '.join(names)}"
+                import_statement = f"from {module_name} import {", ".join(names)}"
                 standard_import_lines.append(import_statement)
         else:
-            import_statement = f"from {module_name} import {', '.join(names)}"
+            import_statement = f"from {module_name} import {", ".join(names)}"
             standard_import_lines.append(import_statement)
 
         plain_import_lines: List[str] = []
@@ -331,7 +331,7 @@ class ImportCollector:
 
         for module in stdlib_modules:
             names = sorted(self.imports[module])
-            statements.append(f"from {module} import {', '.join(names)}")
+            statements.append(f"from {module} import {", ".join(names)}")
 
         # Then third-party and app imports
         other_modules = sorted([m for m in self.imports.keys() if not _is_stdlib(m)])
@@ -341,7 +341,7 @@ class ImportCollector:
 
         for module in other_modules:
             names = sorted(self.imports[module])
-            statements.append(f"from {module} import {', '.join(names)}")
+            statements.append(f"from {module} import {", ".join(names)}")
 
         # Then plain imports
         if self.plain_imports:
@@ -357,7 +357,7 @@ class ImportCollector:
 
         for module in sorted(self.relative_imports.keys()):
             names = sorted(self.relative_imports[module])
-            statements.append(f"from {module} import {', '.join(names)}")
+            statements.append(f"from {module} import {", ".join(names)}")
 
         return "\n".join(statements)
 
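Note: the rewritten f-strings here (and in the similar hunks below) reuse the outer double quote inside replacement fields. That syntax is valid only on Python 3.12+ under PEP 701; earlier interpreters raise a SyntaxError. Both spellings produce identical strings on 3.12+:

    names = ["Dict", "List"]
    new_style = f"from typing import {", ".join(names)}"  # PEP 701, Python >= 3.12 only
    old_style = f"from typing import {', '.join(names)}"  # portable to older interpreters
    assert new_style == old_style == "from typing import Dict, List"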
pyopenapi_gen/core/loader/operations/parser.py CHANGED
@@ -120,7 +120,7 @@ def parse_operations(
                 # Handle direct schema references in responses
                 # Convert schema reference to a response with content
                 resp_node_resolved = {
-                    "description": f"Response with {rn_node['$ref'].split('/')[-1]} schema",
+                    "description": f"Response with {rn_node["$ref"].split("/")[-1]} schema",
                     "content": {"application/json": {"schema": {"$ref": rn_node["$ref"]}}},
                 }
             else:
pyopenapi_gen/core/parsing/context.py CHANGED
@@ -47,7 +47,8 @@ class ParsingContext:
         max_depth = int(os.environ.get("PYOPENAPI_MAX_DEPTH", 150))
 
         self.unified_cycle_context = UnifiedCycleContext(
-            parsed_schemas=self.parsed_schemas, max_depth=max_depth  # Share the same parsed_schemas dict
+            parsed_schemas=self.parsed_schemas,
+            max_depth=max_depth,  # Share the same parsed_schemas dict
         )
 
     def unified_enter_schema(self, schema_name: Optional[str]) -> Any:
pyopenapi_gen/core/parsing/cycle_helpers.py CHANGED
@@ -92,7 +92,7 @@ def _handle_max_depth_exceeded(original_name: Optional[str], context: ParsingCon
 
     # path_prefix = schema_ir_name_attr if schema_ir_name_attr else "<anonymous_schema>"
     # cycle_path_for_desc = f"{path_prefix} -> MAX_DEPTH_EXCEEDED"
-    description = f"[Maximum recursion depth ({max_depth}) exceeded for '{original_name or 'anonymous'}']"
+    description = f"[Maximum recursion depth ({max_depth}) exceeded for '{original_name or "anonymous"}']"
     logger.warning(description)
 
     placeholder_schema = IRSchema(
pyopenapi_gen/core/parsing/keywords/properties_parser.py CHANGED
@@ -81,15 +81,15 @@ def _parse_properties(
         if promoted_ir is not None:
             properties_map[prop_key] = promoted_ir
             logger.debug(
-                f"Added promoted '{prop_key}' (name: {getattr(promoted_ir, 'name', 'N/A')}) "
+                f"Added promoted '{prop_key}' (name: {getattr(promoted_ir, "name", "N/A")}) "
                 f"to properties_map for '{parent_schema_name}'"
             )
         else:
             properties_map[prop_key] = prop_schema_ir
             logger.debug(
-                f"Added original '{prop_key}' (name: {getattr(prop_schema_ir, 'name', 'N/A')}, "
-                f"type: {getattr(prop_schema_ir, 'type', 'N/A')}, "
-                f"circular: {getattr(prop_schema_ir, '_is_circular_ref', 'N/A')}) "
+                f"Added original '{prop_key}' (name: {getattr(prop_schema_ir, "name", "N/A")}, "
+                f"type: {getattr(prop_schema_ir, "type", "N/A")}, "
+                f"circular: {getattr(prop_schema_ir, "_is_circular_ref", "N/A")}) "
                 f"to properties_map for '{parent_schema_name}'"
             )
 
pyopenapi_gen/core/parsing/schema_parser.py CHANGED
@@ -42,7 +42,7 @@ def _resolve_ref(
     if not (ref_name_parts and ref_name_parts[-1]):
         logger.warning(
             f"Malformed $ref path '{ref_path_str}' encountered while parsing "
-            f"parent '{parent_schema_name or 'anonymous'}'."
+            f"parent '{parent_schema_name or "anonymous"}'."
         )
         return IRSchema(
             name=None,  # Anonymous placeholder for a bad ref
@@ -60,7 +60,7 @@ def _resolve_ref(
     ref_node = context.raw_spec_schemas.get(ref_name)
     if ref_node is None:
         logger.warning(
-            f"Cannot resolve $ref '{ref_path_str}' for parent '{parent_schema_name or 'anonymous'}'. "
+            f"Cannot resolve $ref '{ref_path_str}' for parent '{parent_schema_name or "anonymous"}'. "
             f"Target '{ref_name}' not in raw_spec_schemas. Returning placeholder."
         )
         return IRSchema(
@@ -142,7 +142,7 @@ def _parse_properties(
     for prop_name, prop_schema_node in properties_node.items():
         if not isinstance(prop_name, str) or not prop_name:
             logger.warning(
-                f"Skipping property with invalid name '{prop_name}' in schema '{parent_schema_name or 'anonymous'}'."
+                f"Skipping property with invalid name '{prop_name}' in schema '{parent_schema_name or "anonymous"}'."
             )
             continue
 
@@ -379,7 +379,7 @@ def _parse_schema(
 
     assert isinstance(
         schema_node, Mapping
-    ), f"Schema node for '{schema_name or 'anonymous'}' must be a Mapping (e.g., dict), got {type(schema_node)}"
+    ), f"Schema node for '{schema_name or "anonymous"}' must be a Mapping (e.g., dict), got {type(schema_node)}"
 
     # If the current schema_node itself is a $ref, resolve it.
     if "$ref" in schema_node:
pyopenapi_gen/core/parsing/transformers/inline_enum_extractor.py CHANGED
@@ -179,7 +179,7 @@ def _process_standalone_inline_enum(
 
     logger.debug(
         f"STANDALONE_ENUM_CHECK: Processing node for "
-        f"'{schema_name or schema_obj.name or 'anonymous_schema'}' for direct enum properties."
+        f"'{schema_name or schema_obj.name or "anonymous_schema"}' for direct enum properties."
     )
 
     # Ensure basic enum properties are on schema_obj if not already there from initial _parse_schema pass
pyopenapi_gen/core/postprocess_manager.py CHANGED
@@ -34,7 +34,7 @@ class PostprocessManager:
 
         # --- RE-ENABLE RUFF CHECKS ---
         for target_path in target_paths:
-            if target_path.is_file():
+            if target_path.is_file() and target_path.suffix == ".py":
                 self.remove_unused_imports(target_path)
                 self.sort_imports(target_path)
                 self.format_code(target_path)
@@ -136,18 +136,44 @@ class PostprocessManager:
             return
 
         print(f"Running mypy on {target_dir}...")
-        result = subprocess.run(
-            [sys.executable, "-m", "mypy", str(target_dir), "--strict"],
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            text=True,
-        )
-        if result.stdout or result.stderr or result.returncode != 0:
-            if result.stdout:
-                print(result.stdout)
-            if result.stderr:
-                print(result.stderr, file=sys.stderr)
-            if result.returncode != 0:
+        # Find all Python files in the target directory
+        python_files = list(target_dir.rglob("*.py"))
+        if not python_files:
+            print(f"No Python files found in {target_dir}, skipping type check.")
+            return
+
+        # Try mypy with cache cleanup on failure
+        for attempt in range(2):
+            cmd = [sys.executable, "-m", "mypy", "--strict"]
+            if attempt == 1:
+                # Second attempt: clear cache
+                cmd.append("--cache-dir=/tmp/mypy_cache_temp")
+            cmd.extend([str(f) for f in python_files])
+
+            result = subprocess.run(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )
+
+            # Check for specific mypy cache corruption errors
+            cache_error_patterns = ["KeyError: 'setter_type'", "KeyError:", "deserialize"]
+            is_cache_error = any(pattern in result.stderr for pattern in cache_error_patterns)
+
+            if result.returncode == 0:
+                # Success
+                return
+            elif attempt == 0 and is_cache_error:
+                # Retry with cache cleanup
+                print(f"Mypy cache error detected, retrying with fresh cache...", file=sys.stderr)
+                continue
+            else:
+                # Report the error
+                if result.stdout:
+                    print(result.stdout)
+                if result.stderr:
+                    print(result.stderr, file=sys.stderr)
                 print(f"Type checking failed for {target_dir}. Please fix the above issues.", file=sys.stderr)
                 sys.exit(result.returncode)
 
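Note: despite the "clear cache" comment, the retry does not delete the default `.mypy_cache`; it points the second run at an alternate `--cache-dir`, which side-steps a corrupted incremental cache. A condensed sketch of the strategy in isolation (hypothetical helper name; the real method also prints mypy's output and exits on failure):

    import subprocess
    import sys
    from pathlib import Path

    def run_mypy_with_cache_retry(files: list[Path]) -> int:
        returncode = 1
        for attempt in range(2):
            cmd = [sys.executable, "-m", "mypy", "--strict"]
            if attempt == 1:
                cmd.append("--cache-dir=/tmp/mypy_cache_temp")  # fresh cache location
            cmd.extend(str(f) for f in files)
            result = subprocess.run(cmd, capture_output=True, text=True)
            returncode = result.returncode
            if returncode == 0:
                return 0
            # Deserialization KeyErrors are symptoms of a corrupted incremental cache
            if attempt == 0 and any(p in result.stderr for p in ("KeyError:", "deserialize")):
                continue
            break
        return returncode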
pyopenapi_gen/core/schemas.py CHANGED
@@ -1,40 +1,125 @@
+from __future__ import annotations
+
 from dataclasses import MISSING, dataclass, fields
-from typing import Any, Dict, Type, TypeVar
+from typing import Any, Dict, Type, TypeVar, Union, get_args, get_origin, get_type_hints
+
+T = TypeVar("T", bound="BaseSchema")
+
 
-T = TypeVar("T")
+def _extract_base_type(field_type: Any) -> Any:
+    """Extract the base type from Optional/Union types."""
+    origin = get_origin(field_type)
+    if origin is Union:
+        # For Optional[T] or Union[T, None], get the non-None type
+        args = get_args(field_type)
+        non_none_args = [arg for arg in args if arg is not type(None)]
+        if len(non_none_args) == 1:
+            return non_none_args[0]
+    return field_type
 
 
 @dataclass
 class BaseSchema:
-    """Base class for all generated Pydantic models, providing basic validation and dict conversion."""
+    """Base class for all generated models, providing validation, dict conversion, and field mapping."""
 
     @classmethod
-    def model_validate(cls: Type[T], data: Dict[str, Any]) -> T:
-        """Validate and create an instance from a dictionary, akin to Pydantic's model_validate."""
+    def _get_field_mappings(cls) -> Dict[str, str]:
+        """Get field mappings from Meta class if defined. Returns API field -> Python field mappings."""
+        if hasattr(cls, "Meta") and hasattr(cls.Meta, "key_transform_with_load"):
+            return cls.Meta.key_transform_with_load  # type: ignore[no-any-return]
+        return {}
+
+    @classmethod
+    def _get_reverse_field_mappings(cls) -> Dict[str, str]:
+        """Get reverse field mappings. Returns Python field -> API field mappings."""
+        mappings = cls._get_field_mappings()
+        return {python_field: api_field for api_field, python_field in mappings.items()}
+
+    @classmethod
+    def from_dict(cls: Type[T], data: Dict[str, Any]) -> T:
+        """Create an instance from a dictionary with automatic field name mapping."""
         if not isinstance(data, dict):
             raise TypeError(f"Input must be a dictionary, got {type(data).__name__}")
 
+        field_mappings = cls._get_field_mappings()  # API -> Python
         kwargs: Dict[str, Any] = {}
-        cls_fields = {f.name: f for f in fields(cls)}  # type: ignore[arg-type]
+        cls_fields = {f.name: f for f in fields(cls)}
+
+        # Process each field in the data
+        for api_field, value in data.items():
+            # Map API field name to Python field name
+            python_field = field_mappings.get(api_field, api_field)
+
+            if python_field in cls_fields:
+                # Handle nested objects that might also be BaseSchema instances
+                field_def = cls_fields[python_field]
+                field_type = field_def.type
+
+                # Get type hints to handle forward references and generics properly
+                try:
+                    type_hints = get_type_hints(cls)
+                    if python_field in type_hints:
+                        field_type = type_hints[python_field]
+                except (NameError, AttributeError):
+                    # Fall back to raw annotation if get_type_hints fails
+                    pass
+
+                # Extract base type (handles Optional[Type] -> Type)
+                base_type = _extract_base_type(field_type)
 
+                if base_type is not None and hasattr(base_type, "from_dict") and isinstance(value, dict):
+                    # Recursively convert nested dictionaries
+                    kwargs[python_field] = base_type.from_dict(value)
+                elif get_origin(field_type) is list or get_origin(base_type) is list:
+                    # Handle List[SomeModel] types
+                    list_type = field_type if get_origin(field_type) is list else base_type
+                    args = get_args(list_type)
+                    if args and hasattr(args[0], "from_dict") and isinstance(value, list):
+                        kwargs[python_field] = [
+                            args[0].from_dict(item) if isinstance(item, dict) else item for item in value
+                        ]
+                    else:
+                        kwargs[python_field] = value
+                else:
+                    kwargs[python_field] = value
+
+        # Check for required fields
         for field_name, field_def in cls_fields.items():
-            if field_name in data:
-                kwargs[field_name] = data[field_name]
-            elif field_def.default is MISSING and field_def.default_factory is MISSING:
+            if field_name not in kwargs and field_def.default is MISSING and field_def.default_factory is MISSING:
                 raise ValueError(f"Missing required field: '{field_name}' for class {cls.__name__}")
 
-        extra_fields = set(data.keys()) - set(cls_fields.keys())
-        if extra_fields:
-            pass
-
         return cls(**kwargs)
 
-    def model_dump(self, exclude_none: bool = False) -> Dict[str, Any]:
-        """Convert the model instance to a dictionary, akin to Pydantic's model_dump."""
+    def to_dict(self, exclude_none: bool = False) -> Dict[str, Any]:
+        """Convert the model instance to a dictionary with reverse field name mapping."""
+        reverse_mappings = self._get_reverse_field_mappings()  # Python -> API
         result = {}
+
         for field_def in fields(self):
             value = getattr(self, field_def.name)
             if exclude_none and value is None:
                 continue
-            result[field_def.name] = value
+
+            # Handle nested objects
+            if hasattr(value, "to_dict"):
+                value = value.to_dict(exclude_none=exclude_none)
+            elif isinstance(value, list) and value and hasattr(value[0], "to_dict"):
+                value = [
+                    item.to_dict(exclude_none=exclude_none) if hasattr(item, "to_dict") else item for item in value
+                ]
+
+            # Map Python field name back to API field name
+            api_field = reverse_mappings.get(field_def.name, field_def.name)
+            result[api_field] = value
+
         return result
+
+    # Legacy aliases for backward compatibility
+    @classmethod
+    def model_validate(cls: Type[T], data: Dict[str, Any]) -> T:
+        """Legacy alias for from_dict."""
+        return cls.from_dict(data)
+
+    def model_dump(self, exclude_none: bool = False) -> Dict[str, Any]:
+        """Legacy alias for to_dict."""
+        return self.to_dict(exclude_none=exclude_none)
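
Note: a usage sketch of the new BaseSchema API (hypothetical `User` model; in a generated client, `BaseSchema` is imported from the client's core `schemas` module):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class User(BaseSchema):
        id_: str
        first_name: str
        email: Optional[str] = None

        class Meta:
            # API field name -> Python field name
            key_transform_with_load = {"id": "id_", "firstName": "first_name"}

    user = User.from_dict({"id": "u1", "firstName": "Ada"})
    assert user.id_ == "u1" and user.first_name == "Ada" and user.email is None
    assert user.to_dict(exclude_none=True) == {"id": "u1", "firstName": "Ada"}
    # The Pydantic-style names remain as legacy aliases:
    assert User.model_validate({"id": "u2", "firstName": "Grace"}).first_name == "Grace"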
pyopenapi_gen/core/writers/python_construct_renderer.py CHANGED
@@ -7,7 +7,7 @@ It handles all the details of formatting, import registration, and docstring gen
 for these constructs.
 """
 
-from typing import List, Optional, Tuple
+from typing import Dict, List, Optional, Tuple
 
 from pyopenapi_gen.context.render_context import RenderContext
 
@@ -146,15 +146,17 @@ class PythonConstructRenderer:
         fields: List[Tuple[str, str, Optional[str], Optional[str]]],  # name, type_hint, default_expr, description
         description: Optional[str],
         context: RenderContext,
+        field_mappings: Optional[Dict[str, str]] = None,
     ) -> str:
         """
-        Render a dataclass as Python code.
+        Render a dataclass as Python code with BaseSchema support.
 
         Args:
             class_name: The name of the dataclass
             fields: List of (name, type_hint, default_expr, description) tuples for each field
             description: Optional description for the class docstring
             context: The rendering context for import registration
+            field_mappings: Optional mapping of API field names to Python field names for BaseSchema
 
         Returns:
             Formatted Python code for the dataclass
@@ -162,31 +164,60 @@ class PythonConstructRenderer:
         Example:
             ```python
             @dataclass
-            class User:
-                \"\"\"User information.\"\"\"
-                id: str
-                name: str
+            class User(BaseSchema):
+                \"\"\"User information with automatic JSON field mapping.\"\"\"
+                id_: str
+                first_name: str
                 email: Optional[str] = None
                 is_active: bool = True
+
+                class Meta:
+                    \"\"\"Configure field name mapping for JSON conversion.\"\"\"
+                    key_transform_with_load = {
+                        'id': 'id_',
+                        'firstName': 'first_name'
+                    }
             ```
         """
         writer = CodeWriter()
         context.add_import("dataclasses", "dataclass")
 
+        # Always use self-contained BaseSchema for client independence with automatic field mapping
+        # Use the core package from context - could be relative (..core) or absolute (api_sdks.my_core)
+        if context.core_package_name.startswith(".."):
+            # Already a relative import
+            core_import_path = f"{context.core_package_name}.schemas"
+        elif "." in context.core_package_name:
+            # External core package with dots (e.g., api_sdks.my_core) - use absolute import
+            core_import_path = f"{context.core_package_name}.schemas"
+        elif context.core_package_name == "core":
+            # Default relative core package
+            core_import_path = "..core.schemas"
+        else:
+            # Simple external core package name (e.g., shared_core_pkg) - use absolute import
+            core_import_path = f"{context.core_package_name}.schemas"
+
+        context.add_import(core_import_path, "BaseSchema")
+
         # Add __all__ export
         writer.write_line(f'__all__ = ["{class_name}"]')
         writer.write_line("")  # Add a blank line for separation
 
         writer.write_line("@dataclass")
-        writer.write_line(f"class {class_name}:")
+        writer.write_line(f"class {class_name}(BaseSchema):")
         writer.indent()
 
         # Build and write docstring
         field_args: list[tuple[str, str, str] | tuple[str, str]] = []
         for name, type_hint, _, field_desc in fields:
             field_args.append((name, type_hint, field_desc or ""))
+
        # Enhanced description with automatic field mapping
+        base_description = description or f"{class_name} dataclass"
+        enhanced_description = f"{base_description} with automatic JSON field mapping."
+
         doc_block = DocumentationBlock(
-            summary=description or f"{class_name} dataclass.",
+            summary=enhanced_description,
             args=field_args if field_args else None,
         )
         docstring = DocumentationWriter(width=88).render_docstring(doc_block, indent=0)
@@ -220,6 +251,23 @@ class PythonConstructRenderer:
                     line += f"  # {comment_text}"
                 writer.write_line(line)
 
+        # Add Meta class if field mappings are provided (for BaseSchema field mapping)
+        if field_mappings:
+            writer.write_line("")  # Blank line before Meta class
+            writer.write_line("class Meta:")
+            writer.indent()
+            writer.write_line('"""Configure field name mapping for JSON conversion."""')
+            writer.write_line("key_transform_with_load = {")
+            writer.indent()
+
+            # Sort mappings for consistent output
+            for api_field, python_field in sorted(field_mappings.items()):
+                writer.write_line(f"'{api_field}': '{python_field}',")
+
+            writer.dedent()
+            writer.write_line("}")
+            writer.dedent()
+
         writer.dedent()
         return writer.get_code()
 
@@ -254,7 +302,7 @@ class PythonConstructRenderer:
         ```
         """
         writer = CodeWriter()
-        bases = f"({', '.join(base_classes)})" if base_classes else ""
+        bases = f"({", ".join(base_classes)})" if base_classes else ""
         writer.write_line(f"class {class_name}{bases}:")
         writer.indent()
         has_content = False
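
Note: three of the four import-path branches above produce the same f-string; only the bare name "core" is special-cased to a relative import. A condensed restatement (hypothetical helper name) with the example values from the comments:

    def core_schemas_import_path(core_package_name: str) -> str:
        if core_package_name == "core":
            return "..core.schemas"  # default core package: relative import
        return f"{core_package_name}.schemas"  # relative (..core) and absolute paths pass through

    assert core_schemas_import_path("..core") == "..core.schemas"
    assert core_schemas_import_path("api_sdks.my_core") == "api_sdks.my_core.schemas"
    assert core_schemas_import_path("shared_core_pkg") == "shared_core_pkg.schemas"
    assert core_schemas_import_path("core") == "..core.schemas"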
pyopenapi_gen/emitters/endpoints_emitter.py CHANGED
@@ -211,7 +211,7 @@ class EndpointsEmitter:
         init_lines = []
         if unique_clients:
             all_list_items = sorted([f'"{cls}"' for cls, _ in unique_clients])
-            init_lines.append(f"__all__ = [{', '.join(all_list_items)}]")
+            init_lines.append(f"__all__ = [{", ".join(all_list_items)}]")
             for cls, mod in sorted(unique_clients):
                 init_lines.append(f"from .{mod} import {cls}")
 
pyopenapi_gen/helpers/endpoint_utils.py CHANGED
@@ -82,26 +82,8 @@ def get_request_body_type(body: IRRequestBody, context: RenderContext, schemas:
     return "Any"
 
 
-def get_return_type_unified(
-    op: IROperation,
-    context: RenderContext,
-    schemas: Dict[str, IRSchema],
-    responses: Optional[Dict[str, IRResponse]] = None,
-) -> str:
-    """
-    Determines the primary return type hint for an operation using the unified type service.
-
-    Args:
-        op: The operation to resolve
-        context: Render context for imports
-        schemas: Dictionary of all schemas
-        responses: Dictionary of all responses (optional)
-
-    Returns:
-        Python type string
-    """
-    type_service = UnifiedTypeService(schemas, responses)
-    return type_service.resolve_operation_response_type(op, context)
+# REMOVED: get_return_type_unified - replaced with ResponseStrategy pattern
+# All callers should now use ResponseStrategyResolver.resolve() to get consistent response handling
 
 
 def get_return_type(
@@ -326,10 +308,10 @@ def format_method_args(params: list[dict[str, Any]]) -> str:
     optional = [p for p in params if not p.get("required", True)]
     arg_strs = []
     for p in required:
-        arg_strs.append(f"{p['name']}: {p['type']}")
+        arg_strs.append(f"{p["name"]}: {p["type"]}")
     for p in optional:
         default = p["default"]
-        arg_strs.append(f"{p['name']}: {p['type']} = {default}")
+        arg_strs.append(f"{p["name"]}: {p["type"]} = {default}")
     return ", ".join(arg_strs)
 
 
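Note: `format_method_args` treats a parameter as required when the `"required"` key is absent, so only explicit `"required": False` entries get a default. An illustrative call (parameter dicts assumed; shapes taken from the function body):

    params = [
        {"name": "user_id", "type": "str"},  # required by default
        {"name": "limit", "type": "int", "required": False, "default": "10"},
    ]
    assert format_method_args(params) == "user_id: str, limit: int = 10"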
pyopenapi_gen/helpers/type_cleaner.py CHANGED
@@ -220,7 +220,7 @@ class TypeCleaner:
         if len(unique_members) == 1:
             return unique_members[0]  # A Union with one member is just that member.
 
-        return f"Union[{', '.join(unique_members)}]"
+        return f"Union[{", ".join(unique_members)}]"
 
     @classmethod
     def _clean_list_type(cls, type_str: str) -> str:
pyopenapi_gen/helpers/type_resolution/composition_resolver.py CHANGED
@@ -73,7 +73,7 @@ class CompositionTypeResolver:
                 return unique_types[0]
 
             self.context.add_import("typing", "Union")
-            union_str = f"Union[{', '.join(unique_types)}]"
+            union_str = f"Union[{", ".join(unique_types)}]"
             return union_str
 
         return None
pyopenapi_gen/helpers/type_resolution/finalizer.py CHANGED
@@ -23,7 +23,7 @@ class TypeFinalizer:
         if py_type is None:
             logger.warning(
                 f"[TypeFinalizer] Received None as py_type for schema "
-                f"'{schema.name or 'anonymous'}'. Defaulting to 'Any'."
+                f"'{schema.name or "anonymous"}'. Defaulting to 'Any'."
             )
             self.context.add_import("typing", "Any")
             py_type = "Any"
pyopenapi_gen/types/contracts/types.py CHANGED
@@ -20,7 +20,6 @@ class ResolvedType:
     import_name: Optional[str] = None
     is_optional: bool = False
     is_forward_ref: bool = False
-    was_unwrapped: bool = False
 
     def __post_init__(self) -> None:
         """Validate resolved type data."""