datamodel-code-generator 0.11.12__py3-none-any.whl → 0.45.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. datamodel_code_generator/__init__.py +654 -185
  2. datamodel_code_generator/__main__.py +872 -388
  3. datamodel_code_generator/arguments.py +798 -0
  4. datamodel_code_generator/cli_options.py +295 -0
  5. datamodel_code_generator/format.py +292 -54
  6. datamodel_code_generator/http.py +85 -10
  7. datamodel_code_generator/imports.py +152 -43
  8. datamodel_code_generator/model/__init__.py +138 -1
  9. datamodel_code_generator/model/base.py +531 -120
  10. datamodel_code_generator/model/dataclass.py +211 -0
  11. datamodel_code_generator/model/enum.py +133 -12
  12. datamodel_code_generator/model/imports.py +22 -0
  13. datamodel_code_generator/model/msgspec.py +462 -0
  14. datamodel_code_generator/model/pydantic/__init__.py +30 -25
  15. datamodel_code_generator/model/pydantic/base_model.py +304 -100
  16. datamodel_code_generator/model/pydantic/custom_root_type.py +11 -2
  17. datamodel_code_generator/model/pydantic/dataclass.py +15 -4
  18. datamodel_code_generator/model/pydantic/imports.py +40 -27
  19. datamodel_code_generator/model/pydantic/types.py +188 -96
  20. datamodel_code_generator/model/pydantic_v2/__init__.py +51 -0
  21. datamodel_code_generator/model/pydantic_v2/base_model.py +268 -0
  22. datamodel_code_generator/model/pydantic_v2/imports.py +15 -0
  23. datamodel_code_generator/model/pydantic_v2/root_model.py +35 -0
  24. datamodel_code_generator/model/pydantic_v2/types.py +143 -0
  25. datamodel_code_generator/model/scalar.py +124 -0
  26. datamodel_code_generator/model/template/Enum.jinja2 +15 -2
  27. datamodel_code_generator/model/template/ScalarTypeAliasAnnotation.jinja2 +6 -0
  28. datamodel_code_generator/model/template/ScalarTypeAliasType.jinja2 +6 -0
  29. datamodel_code_generator/model/template/ScalarTypeStatement.jinja2 +6 -0
  30. datamodel_code_generator/model/template/TypeAliasAnnotation.jinja2 +20 -0
  31. datamodel_code_generator/model/template/TypeAliasType.jinja2 +20 -0
  32. datamodel_code_generator/model/template/TypeStatement.jinja2 +20 -0
  33. datamodel_code_generator/model/template/TypedDict.jinja2 +5 -0
  34. datamodel_code_generator/model/template/TypedDictClass.jinja2 +25 -0
  35. datamodel_code_generator/model/template/TypedDictFunction.jinja2 +24 -0
  36. datamodel_code_generator/model/template/UnionTypeAliasAnnotation.jinja2 +10 -0
  37. datamodel_code_generator/model/template/UnionTypeAliasType.jinja2 +10 -0
  38. datamodel_code_generator/model/template/UnionTypeStatement.jinja2 +10 -0
  39. datamodel_code_generator/model/template/dataclass.jinja2 +50 -0
  40. datamodel_code_generator/model/template/msgspec.jinja2 +55 -0
  41. datamodel_code_generator/model/template/pydantic/BaseModel.jinja2 +17 -4
  42. datamodel_code_generator/model/template/pydantic/BaseModel_root.jinja2 +12 -4
  43. datamodel_code_generator/model/template/pydantic/Config.jinja2 +1 -1
  44. datamodel_code_generator/model/template/pydantic/dataclass.jinja2 +15 -2
  45. datamodel_code_generator/model/template/pydantic_v2/BaseModel.jinja2 +57 -0
  46. datamodel_code_generator/model/template/pydantic_v2/ConfigDict.jinja2 +5 -0
  47. datamodel_code_generator/model/template/pydantic_v2/RootModel.jinja2 +48 -0
  48. datamodel_code_generator/model/type_alias.py +70 -0
  49. datamodel_code_generator/model/typed_dict.py +161 -0
  50. datamodel_code_generator/model/types.py +106 -0
  51. datamodel_code_generator/model/union.py +105 -0
  52. datamodel_code_generator/parser/__init__.py +30 -12
  53. datamodel_code_generator/parser/_graph.py +67 -0
  54. datamodel_code_generator/parser/_scc.py +171 -0
  55. datamodel_code_generator/parser/base.py +2426 -380
  56. datamodel_code_generator/parser/graphql.py +652 -0
  57. datamodel_code_generator/parser/jsonschema.py +2518 -647
  58. datamodel_code_generator/parser/openapi.py +631 -222
  59. datamodel_code_generator/py.typed +0 -0
  60. datamodel_code_generator/pydantic_patch.py +28 -0
  61. datamodel_code_generator/reference.py +672 -290
  62. datamodel_code_generator/types.py +521 -145
  63. datamodel_code_generator/util.py +155 -0
  64. datamodel_code_generator/watch.py +65 -0
  65. datamodel_code_generator-0.45.0.dist-info/METADATA +301 -0
  66. datamodel_code_generator-0.45.0.dist-info/RECORD +69 -0
  67. {datamodel_code_generator-0.11.12.dist-info → datamodel_code_generator-0.45.0.dist-info}/WHEEL +1 -1
  68. datamodel_code_generator-0.45.0.dist-info/entry_points.txt +2 -0
  69. datamodel_code_generator/version.py +0 -1
  70. datamodel_code_generator-0.11.12.dist-info/METADATA +0 -440
  71. datamodel_code_generator-0.11.12.dist-info/RECORD +0 -31
  72. datamodel_code_generator-0.11.12.dist-info/entry_points.txt +0 -3
  73. {datamodel_code_generator-0.11.12.dist-info → datamodel_code_generator-0.45.0.dist-info/licenses}/LICENSE +0 -0
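
Most of the new surface area lands in `datamodel_code_generator/__init__.py`, whose diff follows. The headline change is multi-backend output: `generate()` gains an `output_model_type` parameter backed by the new `DataModelType` enum (pydantic v1/v2, dataclasses, TypedDict, msgspec). A minimal usage sketch, using only names visible in the diff below; the schema and output paths are hypothetical placeholders:

```python
from pathlib import Path

from datamodel_code_generator import DataModelType, InputFileType, generate

# "schema.json" stands in for any JSON Schema document.
generate(
    Path("schema.json"),
    input_file_type=InputFileType.JsonSchema,
    output=Path("models.py"),
    output_model_type=DataModelType.PydanticV2BaseModel,
)
```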
@@ -1,5 +1,15 @@
+"""Main module for datamodel-code-generator.
+
+Provides the main `generate()` function and related enums/exceptions for generating
+Python data models (Pydantic, dataclasses, TypedDict, msgspec) from various schema formats.
+"""
+
+from __future__ import annotations
+
 import contextlib
 import os
+import sys
+from collections.abc import Iterator, Mapping, Sequence
 from datetime import datetime, timezone
 from enum import Enum
 from pathlib import Path
@@ -8,119 +18,131 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
-    DefaultDict,
-    Dict,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Set,
+    Final,
     TextIO,
-    Tuple,
-    Type,
     TypeVar,
     Union,
+    cast,
 )
 from urllib.parse import ParseResult
 
-import pysnooper
 import yaml
+import yaml.parser
+from typing_extensions import TypeAlias, TypeAliasType, TypedDict
+
+import datamodel_code_generator.pydantic_patch  # noqa: F401
+from datamodel_code_generator.format import (
+    DEFAULT_FORMATTERS,
+    DatetimeClassType,
+    Formatter,
+    PythonVersion,
+    PythonVersionMin,
+)
+from datamodel_code_generator.parser import DefaultPutDict, LiteralType
+from datamodel_code_generator.util import PYDANTIC_V2, SafeLoader
 
 if TYPE_CHECKING:
-    cached_property = property
-    from yaml import SafeLoader
+    from collections import defaultdict
 
-    Protocol = object
-    runtime_checkable: Callable[..., Any]
-else:
-    try:
-        from typing import Protocol
-    except ImportError:
-        from typing_extensions import Protocol
-    try:
-        from typing import runtime_checkable
-    except ImportError:
-        from typing_extensions import runtime_checkable
-    try:
-        from yaml import CSafeLoader as SafeLoader
-    except ImportError:  # pragma: no cover
-        from yaml import SafeLoader
+    from datamodel_code_generator.model.pydantic_v2 import UnionMode
+    from datamodel_code_generator.parser.base import Parser
+    from datamodel_code_generator.types import StrictTypes
 
-try:
-    from functools import cached_property
-except ImportError:
-    _NOT_FOUND = object()
+    YamlScalar: TypeAlias = Union[str, int, float, bool, None]
+    YamlValue = TypeAliasType("YamlValue", "Union[dict[str, YamlValue], list[YamlValue], YamlScalar]")
 
-    class cached_property:
-        def __init__(self, func: Callable) -> None:
-            self.func: Callable = func
-            self.__doc__: Any = func.__doc__
+MIN_VERSION: Final[int] = 9
+MAX_VERSION: Final[int] = 13
+DEFAULT_SHARED_MODULE_NAME: Final[str] = "shared"
 
-        def __get__(self, instance: Any, owner: Any = None) -> Any:
-            value = instance.__dict__.get(self.func.__name__, _NOT_FOUND)
-            if value is _NOT_FOUND:  # pragma: no cover
-                value = instance.__dict__[self.func.__name__] = self.func(instance)
-            return value
+T = TypeVar("T")
 
 
-from datamodel_code_generator.format import PythonVersion
-from datamodel_code_generator.model.pydantic import dump_resolve_reference_action
-from datamodel_code_generator.parser import DefaultPutDict, LiteralType
-from datamodel_code_generator.parser.base import Parser
-from datamodel_code_generator.types import StrictTypes
+class DataclassArguments(TypedDict, total=False):
+    """Arguments for @dataclass decorator."""
+
+    init: bool
+    repr: bool
+    eq: bool
+    order: bool
+    unsafe_hash: bool
+    frozen: bool
+    match_args: bool
+    kw_only: bool
+    slots: bool
+    weakref_slot: bool
+
+
+if not TYPE_CHECKING:
+    YamlScalar: TypeAlias = Union[str, int, float, bool, None]
+    if PYDANTIC_V2:
+        YamlValue = TypeAliasType("YamlValue", "Union[dict[str, YamlValue], list[YamlValue], YamlScalar]")
+    else:
+        # Pydantic v1 cannot handle TypeAliasType, use Any for recursive parts
+        YamlValue: TypeAlias = Union[dict[str, Any], list[Any], YamlScalar]
 
-T = TypeVar('T')
+try:
+    import pysnooper
 
-pysnooper.tracer.DISABLED = True
+    pysnooper.tracer.DISABLED = True
+except ImportError:  # pragma: no cover
+    pysnooper = None
 
-DEFAULT_BASE_CLASS: str = 'pydantic.BaseModel'
+DEFAULT_BASE_CLASS: str = "pydantic.BaseModel"
 
-SafeLoader.yaml_constructors[
-    'tag:yaml.org,2002:timestamp'
-] = SafeLoader.yaml_constructors['tag:yaml.org,2002:str']
 
+def load_yaml(stream: str | TextIO) -> YamlValue:
+    """Load YAML content from a string or file-like object."""
+    return yaml.load(stream, Loader=SafeLoader)  # noqa: S506
 
-def load_yaml(stream: Union[str, TextIO]) -> Any:
-    return yaml.load(stream, Loader=SafeLoader)
 
+def load_yaml_dict(stream: str | TextIO) -> dict[str, YamlValue]:
+    """Load YAML and return as dict. Raises TypeError if result is not a dict."""
+    result = load_yaml(stream)
+    if not isinstance(result, dict):
+        msg = f"Expected dict, got {type(result).__name__}"
+        raise TypeError(msg)
+    return result
 
-def load_yaml_from_path(path: Path, encoding: str) -> Any:
+
+def load_yaml_dict_from_path(path: Path, encoding: str) -> dict[str, YamlValue]:
+    """Load YAML and return as dict from a file path."""
     with path.open(encoding=encoding) as f:
-        return load_yaml(f)
+        return load_yaml_dict(f)
+
+
+def get_version() -> str:
+    """Return the installed package version."""
+    package = "datamodel-code-generator"
+
+    from importlib.metadata import version  # noqa: PLC0415
+
+    return version(package)
 
 
 def enable_debug_message() -> None:  # pragma: no cover
+    """Enable debug tracing with pysnooper."""
+    if not pysnooper:
+        msg = "Please run `$pip install 'datamodel-code-generator[debug]'` to use debug option"
+        raise Exception(msg)  # noqa: TRY002
+
     pysnooper.tracer.DISABLED = False
 
 
-def snooper_to_methods(  # type: ignore
-    output=None,
-    watch=(),
-    watch_explode=(),
-    depth=1,
-    prefix='',
-    overwrite=False,
-    thread_info=False,
-    custom_repr=(),
-    max_variable_length=100,
-) -> Callable[..., Any]:
-    def inner(cls: Type[T]) -> Type[T]:
-        import inspect
+DEFAULT_MAX_VARIABLE_LENGTH: int = 100
+
+
+def snooper_to_methods() -> Callable[..., Any]:
+    """Class decorator to add pysnooper tracing to all methods."""
+
+    def inner(cls: type[T]) -> type[T]:
+        if not pysnooper:
+            return cls
+        import inspect  # noqa: PLC0415
 
         methods = inspect.getmembers(cls, predicate=inspect.isfunction)
         for name, method in methods:
-            snooper_method = pysnooper.snoop(
-                output,
-                watch,
-                watch_explode,
-                depth,
-                prefix,
-                overwrite,
-                thread_info,
-                custom_repr,
-                max_variable_length,
-            )(method)
+            snooper_method = pysnooper.snoop(max_variable_length=DEFAULT_MAX_VARIABLE_LENGTH)(method)
             setattr(cls, name, snooper_method)
         return cls
 
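The hunk above replaces the permissive `load_yaml_from_path` with dict-enforcing helpers (`load_yaml_dict`, `load_yaml_dict_from_path`). A minimal sketch of the contract, assuming the helpers are imported from the package root (they are module-level in `__init__.py`, though not listed in `__all__`):

```python
from datamodel_code_generator import load_yaml, load_yaml_dict

load_yaml("a: 1")           # {'a': 1}
load_yaml("- 1\n- 2")       # [1, 2]; any YamlValue is accepted

load_yaml_dict("a: 1")      # {'a': 1}
load_yaml_dict("- 1\n- 2")  # raises TypeError: "Expected dict, got list"
```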
@@ -128,210 +150,524 @@ def snooper_to_methods(  # type: ignore
 
 
 @contextlib.contextmanager
-def chdir(path: Optional[Path]) -> Iterator[None]:
-    """Changes working directory and returns to previous on exit."""
-
+def chdir(path: Path | None) -> Iterator[None]:
+    """Change working directory and return to previous on exit."""
     if path is None:
         yield
     else:
         prev_cwd = Path.cwd()
         try:
-
             os.chdir(path if path.is_dir() else path.parent)
             yield
         finally:
             os.chdir(prev_cwd)
 
 
-def is_openapi(text: str) -> bool:
-    return 'openapi' in load_yaml(text)
+def is_openapi(data: dict) -> bool:
+    """Check if the data dict is an OpenAPI specification."""
+    return "openapi" in data
+
+
+JSON_SCHEMA_URLS: tuple[str, ...] = (
+    "http://json-schema.org/",
+    "https://json-schema.org/",
+)
+
+
+def is_schema(data: dict) -> bool:
+    """Check if the data dict is a JSON Schema."""
+    schema = data.get("$schema")
+    if isinstance(schema, str) and any(schema.startswith(u) for u in JSON_SCHEMA_URLS):  # pragma: no cover
+        return True
+    if isinstance(data.get("type"), str):
+        return True
+    if any(
+        isinstance(data.get(o), list)
+        for o in (
+            "allOf",
+            "anyOf",
+            "oneOf",
+        )
+    ):
+        return True
+    return isinstance(data.get("properties"), dict)
 
 
 class InputFileType(Enum):
-    Auto = 'auto'
-    OpenAPI = 'openapi'
-    JsonSchema = 'jsonschema'
-    Json = 'json'
-    Yaml = 'yaml'
-    Dict = 'dict'
-    CSV = 'csv'
+    """Supported input file types for schema parsing."""
+
+    Auto = "auto"
+    OpenAPI = "openapi"
+    JsonSchema = "jsonschema"
+    Json = "json"
+    Yaml = "yaml"
+    Dict = "dict"
+    CSV = "csv"
+    GraphQL = "graphql"
+
+
+RAW_DATA_TYPES: list[InputFileType] = [
+    InputFileType.Json,
+    InputFileType.Yaml,
+    InputFileType.Dict,
+    InputFileType.CSV,
+    InputFileType.GraphQL,
+]
+
+
+class DataModelType(Enum):
+    """Supported output data model types."""
+
+    PydanticBaseModel = "pydantic.BaseModel"
+    PydanticV2BaseModel = "pydantic_v2.BaseModel"
+    DataclassesDataclass = "dataclasses.dataclass"
+    TypingTypedDict = "typing.TypedDict"
+    MsgspecStruct = "msgspec.Struct"
+
+
+class ReuseScope(Enum):
+    """Scope for model reuse deduplication.
+
+    module: Deduplicate identical models within each module (default).
+    tree: Deduplicate identical models across all modules, placing shared models in shared.py.
+    """
+
+    Module = "module"
+    Tree = "tree"
 
 
 class OpenAPIScope(Enum):
-    Schemas = 'schemas'
-    Paths = 'paths'
+    """Scopes for OpenAPI model generation."""
+
+    Schemas = "schemas"
+    Paths = "paths"
+    Tags = "tags"
+    Parameters = "parameters"
+    Webhooks = "webhooks"
+
+
+class AllExportsScope(Enum):
+    """Scope for __all__ exports in __init__.py.
+
+    children: Export models from direct child modules only.
+    recursive: Export models from all descendant modules recursively.
+    """
+
+    Children = "children"
+    Recursive = "recursive"
+
+
+class AllExportsCollisionStrategy(Enum):
+    """Strategy for handling name collisions in recursive exports.
+
+    error: Raise an error when name collision is detected.
+    minimal_prefix: Add module prefix only to colliding names.
+    full_prefix: Add full module path prefix to all colliding names.
+    """
+
+    Error = "error"
+    MinimalPrefix = "minimal-prefix"
+    FullPrefix = "full-prefix"
+
+
+class AllOfMergeMode(Enum):
+    """Mode for field merging in allOf schemas.
+
+    constraints: Merge only constraint fields (minItems, maxItems, pattern, etc.) from parent.
+    all: Merge constraints plus annotation fields (default, examples) from parent.
+    none: Do not merge any fields from parent properties.
+    """
+
+    Constraints = "constraints"
+    All = "all"
+    NoMerge = "none"
+
+
+class GraphQLScope(Enum):
+    """Scopes for GraphQL model generation."""
+
+    Schema = "schema"
+
+
+class ReadOnlyWriteOnlyModelType(Enum):
+    """Model generation strategy for readOnly/writeOnly fields.
+
+    RequestResponse: Generate only Request/Response model variants (no base model).
+    All: Generate Base, Request, and Response models.
+    """
+
+    RequestResponse = "request-response"
+    All = "all"
+
+
+class ModuleSplitMode(Enum):
+    """Mode for splitting generated models into separate files.
+
+    Single: Generate one file per model class.
+    """
+
+    Single = "single"
 
 
 class Error(Exception):
+    """Base exception for datamodel-code-generator errors."""
+
     def __init__(self, message: str) -> None:
+        """Initialize with message."""
         self.message: str = message
 
     def __str__(self) -> str:
+        """Return string representation."""
         return self.message
 
 
 class InvalidClassNameError(Error):
+    """Raised when a schema title cannot be converted to a valid Python class name."""
+
     def __init__(self, class_name: str) -> None:
+        """Initialize with class name."""
         self.class_name = class_name
-        message = f'title={repr(class_name)} is invalid class name.'
+        message = f"title={class_name!r} is invalid class name."
         super().__init__(message=message)
 
 
 def get_first_file(path: Path) -> Path:  # pragma: no cover
+    """Find and return the first file in a path (file or directory)."""
     if path.is_file():
         return path
-    elif path.is_dir():
-        for child in path.rglob('*'):
+    if path.is_dir():
+        for child in path.rglob("*"):
             if child.is_file():
                 return child
-    raise Error('File not found')
+    msg = f"No file found in: {path}"
+    raise FileNotFoundError(msg)
+
+
+def _find_future_import_insertion_point(header: str) -> int:
+    """Find position in header where __future__ import should be inserted."""
+    import ast  # noqa: PLC0415
+
+    try:
+        tree = ast.parse(header)
+    except SyntaxError:
+        return 0
+
+    lines = header.splitlines(keepends=True)
+
+    def line_end_pos(line_num: int) -> int:
+        return sum(len(lines[i]) for i in range(line_num))
 
+    if not tree.body:
+        return len(header)
 
-def generate(
-    input_: Union[Path, str, ParseResult],
+    first_stmt = tree.body[0]
+    is_docstring = isinstance(first_stmt, ast.Expr) and (
+        (isinstance(first_stmt.value, ast.Constant) and isinstance(first_stmt.value.value, str))
+        or isinstance(first_stmt.value, ast.JoinedStr)
+    )
+    if is_docstring:
+        end_line = first_stmt.end_lineno or len(lines)
+        pos = line_end_pos(end_line)
+        while end_line < len(lines) and not lines[end_line].strip():
+            pos += len(lines[end_line])
+            end_line += 1
+        return pos
+
+    pos = 0
+    for i in range(first_stmt.lineno - 1):
+        pos += len(lines[i])
+    return pos
+
+
+def generate(  # noqa: PLR0912, PLR0913, PLR0914, PLR0915
+    input_: Path | str | ParseResult | Mapping[str, Any],
     *,
-    input_filename: Optional[str] = None,
+    input_filename: str | None = None,
     input_file_type: InputFileType = InputFileType.Auto,
-    output: Optional[Path] = None,
-    target_python_version: PythonVersion = PythonVersion.PY_37,
-    base_class: str = DEFAULT_BASE_CLASS,
-    custom_template_dir: Optional[Path] = None,
-    extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
+    output: Path | None = None,
+    output_model_type: DataModelType = DataModelType.PydanticBaseModel,
+    target_python_version: PythonVersion = PythonVersionMin,
+    base_class: str = "",
+    additional_imports: list[str] | None = None,
+    custom_template_dir: Path | None = None,
+    extra_template_data: defaultdict[str, dict[str, Any]] | None = None,
     validation: bool = False,
     field_constraints: bool = False,
     snake_case_field: bool = False,
     strip_default_none: bool = False,
-    aliases: Optional[Mapping[str, str]] = None,
+    aliases: Mapping[str, str] | None = None,
     disable_timestamp: bool = False,
+    enable_version_header: bool = False,
+    enable_command_header: bool = False,
+    command_line: str | None = None,
     allow_population_by_field_name: bool = False,
+    allow_extra_fields: bool = False,
+    extra_fields: str | None = None,
     apply_default_values_for_required_fields: bool = False,
     force_optional_for_required_fields: bool = False,
-    class_name: Optional[str] = None,
+    class_name: str | None = None,
     use_standard_collections: bool = False,
     use_schema_description: bool = False,
+    use_field_description: bool = False,
+    use_attribute_docstrings: bool = False,
+    use_inline_field_description: bool = False,
+    use_default_kwarg: bool = False,
     reuse_model: bool = False,
-    encoding: str = 'utf-8',
-    enum_field_as_literal: Optional[LiteralType] = None,
+    reuse_scope: ReuseScope = ReuseScope.Module,
+    shared_module_name: str = DEFAULT_SHARED_MODULE_NAME,
+    encoding: str = "utf-8",
+    enum_field_as_literal: LiteralType | None = None,
+    use_one_literal_as_default: bool = False,
+    use_enum_values_in_discriminator: bool = False,
     set_default_enum_member: bool = False,
+    use_subclass_enum: bool = False,
+    use_specialized_enum: bool = True,
     strict_nullable: bool = False,
     use_generic_container_types: bool = False,
     enable_faux_immutability: bool = False,
     disable_appending_item_suffix: bool = False,
-    strict_types: Optional[Sequence[StrictTypes]] = None,
-    empty_enum_field_name: Optional[str] = None,
-    custom_class_name_generator: Optional[Callable[[str], str]] = None,
-    field_extra_keys: Optional[Set[str]] = None,
+    strict_types: Sequence[StrictTypes] | None = None,
+    empty_enum_field_name: str | None = None,
+    custom_class_name_generator: Callable[[str], str] | None = None,
+    field_extra_keys: set[str] | None = None,
     field_include_all_keys: bool = False,
-    openapi_scopes: Optional[List[OpenAPIScope]] = None,
-    wrap_string_literal: Optional[bool] = None,
+    field_extra_keys_without_x_prefix: set[str] | None = None,
+    openapi_scopes: list[OpenAPIScope] | None = None,
+    include_path_parameters: bool = False,
+    graphql_scopes: list[GraphQLScope] | None = None,  # noqa: ARG001
+    wrap_string_literal: bool | None = None,
     use_title_as_name: bool = False,
-    http_headers: Optional[Sequence[Tuple[str, str]]] = None,
+    use_operation_id_as_name: bool = False,
+    use_unique_items_as_set: bool = False,
+    allof_merge_mode: AllOfMergeMode = AllOfMergeMode.Constraints,
+    http_headers: Sequence[tuple[str, str]] | None = None,
+    http_ignore_tls: bool = False,
     use_annotated: bool = False,
+    use_serialize_as_any: bool = False,
+    use_non_positive_negative_number_constrained_types: bool = False,
+    use_decimal_for_multiple_of: bool = False,
+    original_field_name_delimiter: str | None = None,
+    use_double_quotes: bool = False,
+    use_union_operator: bool = False,
+    collapse_root_models: bool = False,
+    skip_root_model: bool = False,
+    use_type_alias: bool = False,
+    special_field_name_prefix: str | None = None,
+    remove_special_field_name_prefix: bool = False,
+    capitalise_enum_members: bool = False,
+    keep_model_order: bool = False,
+    custom_file_header: str | None = None,
+    custom_file_header_path: Path | None = None,
+    custom_formatters: list[str] | None = None,
+    custom_formatters_kwargs: dict[str, Any] | None = None,
+    use_pendulum: bool = False,
+    http_query_parameters: Sequence[tuple[str, str]] | None = None,
+    treat_dot_as_module: bool = False,
+    use_exact_imports: bool = False,
+    union_mode: UnionMode | None = None,
+    output_datetime_class: DatetimeClassType | None = None,
+    keyword_only: bool = False,
+    frozen_dataclasses: bool = False,
+    no_alias: bool = False,
+    use_frozen_field: bool = False,
+    formatters: list[Formatter] = DEFAULT_FORMATTERS,
+    settings_path: Path | None = None,
+    parent_scoped_naming: bool = False,
+    dataclass_arguments: DataclassArguments | None = None,
+    disable_future_imports: bool = False,
+    type_mappings: list[str] | None = None,
+    read_only_write_only_model_type: ReadOnlyWriteOnlyModelType | None = None,
+    all_exports_scope: AllExportsScope | None = None,
+    all_exports_collision_strategy: AllExportsCollisionStrategy | None = None,
+    module_split_mode: ModuleSplitMode | None = None,
 ) -> None:
+    """Generate Python data models from schema definitions or structured data.
+
+    This is the main entry point for code generation. Supports OpenAPI, JSON Schema,
+    GraphQL, and raw data formats (JSON, YAML, Dict, CSV) as input.
+    """
     remote_text_cache: DefaultPutDict[str, str] = DefaultPutDict()
     if isinstance(input_, str):
-        input_text: Optional[str] = input_
+        input_text: str | None = input_
     elif isinstance(input_, ParseResult):
-        from datamodel_code_generator.http import get_body
+        from datamodel_code_generator.http import get_body  # noqa: PLC0415
 
         input_text = remote_text_cache.get_or_put(
-            input_.geturl(), default_factory=lambda url: get_body(url, http_headers)
+            input_.geturl(),
+            default_factory=lambda url: get_body(url, http_headers, http_ignore_tls, http_query_parameters),
         )
     else:
         input_text = None
 
+    if dataclass_arguments is None:
+        dataclass_arguments = {}
+        if frozen_dataclasses:
+            dataclass_arguments["frozen"] = True
+        if keyword_only:
+            dataclass_arguments["kw_only"] = True
+
     if isinstance(input_, Path) and not input_.is_absolute():
         input_ = input_.expanduser().resolve()
     if input_file_type == InputFileType.Auto:
         try:
             input_text_ = (
-                get_first_file(input_).read_text(encoding=encoding)
-                if isinstance(input_, Path)
-                else input_text
+                get_first_file(input_).read_text(encoding=encoding) if isinstance(input_, Path) else input_text
             )
-            input_file_type = (
-                InputFileType.OpenAPI
-                if is_openapi(input_text_)  # type: ignore
-                else InputFileType.JsonSchema
+        except FileNotFoundError as exc:
+            msg = "File not found"
+            raise Error(msg) from exc
+
+        try:
+            assert isinstance(input_text_, str)
+            input_file_type = infer_input_type(input_text_)
+        except Exception as exc:
+            msg = "Invalid file format"
+            raise Error(msg) from exc
+        else:
+            print(  # noqa: T201
+                inferred_message.format(input_file_type.value),
+                file=sys.stderr,
             )
-        except:
-            raise Error('Invalid file format')
 
-    kwargs: Dict[str, Any] = {}
-    if input_file_type == InputFileType.OpenAPI:
-        from datamodel_code_generator.parser.openapi import OpenAPIParser
+    kwargs: dict[str, Any] = {}
+    if input_file_type == InputFileType.OpenAPI:  # noqa: PLR1702
+        from datamodel_code_generator.parser.openapi import OpenAPIParser  # noqa: PLC0415
 
-        parser_class: Type[Parser] = OpenAPIParser
-        kwargs['openapi_scopes'] = openapi_scopes
+        parser_class: type[Parser] = OpenAPIParser
+        kwargs["openapi_scopes"] = openapi_scopes
+        kwargs["include_path_parameters"] = include_path_parameters
+    elif input_file_type == InputFileType.GraphQL:
+        from datamodel_code_generator.parser.graphql import GraphQLParser  # noqa: PLC0415
+
+        parser_class: type[Parser] = GraphQLParser
     else:
-        from datamodel_code_generator.parser.jsonschema import JsonSchemaParser
+        from datamodel_code_generator.parser.jsonschema import JsonSchemaParser  # noqa: PLC0415
 
         parser_class = JsonSchemaParser
 
-    if input_file_type in (
-        InputFileType.Json,
-        InputFileType.Yaml,
-        InputFileType.Dict,
-        InputFileType.CSV,
-    ):
+    if input_file_type in RAW_DATA_TYPES:
+        import json  # noqa: PLC0415
+
         try:
             if isinstance(input_, Path) and input_.is_dir():  # pragma: no cover
-                raise Error(f'Input must be a file for {input_file_type}')
-            obj: Dict[Any, Any]
+                msg = f"Input must be a file for {input_file_type}"
+                raise Error(msg)  # noqa: TRY301
+            obj: dict[str, Any]
             if input_file_type == InputFileType.CSV:
-                import csv
+                import csv  # noqa: PLC0415
 
-                def get_header_and_first_line(csv_file: IO[str]) -> Dict[str, Any]:
+                def get_header_and_first_line(csv_file: IO[str]) -> dict[str, Any]:
                     csv_reader = csv.DictReader(csv_file)
-                    return dict(zip(csv_reader.fieldnames, next(csv_reader)))  # type: ignore
+                    assert csv_reader.fieldnames is not None
+                    return dict(zip(csv_reader.fieldnames, next(csv_reader)))
 
                 if isinstance(input_, Path):
                     with input_.open(encoding=encoding) as f:
                         obj = get_header_and_first_line(f)
                 else:
-                    import io
+                    import io  # noqa: PLC0415
 
                     obj = get_header_and_first_line(io.StringIO(input_text))
-            else:
-                obj = load_yaml(
-                    input_.read_text(encoding=encoding)  # type: ignore
+            elif input_file_type == InputFileType.Yaml:
+                if isinstance(input_, Path):
+                    obj = load_yaml_dict(input_.read_text(encoding=encoding))
+                else:  # pragma: no cover
+                    assert input_text is not None
+                    obj = load_yaml_dict(input_text)
+            elif input_file_type == InputFileType.Json:
+                if isinstance(input_, Path):
+                    obj = json.loads(input_.read_text(encoding=encoding))
+                else:
+                    assert input_text is not None
+                    obj = json.loads(input_text)
+            elif input_file_type == InputFileType.Dict:
+                import ast  # noqa: PLC0415
+
+                # Input can be a dict object stored in a python file
+                obj = (
+                    ast.literal_eval(input_.read_text(encoding=encoding))
                     if isinstance(input_, Path)
-                    else input_text
+                    else cast("dict[str, Any]", input_)
                 )
-        except:
-            raise Error('Invalid file format')
-        import json
+            else:  # pragma: no cover
+                msg = f"Unsupported input file type: {input_file_type}"
+                raise Error(msg)  # noqa: TRY301
+        except Exception as exc:
+            msg = "Invalid file format"
+            raise Error(msg) from exc
 
-        from genson import SchemaBuilder
+        from genson import SchemaBuilder  # noqa: PLC0415
 
         builder = SchemaBuilder()
         builder.add_object(obj)
         input_text = json.dumps(builder.to_schema())
 
+    if isinstance(input_, ParseResult) and input_file_type not in RAW_DATA_TYPES:
+        input_text = None
+
+    if union_mode is not None:
+        if output_model_type == DataModelType.PydanticV2BaseModel:
+            default_field_extras = {"union_mode": union_mode}
+        else:  # pragma: no cover
+            msg = "union_mode is only supported for pydantic_v2.BaseModel"
+            raise Error(msg)
+    else:
+        default_field_extras = None
+
+    from datamodel_code_generator.model import get_data_model_types  # noqa: PLC0415
+
+    data_model_types = get_data_model_types(output_model_type, target_python_version, use_type_alias=use_type_alias)
+
+    # Add GraphQL-specific model types if needed
+    if input_file_type == InputFileType.GraphQL:
+        kwargs["data_model_scalar_type"] = data_model_types.scalar_model
+        kwargs["data_model_union_type"] = data_model_types.union_model
+
+    source = input_text or input_
+    assert not isinstance(source, Mapping)
     parser = parser_class(
-        source=input_ if isinstance(input_, ParseResult) else input_text or input_,
+        source=source,
+        data_model_type=data_model_types.data_model,
+        data_model_root_type=data_model_types.root_model,
+        data_model_field_type=data_model_types.field_model,
+        data_type_manager_type=data_model_types.data_type_manager,
         base_class=base_class,
+        additional_imports=additional_imports,
         custom_template_dir=custom_template_dir,
         extra_template_data=extra_template_data,
         target_python_version=target_python_version,
-        dump_resolve_reference_action=dump_resolve_reference_action,
+        dump_resolve_reference_action=data_model_types.dump_resolve_reference_action,
         validation=validation,
         field_constraints=field_constraints,
         snake_case_field=snake_case_field,
         strip_default_none=strip_default_none,
         aliases=aliases,
         allow_population_by_field_name=allow_population_by_field_name,
+        allow_extra_fields=allow_extra_fields,
+        extra_fields=extra_fields,
         apply_default_values_for_required_fields=apply_default_values_for_required_fields,
         force_optional_for_required_fields=force_optional_for_required_fields,
         class_name=class_name,
         use_standard_collections=use_standard_collections,
-        base_path=input_.parent
-        if isinstance(input_, Path) and input_.is_file()
-        else None,
+        base_path=input_.parent if isinstance(input_, Path) and input_.is_file() else None,
         use_schema_description=use_schema_description,
+        use_field_description=use_field_description,
+        use_attribute_docstrings=use_attribute_docstrings,
+        use_inline_field_description=use_inline_field_description,
+        use_default_kwarg=use_default_kwarg,
         reuse_model=reuse_model,
-        enum_field_as_literal=enum_field_as_literal,
-        set_default_enum_member=set_default_enum_member,
+        reuse_scope=reuse_scope,
+        shared_module_name=shared_module_name,
+        enum_field_as_literal=LiteralType.All
+        if output_model_type == DataModelType.TypingTypedDict
+        else enum_field_as_literal,
+        use_one_literal_as_default=use_one_literal_as_default,
+        use_enum_values_in_discriminator=use_enum_values_in_discriminator,
+        set_default_enum_member=True
+        if output_model_type == DataModelType.DataclassesDataclass
+        else set_default_enum_member,
+        use_subclass_enum=use_subclass_enum,
+        use_specialized_enum=use_specialized_enum,
         strict_nullable=strict_nullable,
         use_generic_container_types=use_generic_container_types,
         enable_faux_immutability=enable_faux_immutability,
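One behavioural detail from the hunk above: when `dataclass_arguments` is not supplied, the legacy `frozen_dataclasses` and `keyword_only` flags are folded into it before being passed to the parser. A sketch of the resulting equivalence (schema literal and output paths are illustrative):

```python
from pathlib import Path

from datamodel_code_generator import DataModelType, generate

schema = '{"type": "object", "properties": {"id": {"type": "integer"}}}'

# These two calls configure the same @dataclass decorator arguments:
generate(
    schema,
    output=Path("models_a.py"),
    output_model_type=DataModelType.DataclassesDataclass,
    frozen_dataclasses=True,
)
generate(
    schema,
    output=Path("models_b.py"),
    output_model_type=DataModelType.DataclassesDataclass,
    dataclass_arguments={"frozen": True},
)
```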
@@ -342,35 +678,89 @@ def generate(
         custom_class_name_generator=custom_class_name_generator,
         field_extra_keys=field_extra_keys,
         field_include_all_keys=field_include_all_keys,
+        field_extra_keys_without_x_prefix=field_extra_keys_without_x_prefix,
         wrap_string_literal=wrap_string_literal,
         use_title_as_name=use_title_as_name,
+        use_operation_id_as_name=use_operation_id_as_name,
+        use_unique_items_as_set=use_unique_items_as_set,
+        allof_merge_mode=allof_merge_mode,
         http_headers=http_headers,
+        http_ignore_tls=http_ignore_tls,
         use_annotated=use_annotated,
+        use_serialize_as_any=use_serialize_as_any,
+        use_non_positive_negative_number_constrained_types=use_non_positive_negative_number_constrained_types,
+        use_decimal_for_multiple_of=use_decimal_for_multiple_of,
+        original_field_name_delimiter=original_field_name_delimiter,
+        use_double_quotes=use_double_quotes,
+        use_union_operator=use_union_operator,
+        collapse_root_models=collapse_root_models,
+        skip_root_model=skip_root_model,
+        use_type_alias=use_type_alias,
+        special_field_name_prefix=special_field_name_prefix,
+        remove_special_field_name_prefix=remove_special_field_name_prefix,
+        capitalise_enum_members=capitalise_enum_members,
+        keep_model_order=keep_model_order,
+        known_third_party=data_model_types.known_third_party,
+        custom_formatters=custom_formatters,
+        custom_formatters_kwargs=custom_formatters_kwargs,
+        use_pendulum=use_pendulum,
+        http_query_parameters=http_query_parameters,
+        treat_dot_as_module=treat_dot_as_module,
+        use_exact_imports=use_exact_imports,
+        default_field_extras=default_field_extras,
+        target_datetime_class=output_datetime_class,
+        keyword_only=keyword_only,
+        frozen_dataclasses=frozen_dataclasses,
+        no_alias=no_alias,
+        use_frozen_field=use_frozen_field,
+        formatters=formatters,
+        encoding=encoding,
+        parent_scoped_naming=parent_scoped_naming,
+        dataclass_arguments=dataclass_arguments,
+        type_mappings=type_mappings,
+        read_only_write_only_model_type=read_only_write_only_model_type,
         **kwargs,
     )
 
     with chdir(output):
-        results = parser.parse()
+        results = parser.parse(
+            settings_path=settings_path,
+            disable_future_imports=disable_future_imports,
+            all_exports_scope=all_exports_scope,
+            all_exports_collision_strategy=all_exports_collision_strategy,
+            module_split_mode=module_split_mode,
+        )
     if not input_filename:  # pragma: no cover
         if isinstance(input_, str):
-            input_filename = '<stdin>'
+            input_filename = "<stdin>"
         elif isinstance(input_, ParseResult):
             input_filename = input_.geturl()
+        elif input_file_type == InputFileType.Dict:
+            # input_ might be a dict object provided directly, and missing a name field
+            input_filename = getattr(input_, "name", "<dict>")
         else:
+            assert isinstance(input_, Path)
             input_filename = input_.name
     if not results:
-        raise Error('Models not found in the input data')
-    elif isinstance(results, str):
-
-        modules = {output: (results, input_filename)}
+        msg = "Models not found in the input data"
+        raise Error(msg)
+    if isinstance(results, str):
+        # Single-file output: body already contains future imports
+        # Only store future_imports separately if we have a non-empty custom_file_header
+        body = results
+        future_imports = ""
+        modules: dict[Path | None, tuple[str, str, str | None]] = {output: (body, future_imports, input_filename)}
     else:
         if output is None:
-            raise Error('Modular references require an output directory')
+            msg = "Modular references require an output directory"
+            raise Error(msg)
         if output.suffix:
-            raise Error('Modular references require an output directory, not a file')
+            msg = "Modular references require an output directory, not a file"
+            raise Error(msg)
         modules = {
             output.joinpath(*name): (
                 result.body,
+                result.future_imports,
                 str(result.source.as_posix() if result.source else input_filename),
             )
             for name, result in sorted(results.items())
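The final hunk below extends the generated file header with optional version and command lines. For reference, the version line comes from the new importlib.metadata-based `get_version()` helper added earlier in this diff; a trivial sketch:

```python
from datamodel_code_generator import get_version

# With enable_version_header=True, the header gains a line like this:
print(f"# version: {get_version()}")  # e.g. "# version: 0.45.0"
```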
@@ -378,29 +768,108 @@ def generate(
 
     timestamp = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
 
-    header = '''\
+    if custom_file_header is None and custom_file_header_path:
+        custom_file_header = custom_file_header_path.read_text(encoding=encoding)
+
+    header = """\
 # generated by datamodel-codegen:
-# filename: {}'''
+# filename: {}"""
     if not disable_timestamp:
-        header += f'\n# timestamp: {timestamp}'
-
-    file: Optional[IO[Any]]
-    for path, body_and_filename in modules.items():
-        body, filename = body_and_filename
+        header += f"\n# timestamp: {timestamp}"
+    if enable_version_header:
+        header += f"\n# version: {get_version()}"
+    if enable_command_header and command_line:
+        safe_command_line = command_line.replace("\n", " ").replace("\r", " ")
+        header += f"\n# command: {safe_command_line}"
+
+    file: IO[Any] | None
+    for path, (body, future_imports, filename) in modules.items():
         if path is None:
             file = None
         else:
             if not path.parent.exists():
                 path.parent.mkdir(parents=True)
-            file = path.open('wt', encoding=encoding)
-
-        print(header.format(filename), file=file)
-        if body:
-            print('', file=file)
-            print(body.rstrip(), file=file)
+            file = path.open("wt", encoding=encoding)
+
+        safe_filename = filename.replace("\n", " ").replace("\r", " ") if filename else ""
+        effective_header = custom_file_header or header.format(safe_filename)
+
+        if custom_file_header and body:
+            # Extract future imports from body for correct placement after custom_file_header
+            body_without_future = body
+            extracted_future = future_imports  # Use pre-extracted if available
+            lines = body.split("\n")
+            future_indices = [i for i, line in enumerate(lines) if line.strip().startswith("from __future__")]
+            if future_indices:
+                if not extracted_future:
+                    # Extract future imports from body
+                    extracted_future = "\n".join(lines[i] for i in future_indices)
+                remaining_lines = [line for i, line in enumerate(lines) if i not in future_indices]
+                body_without_future = "\n".join(remaining_lines).lstrip("\n")
+
+            if extracted_future:
+                insertion_point = _find_future_import_insertion_point(custom_file_header)
+                header_before = custom_file_header[:insertion_point].rstrip()
+                header_after = custom_file_header[insertion_point:].strip()
+                if header_after:
+                    content = header_before + "\n" + extracted_future + "\n\n" + header_after
+                else:
+                    content = header_before + "\n\n" + extracted_future
+                print(content, file=file)
+                print(file=file)
+                print(body_without_future.rstrip(), file=file)
+            else:
+                print(effective_header, file=file)
+                print(file=file)
+                print(body.rstrip(), file=file)
+        else:
+            # Body already contains future imports, just print as-is
+            print(effective_header, file=file)
+            if body:
+                print(file=file)
+                print(body.rstrip(), file=file)
 
         if file is not None:
             file.close()
 
 
-__all__ = ['DefaultPutDict', 'LiteralType', 'PythonVersion']
+def infer_input_type(text: str) -> InputFileType:
+    """Automatically detect the input file type from text content."""
+    try:
+        data = load_yaml(text)
+    except yaml.parser.ParserError:
+        return InputFileType.CSV
+    if isinstance(data, dict):
+        if is_openapi(data):
+            return InputFileType.OpenAPI
+        if is_schema(data):
+            return InputFileType.JsonSchema
+        return InputFileType.Json
+    msg = (
+        "Can't infer input file type from the input data. "
+        "Please specify the input file type explicitly with --input-file-type option."
+    )
+    raise Error(msg)
+
+
+inferred_message = (
+    "The input file type was determined to be: {}\nThis can be specified explicitly with the "
+    "`--input-file-type` option."
+)
+
+__all__ = [
+    "MAX_VERSION",
+    "MIN_VERSION",
+    "AllExportsCollisionStrategy",
+    "AllExportsScope",
+    "DatetimeClassType",
+    "DefaultPutDict",
+    "Error",
+    "InputFileType",
+    "InvalidClassNameError",
+    "LiteralType",
+    "ModuleSplitMode",
+    "PythonVersion",
+    "ReadOnlyWriteOnlyModelType",
+    "generate",
+]
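
Finally, a quick sketch of the new auto-detection logic (`infer_input_type` is module-level and importable, though not listed in `__all__`):

```python
from datamodel_code_generator import InputFileType, infer_input_type

assert infer_input_type('{"openapi": "3.1.0"}') is InputFileType.OpenAPI
assert infer_input_type('{"type": "object", "properties": {}}') is InputFileType.JsonSchema
assert infer_input_type('{"hello": "world"}') is InputFileType.Json
# Non-mapping input that still parses as YAML raises Error, asking the
# caller to pass --input-file-type explicitly.
```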