django-cfg 1.4.71__py3-none-any.whl → 1.4.73__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

django_cfg/__init__.py CHANGED
@@ -32,7 +32,7 @@ Example:
  default_app_config = "django_cfg.apps.DjangoCfgConfig"

  # Version information
- __version__ = "1.4.71"
+ __version__ = "1.4.73"
  __license__ = "MIT"

  # Import registry for organized lazy loading
@@ -6,7 +6,6 @@ Enhanced inline classes with better organization and conditional loading.

  from unfold.admin import TabularInline

- from django_cfg.apps.support.models import Ticket
  from django_cfg.modules.base import BaseCfgModule

  from ..models import UserActivity, UserRegistrationSource
@@ -135,6 +134,7 @@ class UserSupportTicketsInline(TabularInline):

              # Only import if support is enabled
              if base_module.is_support_enabled():
+                 from django_cfg.apps.support.models import Ticket
                  self.model = Ticket
          except (ImportError, Exception):
              # Support app not available or not enabled
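
Deferring the `Ticket` import until `is_support_enabled()` has returned true means importing the admin module no longer pulls in `django_cfg.apps.support.models` when the support app is disabled or absent. A minimal sketch of the same deferred-import pattern; the class and the `configure_model` hook are illustrative stand-ins (the real inline subclasses `unfold.admin.TabularInline`):

```python
from django_cfg.modules.base import BaseCfgModule


class SupportTicketsInlineSketch:
    """Illustrative stand-in for UserSupportTicketsInline."""

    model = None

    def configure_model(self):
        try:
            base_module = BaseCfgModule()
            if base_module.is_support_enabled():
                # Imported only once support is known to be enabled, so a
                # missing or disabled support app cannot break module import.
                from django_cfg.apps.support.models import Ticket
                self.model = Ticket
        except (ImportError, Exception):
            # Support app not available or not enabled
            self.model = None
```
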
@@ -262,6 +262,8 @@ class CustomUserAdmin(BaseUserAdmin, PydanticAdmin):
      @computed_field("Emails")
      def emails_count(self, obj):
          """Show count of emails sent to user (if newsletter app is enabled)."""
+         from django.db.utils import ProgrammingError, OperationalError
+
          try:
              base_module = BaseCfgModule()

@@ -278,12 +280,17 @@ class CustomUserAdmin(BaseUserAdmin, PydanticAdmin):
                  variant="success",
                  icon=Icons.EMAIL
              )
+         except (ProgrammingError, OperationalError):
+             # Table doesn't exist in database
+             return None
          except (ImportError, Exception):
              return None

      @computed_field("Tickets")
      def tickets_count(self, obj):
          """Show count of support tickets for user (if support app is enabled)."""
+         from django.db.utils import ProgrammingError, OperationalError
+
          try:
              base_module = BaseCfgModule()

@@ -300,6 +307,9 @@ class CustomUserAdmin(BaseUserAdmin, PydanticAdmin):
                  variant="warning",
                  icon=Icons.SUPPORT_AGENT
              )
+         except (ProgrammingError, OperationalError):
+             # Table doesn't exist in database
+             return None
          except (ImportError, Exception):
              return None

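Both badge columns now swallow `ProgrammingError` and `OperationalError` in addition to import errors, so the admin changelist still renders against a database where the newsletter or support tables have not been created yet (for example before migrations run). A small, self-contained sketch of the same guard around an arbitrary count query:

```python
from django.db.utils import OperationalError, ProgrammingError


def safe_count(queryset):
    """Return queryset.count(), or None if the backing table does not exist yet."""
    try:
        return queryset.count()
    except (ProgrammingError, OperationalError):
        # Table doesn't exist in the database (e.g. migrations not applied)
        return None
```
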
@@ -19,7 +19,7 @@ from .config import (
  )

  # Generators
- from .generator import GoGenerator, PythonGenerator, TypeScriptGenerator
+ from .generator import GoGenerator, ProtoGenerator, PythonGenerator, TypeScriptGenerator

  # Groups
  from .groups import GroupDetector, GroupManager
@@ -53,4 +53,5 @@ __all__ = [
      "PythonGenerator",
      "TypeScriptGenerator",
      "GoGenerator",
+     "ProtoGenerator",
  ]
@@ -34,6 +34,8 @@ class ArchiveManager:
          group_name: str,
          python_dir: Optional[Path] = None,
          typescript_dir: Optional[Path] = None,
+         go_dir: Optional[Path] = None,
+         proto_dir: Optional[Path] = None,
      ) -> Dict:
          """
          Archive generated clients.
@@ -42,6 +44,8 @@
              group_name: Name of the group
              python_dir: Python client directory
              typescript_dir: TypeScript client directory
+             go_dir: Go client directory
+             proto_dir: Protocol Buffer definitions directory

          Returns:
              Archive result dictionary
@@ -65,6 +69,16 @@
              shutil.copytree(typescript_dir, dest, dirs_exist_ok=True)
              copied["typescript"] = str(dest)

+         if go_dir and go_dir.exists():
+             dest = archive_path / "go"
+             shutil.copytree(go_dir, dest, dirs_exist_ok=True)
+             copied["go"] = str(dest)
+
+         if proto_dir and proto_dir.exists():
+             dest = archive_path / "proto"
+             shutil.copytree(proto_dir, dest, dirs_exist_ok=True)
+             copied["proto"] = str(dest)
+
          # Create metadata
          metadata = {
              "group": group_name,
@@ -25,6 +25,7 @@ from typing import Literal
  from ..ir import IRContext
  from .base import GeneratedFile
  from .go import GoGenerator
+ from .proto import ProtoGenerator
  from .python import PythonGenerator
  from .typescript import TypeScriptGenerator

@@ -32,10 +33,12 @@ __all__ = [
      "PythonGenerator",
      "TypeScriptGenerator",
      "GoGenerator",
+     "ProtoGenerator",
      "GeneratedFile",
      "generate_python",
      "generate_typescript",
      "generate_go",
+     "generate_proto",
      "generate_client",
  ]

@@ -122,9 +125,41 @@ def generate_go(context: IRContext, output_dir: Path | None = None, **kwargs) ->
      return files


+ def generate_proto(context: IRContext, output_dir: Path | None = None, **kwargs) -> list[GeneratedFile]:
+     """
+     Generate Protocol Buffer definitions from IR.
+
+     Args:
+         context: IRContext from parser
+         output_dir: Optional output directory (saves files if provided)
+         **kwargs: Additional options (split_files, package_name, etc.)
+
+     Returns:
+         List of GeneratedFile objects
+
+     Examples:
+         >>> files = generate_proto(context)
+         >>> # Or save directly
+         >>> files = generate_proto(context, output_dir=Path("./generated/proto"))
+         >>> # With custom settings
+         >>> files = generate_proto(
+         ...     context,
+         ...     split_files=False,  # Single api.proto file
+         ...     package_name="myapi.v1"
+         ... )
+     """
+     generator = ProtoGenerator(context, **kwargs)
+     files = generator.generate()
+
+     if output_dir:
+         generator.save_files(files, output_dir)
+
+     return files
+
+
  def generate_client(
      context: IRContext,
-     language: Literal["python", "typescript", "go"],
+     language: Literal["python", "typescript", "go", "proto"],
      output_dir: Path | None = None,
      **kwargs,
  ) -> list[GeneratedFile]:
@@ -133,7 +168,7 @@ def generate_client(

      Args:
          context: IRContext from parser
-         language: Target language ('python', 'typescript', or 'go')
+         language: Target language ('python', 'typescript', 'go', or 'proto')
          output_dir: Optional output directory
          **kwargs: Additional language-specific options

@@ -144,6 +179,7 @@ def generate_client(
          >>> files = generate_client(context, "python")
          >>> files = generate_client(context, "typescript", Path("./generated"))
          >>> files = generate_client(context, "go", Path("./generated"), generate_package_files=True)
+         >>> files = generate_client(context, "proto", Path("./generated"), split_files=False)
      """
      if language == "python":
          return generate_python(context, output_dir)
@@ -151,5 +187,7 @@ def generate_client(
          return generate_typescript(context, output_dir)
      elif language == "go":
          return generate_go(context, output_dir, **kwargs)
+     elif language == "proto":
+         return generate_proto(context, output_dir, **kwargs)
      else:
          raise ValueError(f"Unsupported language: {language}")
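
`generate_client` now dispatches `"proto"` to `generate_proto` alongside the existing Python, TypeScript, and Go branches. A short sketch of driving the dispatcher end to end; how the `IRContext` is produced is outside this diff, so it is only assumed here:

```python
from pathlib import Path

# Assumption: `context` is an IRContext produced by the package's OpenAPI parser.
files = generate_client(context, "proto", Path("./generated/proto"), split_files=True)

for generated in files:
    # GeneratedFile carries path, content, and description
    print(generated.path, "-", generated.description)

# Unknown targets still fail fast:
# generate_client(context, "rust")  -> ValueError: Unsupported language: rust
```
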
@@ -0,0 +1,17 @@
+ """
+ Proto Generator Module - Protocol Buffer/gRPC code generation.
+
+ Generates .proto files from OpenAPI/IR for gRPC client generation.
+ """
+
+ from .generator import ProtoGenerator
+ from .messages_generator import ProtoMessagesGenerator
+ from .services_generator import ProtoServicesGenerator
+ from .type_mapper import ProtoTypeMapper
+
+ __all__ = [
+     "ProtoGenerator",
+     "ProtoTypeMapper",
+     "ProtoMessagesGenerator",
+     "ProtoServicesGenerator",
+ ]
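
The same classes can be used directly instead of going through `generate_proto`, for example to set the proto package name per run. A minimal sketch, assuming an `IRContext` is already available and that `ProtoGenerator` has been imported from the package declared above (its full dotted path is not visible in this diff):

```python
from pathlib import Path

# Assumptions: `context` is an IRContext from the parser, and ProtoGenerator
# comes from the proto generator package shown above.
generator = ProtoGenerator(context, split_files=True, package_name="myapi.v1")
files = generator.generate()

for f in files:
    print(f.path)  # e.g. "<service>/messages.proto", "<service>/service.proto", "README.md"

# save_files() is the same call generate_proto() uses to write results to disk:
generator.save_files(files, Path("./generated/proto"))
```
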
@@ -0,0 +1,461 @@
+ """
+ Proto Generator - Main Protocol Buffer code generator.
+
+ Generates .proto files from IR (Intermediate Representation) for gRPC client generation.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ from ..base import BaseGenerator, GeneratedFile
+ from .messages_generator import ProtoMessagesGenerator
+ from .services_generator import ProtoServicesGenerator
+ from .type_mapper import ProtoTypeMapper
+
+ if TYPE_CHECKING:
+     from django_cfg.modules.django_client.core.ir import IRContext, IROperationObject, IRSchemaObject
+
+
+ class ProtoGenerator(BaseGenerator):
+     """
+     Protocol Buffer generator for gRPC clients.
+
+     Generates:
+     - messages.proto: Message definitions (models)
+     - services.proto: Service and RPC definitions (API endpoints)
+     - Or combined api.proto with both messages and services
+
+     The generated .proto files can be used with protoc to generate:
+     - Python gRPC client (via grpc_tools.protoc)
+     - Go gRPC client (via protoc-gen-go and protoc-gen-go-grpc)
+     - TypeScript gRPC client (via protoc-gen-ts)
+     - Any other language with protoc support
+     """
+
+     def __init__(
+         self,
+         context: IRContext,
+         split_files: bool = True,
+         package_name: str | None = None,
+         **kwargs,
+     ):
+         """
+         Initialize Proto generator.
+
+         Args:
+             context: IRContext from parser
+             split_files: If True, generate separate messages.proto and services.proto
+                 If False, generate single api.proto
+             package_name: Proto package name (e.g., "myapi.v1")
+                 Defaults to "api.v1"
+             **kwargs: Additional arguments passed to BaseGenerator
+         """
+         super().__init__(context, **kwargs)
+
+         self.split_files = split_files
+         self.package_name = package_name or "api.v1"
+
+         # Initialize sub-generators
+         self.type_mapper = ProtoTypeMapper()
+         self.messages_generator = ProtoMessagesGenerator(self.type_mapper)
+         self.services_generator = ProtoServicesGenerator(self.type_mapper, context)
+
+     def generate(self) -> list[GeneratedFile]:
+         """
+         Generate all proto files.
+
+         Returns:
+             List of GeneratedFile objects organized by service/tag
+         """
+         files = []
+
+         # Group operations by tag (similar to other generators)
+         ops_by_tag = self.group_operations_by_tag()
+
+         # Generate proto files for each tag/service
+         for tag, operations in sorted(ops_by_tag.items()):
+             folder_name = self.tag_and_app_to_folder_name(tag, operations)
+
+             # Get schemas used by this service
+             service_schemas = self._get_schemas_for_operations(operations)
+
+             # Generate messages.proto for this service
+             messages_file = self._generate_service_messages_file(
+                 folder_name, tag, service_schemas
+             )
+             files.append(messages_file)
+
+             # Generate service.proto for this service
+             service_file = self._generate_service_file(
+                 folder_name, tag, operations
+             )
+             files.append(service_file)
+
+         # Generate root README.md with protoc compilation instructions
+         readme_file = self._generate_readme_file(ops_by_tag)
+         files.append(readme_file)
+
+         return files
+
+     def _get_schemas_for_operations(self, operations: list[IROperationObject]) -> dict[str, IRSchemaObject]:
+         """
+         Get all schemas used by given operations.
+
+         This resolves all schema dependencies to ensure nested schemas are included.
+         """
+         schemas = {}
+
+         def add_schema(schema_name: str):
+             """Recursively add schema and its dependencies."""
+             if schema_name in schemas or schema_name not in self.context.schemas:
+                 return
+
+             schema = self.context.schemas[schema_name]
+             schemas[schema_name] = schema
+
+             # Recursively add referenced schemas
+             if schema.properties:
+                 for prop_schema in schema.properties.values():
+                     if prop_schema.ref and prop_schema.ref in self.context.schemas:
+                         add_schema(prop_schema.ref)
+                     elif prop_schema.type == "array" and prop_schema.items:
+                         if prop_schema.items.ref:
+                             add_schema(prop_schema.items.ref)
+
+         for operation in operations:
+             # Request body schemas
+             if operation.request_body and operation.request_body.schema_name:
+                 schema_name = operation.request_body.schema_name
+                 # Check in schemas, request_models, and response_models
+                 if schema_name in self.context.schemas:
+                     add_schema(schema_name)
+                 elif schema_name in self.context.request_models:
+                     schemas[schema_name] = self.context.request_models[schema_name]
+
+             # Patch request body schemas (important for PATCH operations!)
+             if hasattr(operation, 'patch_request_body') and operation.patch_request_body and operation.patch_request_body.schema_name:
+                 schema_name = operation.patch_request_body.schema_name
+                 # Check in schemas, request_models, patch_models
+                 if schema_name in self.context.schemas:
+                     add_schema(schema_name)
+                 elif schema_name in self.context.patch_models:
+                     schemas[schema_name] = self.context.patch_models[schema_name]
+                 elif schema_name in self.context.request_models:
+                     schemas[schema_name] = self.context.request_models[schema_name]
+
+             # Response schemas
+             for response in operation.responses.values():
+                 if response.schema_name:
+                     schema_name = response.schema_name
+                     if schema_name in self.context.schemas:
+                         add_schema(schema_name)
+                     elif schema_name in self.context.response_models:
+                         schemas[schema_name] = self.context.response_models[schema_name]
+
+             # Parameter schemas (if they reference components)
+             for param in operation.parameters:
+                 if hasattr(param, 'schema_name') and param.schema_name:
+                     if param.schema_name in self.context.schemas:
+                         add_schema(param.schema_name)
+
+         return schemas
+
+     def _generate_service_messages_file(
+         self, folder_name: str, tag: str, schemas: dict[str, IRSchemaObject]
+     ) -> GeneratedFile:
+         """Generate messages.proto file for a specific service."""
+         # Generate message definitions for these schemas
+         self.messages_generator.generate_all_messages(schemas)
+         messages_content = self.messages_generator.get_all_definitions()
+
+         # Build proto file content
+         content = self._build_proto_header(f"{folder_name}/messages.proto", tag)
+
+         if messages_content:
+             content += "\n\n" + messages_content
+
+         return GeneratedFile(
+             path=f"{folder_name}/messages.proto",
+             content=content,
+             description=f"Protocol Buffer message definitions for {tag}",
+         )
+
+     def _generate_service_file(
+         self, folder_name: str, tag: str, operations: list[IROperationObject]
+     ) -> GeneratedFile:
+         """Generate service.proto file for a specific service."""
+         # Generate service definitions from operations
+         service_definitions = self.services_generator.generate_all_services(operations)
+
+         # Build proto file content
+         content = self._build_proto_header(f"{folder_name}/service.proto", tag)
+         # Import messages.proto from the same folder
+         content += f'\nimport "{folder_name}/messages.proto";\n'
+
+         # Add all service definitions
+         for service_name, service_def in service_definitions.items():
+             content += "\n\n" + service_def
+
+         return GeneratedFile(
+             path=f"{folder_name}/service.proto",
+             content=content,
+             description=f"gRPC service definitions for {tag}",
+         )
+
+     def _generate_readme_file(self, ops_by_tag: dict[str, list[IROperationObject]]) -> GeneratedFile:
+         """Generate README.md with protoc compilation instructions."""
+         lines = [
+             "# Protocol Buffer Definitions",
+             "",
+             f"Generated from OpenAPI specification for package `{self.package_name}`",
+             "",
+             "## Structure",
+             "",
+             "Each service has its own folder containing:",
+             "- `messages.proto` - Message definitions (models)",
+             "- `service.proto` - Service and RPC definitions",
+             "",
+             "## Services",
+             "",
+         ]
+
+         for tag in sorted(ops_by_tag.keys()):
+             operations = ops_by_tag[tag]
+             folder_name = self.tag_and_app_to_folder_name(tag, operations)
+             lines.append(f"- **{tag}**: `{folder_name}/` ({len(operations)} operations)")
+
+         lines.extend([
+             "",
+             "## Compilation",
+             "",
+             "### Python (grpc_tools)",
+             "```bash",
+             "# Install dependencies",
+             "pip install grpcio grpcio-tools",
+             "",
+             "# Compile each service",
+         ])
+
+         for tag in sorted(ops_by_tag.keys()):
+             operations = ops_by_tag[tag]
+             folder_name = self.tag_and_app_to_folder_name(tag, operations)
+             lines.append(f"python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. {folder_name}/*.proto")
+
+         lines.extend([
+             "```",
+             "",
+             "### Go",
+             "```bash",
+             "# Install dependencies",
+             "go install google.golang.org/protobuf/cmd/protoc-gen-go@latest",
+             "go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest",
+             "",
+             "# Compile each service",
+         ])
+
+         for tag in sorted(ops_by_tag.keys()):
+             operations = ops_by_tag[tag]
+             folder_name = self.tag_and_app_to_folder_name(tag, operations)
+             lines.append(f"protoc -I. --go_out=. --go-grpc_out=. {folder_name}/*.proto")
+
+         lines.extend([
+             "```",
+             "",
+             "### TypeScript (ts-proto)",
+             "```bash",
+             "# Install dependencies",
+             "npm install ts-proto",
+             "",
+             "# Compile each service",
+         ])
+
+         for tag in sorted(ops_by_tag.keys()):
+             operations = ops_by_tag[tag]
+             folder_name = self.tag_and_app_to_folder_name(tag, operations)
+             lines.append(f"protoc -I. --plugin=./node_modules/.bin/protoc-gen-ts_proto --ts_proto_out=. {folder_name}/*.proto")
+
+         lines.extend([
+             "```",
+             "",
+             "## Usage Example",
+             "",
+             "After compilation, you can use the generated clients in your application.",
+             "",
+             "### Python",
+             "```python",
+             "import grpc",
+             f"from {self.package_name.replace('.', '_')} import service_pb2, service_pb2_grpc",
+             "",
+             "# Create channel",
+             "channel = grpc.insecure_channel('localhost:50051')",
+             "",
+             "# Create stub",
+             "stub = service_pb2_grpc.YourServiceStub(channel)",
+             "",
+             "# Make request",
+             "request = service_pb2.YourRequest(field='value')",
+             "response = stub.YourMethod(request)",
+             "```",
+             "",
+             "---",
+             "",
+             "*Generated by django-cfg django_client module*",
+         ])
+
+         return GeneratedFile(
+             path="README.md",
+             content="\n".join(lines),
+             description="Protocol Buffer compilation and usage instructions",
+         )
+
+     def _generate_messages_file(self) -> GeneratedFile:
+         """Generate messages.proto file with all message definitions."""
+         # Collect all schemas from context
+         all_schemas = {
+             **self.context.schemas,
+             **self.context.request_models,
+             **self.context.response_models,
+             **self.context.patch_models,
+         }
+
+         # Generate message definitions
+         self.messages_generator.generate_all_messages(all_schemas)
+         messages_content = self.messages_generator.get_all_definitions()
+
+         # Build proto file content
+         content = self._build_proto_header("messages.proto")
+
+         if messages_content:
+             content += "\n\n" + messages_content
+
+         return GeneratedFile(
+             path="messages.proto",
+             content=content,
+             description="Protocol Buffer message definitions",
+         )
+
+     def _generate_services_file(self) -> GeneratedFile:
+         """Generate services.proto file with all service definitions."""
+         # Generate service definitions from operations
+         operations = list(self.context.operations.values())
+         service_definitions = self.services_generator.generate_all_services(operations)
+
+         # Build proto file content
+         content = self._build_proto_header("services.proto")
+         content += '\nimport "messages.proto";\n'
+
+         # Add all service definitions
+         for service_name, service_def in service_definitions.items():
+             content += "\n\n" + service_def
+
+         return GeneratedFile(
+             path="services.proto",
+             content=content,
+             description="gRPC service definitions",
+         )
+
+     def _generate_combined_file(self) -> GeneratedFile:
+         """Generate single api.proto file with both messages and services."""
+         # Collect all schemas
+         all_schemas = {
+             **self.context.schemas,
+             **self.context.request_models,
+             **self.context.response_models,
+             **self.context.patch_models,
+         }
+
+         # Generate message definitions
+         self.messages_generator.generate_all_messages(all_schemas)
+         messages_content = self.messages_generator.get_all_definitions()
+
+         # Generate service definitions
+         operations = list(self.context.operations.values())
+         service_definitions = self.services_generator.generate_all_services(operations)
+
+         # Build combined proto file
+         content = self._build_proto_header("api.proto")
+
+         # Add messages first
+         if messages_content:
+             content += "\n\n// ===== Messages =====\n\n"
+             content += messages_content
+
+         # Add services
+         if service_definitions:
+             content += "\n\n// ===== Services =====\n\n"
+             for service_name, service_def in service_definitions.items():
+                 content += service_def + "\n\n"
+
+         return GeneratedFile(
+             path="api.proto",
+             content=content,
+             description="Combined Protocol Buffer definitions",
+         )
+
+     def _build_proto_header(self, file_name: str, tag: str | None = None) -> str:
+         """
+         Build proto file header with syntax, package, and imports.
+
+         Args:
+             file_name: Name of the proto file
+             tag: Optional service tag for package naming
+
+         Returns:
+             Header string with syntax declaration, package, and imports
+         """
+         # Use tag-specific package if provided
+         package_name = f"{self.package_name}.{self.tag_to_property_name(tag)}" if tag else self.package_name
+
+         lines = [
+             f'// {file_name}',
+             '// Generated by django-cfg django_client module',
+             '// DO NOT EDIT - This file is auto-generated',
+             '',
+             'syntax = "proto3";',
+             '',
+             f'package {package_name};',
+             '',
+         ]
+
+         # Add required imports
+         imports = self.type_mapper.get_required_imports()
+         if imports:
+             for import_path in imports:
+                 lines.append(f'import "{import_path}";')
+             lines.append('')
+
+         return '\n'.join(lines)
+
+     # ===== Abstract Method Implementations (Not used for proto) =====
+
+     def generate_schema(self, schema: IRSchemaObject) -> str:
+         """
+         Generate proto message for a single schema.
+
+         Note: This is called by BaseGenerator abstract method requirement,
+         but proto generation works differently - we generate all messages at once.
+         """
+         return self.messages_generator.generate_message(schema)
+
+     def generate_enum(self, schema: IRSchemaObject) -> str:
+         """
+         Generate proto enum from schema.
+
+         Note: This is called by BaseGenerator abstract method requirement,
+         but enums are generated as part of message generation in proto.
+         """
+         if not schema.enum or not schema.name:
+             return ""
+
+         return self.messages_generator._generate_enum(schema, schema.name)
+
+     def generate_operation(self, operation: IROperationObject) -> str:
+         """
+         Generate RPC definition for a single operation.
+
+         Note: This is called by BaseGenerator abstract method requirement,
+         but proto generation works differently - we generate all services at once.
+         """
+         service_name, definitions = self.services_generator.generate_rpc(operation)
+         return '\n'.join(definitions)
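
For reference, `_build_proto_header` means every generated file opens with a fixed comment block, the `proto3` syntax line, and a package derived from `package_name` plus the service tag. With the default `api.v1` package and a hypothetical `shop` service, the start of `shop/messages.proto` would look roughly like this (any imports reported by the type mapper would follow the package line):

```proto
// shop/messages.proto
// Generated by django-cfg django_client module
// DO NOT EDIT - This file is auto-generated

syntax = "proto3";

package api.v1.shop;
```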