framework-m-studio 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,406 @@
1
+ """DocType Transformer - LibCST-based file transformer.
2
+
3
+ This module provides file-level operations for updating DocType Python files:
4
+ - Parse existing files with LibCST
5
+ - Update/add/remove fields while preserving formatting
6
+ - Handle edge cases: field renames, type changes, deletions
7
+ - Preserve comments and custom methods
8
+
9
+ Usage:
10
+ from framework_m_studio.codegen.transformer import update_doctype
11
+
12
+ update_doctype(
13
+ file_path="src/doctypes/todo.py",
14
+ schema={"name": "Todo", "fields": [...]},
15
+ )
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from pathlib import Path
21
+ from typing import Any, cast
22
+
23
+ import libcst as cst
24
+
25
+ # =============================================================================
26
+ # LibCST Transformer for DocType Updates
27
+ # =============================================================================
28
+
29
+
30
class DocTypeTransformer(cst.CSTTransformer):
    """LibCST transformer to update DocType class fields.

    Handles:
    - Adding new fields
    - Updating existing field types/defaults
    - Removing deleted fields
    - Renaming fields
    - Preserving comments and custom methods

    Annotated assignments inside classes nested within the target class
    (e.g. an inner ``Config`` class) are left untouched.
    """

    def __init__(
        self,
        target_class: str,
        new_fields: dict[str, dict[str, Any]],
        fields_to_remove: set[str] | None = None,
        field_renames: dict[str, str] | None = None,
    ) -> None:
        """Initialize transformer.

        Args:
            target_class: Name of the DocType class to update
            new_fields: Dict of field_name -> field_schema
            fields_to_remove: Set of field names to remove
            field_renames: Dict of old_name -> new_name for renames
        """
        self.target_class = target_class
        self.new_fields = new_fields
        self.fields_to_remove = fields_to_remove or set()
        self.field_renames = field_renames or {}

        self.in_target_class = False
        self.existing_fields: set[str] = set()
        # Depth of classes we are currently inside *within* the target class.
        # While this is non-zero, annotated assignments belong to an inner
        # class (e.g. Config), not to the DocType, and must not be touched.
        self._nested_class_depth = 0

    def visit_ClassDef(self, node: cst.ClassDef) -> bool:
        """Track entry into the target class and any class nested inside it."""
        if not self.in_target_class and node.name.value == self.target_class:
            self.in_target_class = True
        elif self.in_target_class:
            # Entering a class nested inside the target class.
            self._nested_class_depth += 1
        return True

    def leave_ClassDef(
        self,
        original_node: cst.ClassDef,
        updated_node: cst.ClassDef,
    ) -> cst.ClassDef:
        """Append any still-missing fields when leaving the target class."""
        if self._nested_class_depth:
            # Leaving a class nested inside the target class.
            self._nested_class_depth -= 1
            return updated_node
        if original_node.name.value != self.target_class:
            return updated_node

        self.in_target_class = False

        # Fields requested by the schema that were not seen in the class body.
        fields_to_add = [
            (name, schema)
            for name, schema in self.new_fields.items()
            if name not in self.existing_fields
        ]
        if not fields_to_add:
            return updated_node

        new_statements = [
            self._create_field_statement(name, schema)
            for name, schema in fields_to_add
        ]

        # Insert after the docstring and the last existing field, before
        # methods and nested classes.
        body_list = cast(list[cst.BaseStatement], list(updated_node.body.body))
        insert_index = self._find_field_insert_index(body_list)
        body_list[insert_index:insert_index] = new_statements

        return updated_node.with_changes(
            body=updated_node.body.with_changes(body=body_list)
        )

    def leave_SimpleStatementLine(
        self,
        original_node: cst.SimpleStatementLine,
        updated_node: cst.SimpleStatementLine,
    ) -> cst.SimpleStatementLine | cst.RemovalSentinel:
        """Update, rename, or remove a single field declaration."""
        # Skip statements outside the target class or inside a nested class.
        if not self.in_target_class or self._nested_class_depth:
            return updated_node

        # Only annotated assignments (`name: type [= default]`) are fields.
        if not updated_node.body:
            return updated_node
        stmt = updated_node.body[0]
        if not isinstance(stmt, cst.AnnAssign):
            return updated_node
        if not isinstance(stmt.target, cst.Name):
            return updated_node

        field_name = stmt.target.value

        # Private attributes are never treated as DocType fields.
        if field_name.startswith("_"):
            return updated_node

        self.existing_fields.add(field_name)

        # Removal takes precedence over rename/update.
        if field_name in self.fields_to_remove:
            return cst.RemovalSentinel.REMOVE

        # Rename (optionally combined with a schema update for the new name).
        if field_name in self.field_renames:
            new_name = self.field_renames[field_name]
            new_stmt = stmt.with_changes(target=cst.Name(new_name))
            self.existing_fields.add(new_name)
            if new_name in self.new_fields:
                new_stmt = self._update_field(new_stmt, self.new_fields[new_name])
            return updated_node.with_changes(body=[new_stmt])

        # In-place type/default update.
        if field_name in self.new_fields:
            new_stmt = self._update_field(stmt, self.new_fields[field_name])
            return updated_node.with_changes(body=[new_stmt])

        return updated_node

    def _annotation_and_value(
        self,
        schema: dict[str, Any],
    ) -> tuple[cst.Annotation, cst.BaseExpression | None]:
        """Build the annotation and default expression for a field schema.

        Shared by field creation and field update so both stay consistent.
        """
        type_str = schema.get("type", "str")
        # Optional fields become `T | None` unless already spelled that way.
        if not schema.get("required", True) and "None" not in type_str:
            type_str = f"{type_str} | None"
        annotation = cst.Annotation(annotation=cst.parse_expression(type_str))

        value: cst.BaseExpression | None = None
        if schema.get("default"):
            value = cst.parse_expression(schema["default"])
        elif not schema.get("required", True):
            # Optional fields without an explicit default get `= None`.
            value = cst.Name("None")
        return annotation, value

    def _update_field(
        self,
        stmt: cst.AnnAssign,
        schema: dict[str, Any],
    ) -> cst.AnnAssign:
        """Rewrite an existing field's annotation and default from *schema*."""
        annotation, value = self._annotation_and_value(schema)
        return stmt.with_changes(annotation=annotation, value=value)

    def _create_field_statement(
        self,
        field_name: str,
        schema: dict[str, Any],
    ) -> cst.SimpleStatementLine:
        """Create a new `name: type = default` statement line."""
        annotation, value = self._annotation_and_value(schema)
        return cst.SimpleStatementLine(
            body=[
                cst.AnnAssign(
                    target=cst.Name(field_name),
                    annotation=annotation,
                    value=value,
                )
            ]
        )

    def _find_field_insert_index(
        self,
        body: list[cst.BaseStatement],
    ) -> int:
        """Find the best index to insert new fields.

        New fields go after the class docstring and the last existing field,
        but before any methods or nested classes.
        """
        insert_index = 0

        for i, stmt in enumerate(body):
            if isinstance(stmt, cst.SimpleStatementLine):
                if stmt.body and isinstance(stmt.body[0], cst.Expr):
                    expr = stmt.body[0].value
                    if isinstance(expr, cst.SimpleString | cst.ConcatenatedString):
                        # Keep the docstring on top: advance the insertion
                        # point past it instead of inserting fields before it.
                        insert_index = max(insert_index, i + 1)
                        continue

                # An existing field: new fields go after the last one.
                if stmt.body and isinstance(stmt.body[0], cst.AnnAssign):
                    insert_index = i + 1

            # Stop before methods or nested classes.
            elif isinstance(stmt, (cst.FunctionDef, cst.ClassDef)):
                break

        return insert_index
241
+
242
+
243
+ # =============================================================================
244
+ # Public API
245
+ # =============================================================================
246
+
247
+
248
def update_doctype(
    file_path: str | Path,
    schema: dict[str, Any],
    fields_to_remove: list[str] | None = None,
    field_renames: dict[str, str] | None = None,
) -> str:
    """Rewrite a DocType source file so it matches *schema*.

    Parses the file with LibCST, applies a :class:`DocTypeTransformer`
    (add/update/remove/rename fields while preserving formatting), writes
    the result back, and returns it.

    Args:
        file_path: Path to the DocType Python file
        schema: Updated DocType schema with fields
        fields_to_remove: List of field names to remove
        field_renames: Dict of old_name -> new_name for renames

    Returns:
        Updated source code (also written to file)

    Raises:
        FileNotFoundError: If file doesn't exist
        ValueError: If file can't be parsed

    Example:
        >>> update_doctype(
        ...     "src/doctypes/todo.py",
        ...     {
        ...         "name": "Todo",
        ...         "fields": [
        ...             {"name": "title", "type": "str", "required": True},
        ...             {"name": "priority", "type": "int", "default": "1"},
        ...         ],
        ...     },
        ...     fields_to_remove=["old_field"],
        ...     field_renames={"status": "state"},
        ... )
    """
    target = Path(file_path)
    if not target.exists():
        raise FileNotFoundError(f"File not found: {file_path}")

    original_source = target.read_text(encoding="utf-8")

    try:
        module = cst.parse_module(original_source)
    except cst.ParserSyntaxError as e:
        raise ValueError(f"Failed to parse Python file: {e}") from e

    # Index the schema's fields by name for the transformer.
    field_map: dict[str, dict[str, Any]] = {
        spec["name"]: spec for spec in schema.get("fields", [])
    }

    transformer = DocTypeTransformer(
        target_class=schema["name"],
        new_fields=field_map,
        fields_to_remove=set(fields_to_remove or []),
        field_renames=field_renames or {},
    )

    updated_source = module.visit(transformer).code

    # Persist the transformed source before returning it.
    target.write_text(updated_source, encoding="utf-8")

    return updated_source
314
+
315
+
316
def add_field(
    file_path: str | Path,
    doctype_name: str,
    field_name: str,
    field_type: str,
    default: str | None = None,
    required: bool = True,
) -> str:
    """Add one field to a DocType file.

    Thin convenience wrapper over :func:`update_doctype` for the
    single-field case.

    Args:
        file_path: Path to the DocType file
        doctype_name: Name of the DocType class
        field_name: Name of the new field
        field_type: Python type annotation
        default: Default value expression
        required: Whether field is required

    Returns:
        Updated source code
    """
    field_spec = {
        "name": field_name,
        "type": field_type,
        "default": default,
        "required": required,
    }
    schema = {"name": doctype_name, "fields": [field_spec]}
    return update_doctype(file_path, schema)
353
+
354
+
355
def remove_field(
    file_path: str | Path,
    doctype_name: str,
    field_name: str,
) -> str:
    """Delete one field from a DocType file.

    Args:
        file_path: Path to the DocType file
        doctype_name: Name of the DocType class
        field_name: Name of the field to remove

    Returns:
        Updated source code
    """
    # An empty field list means "no adds/updates"; only the removal applies.
    schema = {"name": doctype_name, "fields": []}
    return update_doctype(file_path, schema, fields_to_remove=[field_name])
375
+
376
+
377
def rename_field(
    file_path: str | Path,
    doctype_name: str,
    old_name: str,
    new_name: str,
) -> str:
    """Rename one field in a DocType file.

    Args:
        file_path: Path to the DocType file
        doctype_name: Name of the DocType class
        old_name: Current field name
        new_name: New field name

    Returns:
        Updated source code
    """
    # An empty field list means "no adds/updates"; only the rename applies.
    rename_map = {old_name: new_name}
    return update_doctype(
        file_path,
        {"name": doctype_name, "fields": []},
        field_renames=rename_map,
    )
399
+
400
+
401
# Public API of this module (sorted alphabetically).
__all__ = [
    "add_field",
    "remove_field",
    "rename_field",
    "update_doctype",
]
@@ -0,0 +1,193 @@
1
+ """DocType Discovery Service.
2
+
3
+ This module provides functionality to scan a project for DocType files.
4
+ It delegates parsing to the codegen.parser module to avoid code duplication.
5
+
6
+ Features:
7
+ - Scan directories for *.py files containing DocType classes
8
+ - Return structured JSON for Studio UI (list view)
9
+
10
+ For deep parsing of individual files, use codegen.parser.parse_doctype() directly.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ from dataclasses import dataclass, field
16
+ from fnmatch import fnmatch
17
+ from pathlib import Path
18
+ from typing import Any
19
+
20
+ from framework_m_studio.codegen.parser import parse_doctype
21
+
22
+ # =============================================================================
23
+ # Data Models (lightweight for scanning)
24
+ # =============================================================================
25
+
26
+
27
@dataclass
class FieldInfo:
    """Parsed field information from a DocType class."""

    name: str  # field name as declared in the class body
    type: str  # type annotation rendered as a string (e.g. "str | None")
    default: str | None = None  # default value expression, if any
    required: bool = True  # False when the field is optional
    description: str | None = None
    label: str | None = None  # presumably a UI display label — confirm with Studio
    validators: dict[str, Any] = field(default_factory=dict)  # validator config by name
38
+
39
+
40
@dataclass
class DocTypeInfo:
    """Parsed DocType information for list view."""

    name: str  # DocType class name
    module: str  # dotted module path reported by the parser ("" if unknown)
    file_path: str  # source file the DocType was found in
    fields: list[FieldInfo] = field(default_factory=list)
    docstring: str | None = None  # class docstring, if present
    meta: dict[str, Any] = field(default_factory=dict)  # parser "config" section
50
+
51
+
52
+ # =============================================================================
53
+ # Public API
54
+ # =============================================================================
55
+
56
+
57
def parse_doctype_file(file_path: Path) -> list[DocTypeInfo]:
    """Extract DocType definitions from a single Python file.

    Delegates the heavy lifting to codegen.parser.parse_doctype() and
    converts the raw schema dict into a DocTypeInfo. Missing or unparsable
    files yield an empty list rather than raising.

    Args:
        file_path: Path to the Python file

    Returns:
        List of DocTypeInfo objects found in the file
    """
    try:
        raw = parse_doctype(file_path)
    except (FileNotFoundError, ValueError):
        return []

    def _to_field(spec: dict[str, Any]) -> FieldInfo:
        # Map one raw field dict from the parser onto a FieldInfo.
        return FieldInfo(
            name=spec["name"],
            type=spec["type"],
            default=spec.get("default"),
            required=spec.get("required", True),
            description=spec.get("description"),
            label=spec.get("label"),
            validators=spec.get("validators", {}),
        )

    info = DocTypeInfo(
        name=raw["name"],
        module=raw.get("module", ""),
        file_path=raw.get("file_path", str(file_path)),
        fields=[_to_field(spec) for spec in raw.get("fields", [])],
        docstring=raw.get("docstring"),
        meta=raw.get("config", {}),
    )
    return [info]
97
+
98
+
99
def scan_doctypes(
    root_dir: Path,
    exclude_patterns: list[str] | None = None,
) -> list[DocTypeInfo]:
    """Scan a directory tree for DocType definitions.

    Args:
        root_dir: Root directory to scan
        exclude_patterns: Glob patterns to exclude (e.g., ["**/test_*"])

    Returns:
        List of all DocTypeInfo objects found
    """
    exclude_patterns = exclude_patterns or [
        "**/test_*.py",
        "**/tests/**",
        "**/__pycache__/**",
        "**/.venv/**",
        "**/node_modules/**",
    ]

    doctypes: list[DocTypeInfo] = []

    # Find all Python files
    for py_file in root_dir.rglob("*.py"):
        relative = py_file.relative_to(root_dir)
        # Use pathlib's parts and POSIX rendering so directory checks and
        # fnmatch patterns behave identically on Windows (os.sep == "\\")
        # and POSIX systems.
        path_parts = relative.parts
        relative_path = relative.as_posix()

        # Check if file matches any exclusion pattern
        excluded = False

        # Check path segments for excluded directories and test files
        for part in path_parts:
            if part in ("tests", "__pycache__", ".venv", "node_modules"):
                excluded = True
                break
            if part.startswith("test_") and part.endswith(".py"):
                excluded = True
                break

        # Also check fnmatch patterns
        if not excluded:
            for pattern in exclude_patterns:
                if fnmatch(relative_path, pattern):
                    excluded = True
                    break

        if excluded:
            continue

        # Quick textual pre-filter before parsing: "DocType" is a substring
        # of "BaseDocType", so one membership test covers both spellings.
        try:
            content = py_file.read_text(encoding="utf-8")
            if "DocType" not in content:
                continue
        except (OSError, UnicodeDecodeError):
            # Unreadable or non-UTF-8 file: skip silently (best-effort scan).
            continue

        # Parse and extract DocTypes (delegates to parser.py)
        doctypes.extend(parse_doctype_file(py_file))

    return doctypes
162
+
163
+
164
def doctype_to_dict(doctype: DocTypeInfo) -> dict[str, Any]:
    """Serialize a DocTypeInfo into a plain, JSON-ready dictionary."""
    field_payload = [
        {
            "name": f.name,
            "type": f.type,
            "default": f.default,
            "required": f.required,
            "description": f.description,
            "label": f.label,
            "validators": f.validators,
        }
        for f in doctype.fields
    ]
    return {
        "name": doctype.name,
        "module": doctype.module,
        "file_path": doctype.file_path,
        "docstring": doctype.docstring,
        "fields": field_payload,
        "meta": doctype.meta,
    }
185
+
186
+
187
# Public API of this module (sorted alphabetically).
__all__ = [
    "DocTypeInfo",
    "FieldInfo",
    "doctype_to_dict",
    "parse_doctype_file",
    "scan_doctypes",
]