comptext-codex 5.0.0 (comptext_codex-5.0.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,536 @@
+ """Module L: ETL - Data extraction, transformation, loading with validations."""
+
+ from typing import Any, Dict, List, Optional
+ from datetime import datetime
+
+ from comptext_codex.registry import codex_module, codex_command
+ from .base import BaseModule
+
+
+ @codex_module(
+     code="L",
+     name="ETL/Data Pipelines",
+     purpose="Data extraction, transformation, loading with validations",
+     token_priority="high",
+     security={"pii_safe": False, "threat_model": "data_masking"},
+     privacy={"dp_budget": "epsilon<=0.5_per_pipeline", "audit_logging": True},
+ )
+ class ModuleL(BaseModule):
+     """ETL module for data pipelines."""
+
+     @codex_command(syntax="@EXTRACT[source, query, ...]", description="Extract data from various sources", token_cost_hint=70)
+     def execute_extract(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         source = kwargs.get('source', 'database')
+         query = kwargs.get('query', None)
+         filters = kwargs.get('filters', {})
+         batch_size = kwargs.get('batch_size', 1000)
+         parallel = kwargs.get('parallel', False)
+
+         # Simulate extraction for the requested source type; only the selected extractor runs
+         extractors = {
+             'database': self._extract_database,
+             'api': self._extract_api,
+             'file': self._extract_file,
+             's3': self._extract_s3,
+             'kafka': self._extract_kafka,
+             'sftp': self._extract_sftp,
+             'websocket': self._extract_websocket
+         }
+
+         extraction_result = extractors.get(source, self._extract_database)(kwargs)
+
+         return {
+             'source': source,
+             'source_config': extraction_result,
+             'records_extracted': extraction_result.get('record_count', 10000),
+             'extraction_time': '2.5s',
+             'batches': extraction_result.get('record_count', 10000) // batch_size,
+             'batch_size': batch_size,
+             'parallel_execution': parallel,
+             'filters_applied': filters,
+             'status': 'success',
+             'metadata': {
+                 'timestamp': datetime.now().isoformat(),
+                 'schema_detected': True,
+                 'data_types': self._infer_schema()
+             }
+         }
+
+     @codex_command(syntax="@TRANSFORM[operations, ...]", description="Transform data with multiple operations", token_cost_hint=65)
+     def execute_transform(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         operations = kwargs.get('operations', [])
+         if isinstance(operations, str):
+             operations = [operations]
+
+         # Get previous results from context
+         input_records = context.get('_last_result', {}).get('records_extracted', 10000) if context else 10000
+
+         # Apply transformations
+         transformations = []
+         for op in operations:
+             if op in ['clean', 'cleanse']:
+                 transformations.append(self._transform_clean())
+             elif op in ['normalize', 'normalise']:
+                 transformations.append(self._transform_normalize())
+             elif op == 'dedupe':
+                 transformations.append(self._transform_dedupe())
+             elif op == 'enrich':
+                 transformations.append(self._transform_enrich())
+             elif op == 'aggregate':
+                 transformations.append(self._transform_aggregate())
+             elif op == 'pivot':
+                 transformations.append(self._transform_pivot())
+
+         # Additional specific transformations from kwargs
+         if kwargs.get('clean'):
+             transformations.append(self._transform_clean())
+         if kwargs.get('normalize'):
+             transformations.append(self._transform_normalize())
+         if kwargs.get('type_cast'):
+             transformations.append(self._transform_type_cast(kwargs.get('type_cast')))
+
+         valid_records = int(input_records * 0.98)  # 98% pass validation
+         filtered_records = input_records - valid_records
+
+         return {
+             'transformations_applied': len(transformations),
+             'transformation_details': transformations,
+             'records_input': input_records,
+             'records_processed': input_records,
+             'records_valid': valid_records,
+             'records_filtered': filtered_records,
+             'data_quality_score': 0.98,
+             'execution_time': '1.8s',
+             'memory_usage': '256MB',
+             'status': 'success',
+             'warnings': self._generate_warnings(filtered_records)
+         }
+
+     @codex_command(syntax="@LOAD[destination, mode, ...]", description="Load data to destination", token_cost_hint=60)
+     def execute_load(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         destination = kwargs.get('destination', 'warehouse')
+         mode = kwargs.get('mode', 'append')
+         batch_size = kwargs.get('batch_size', 1000)
+         verify = kwargs.get('verify', True)
+
+         # Get records from context
+         input_records = context.get('_last_result', {}).get('records_valid', 9800) if context else 9800
+
+         loaders = {
+             'warehouse': self._load_warehouse,
+             'database': self._load_database,
+             's3': self._load_s3,
+             'lake': self._load_data_lake,
+             'api': self._load_api,
+             'file': self._load_file
+         }
+
+         load_result = loaders.get(destination, self._load_warehouse)(kwargs)
+
+         return {
+             'destination': destination,
+             'destination_config': load_result,
+             'mode': mode,
+             'records_loaded': input_records,
+             'records_failed': 0,
+             'batch_size': batch_size,
+             'batches_completed': input_records // batch_size,
+             'load_time': '3.2s',
+             'verification': 'passed' if verify else 'skipped',
+             'indexes_updated': kwargs.get('update_indexes', True),
+             'status': 'success',
+             'metadata': {
+                 'timestamp': datetime.now().isoformat(),
+                 'load_id': 'load_' + datetime.now().strftime('%Y%m%d_%H%M%S'),
+                 'checksum': 'abc123def456'
+             }
+         }
+
+     @codex_command(syntax="@VALIDATE[rules, ...]", description="Validate data against rules", token_cost_hint=50)
+     def execute_validate(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         rules = kwargs.get('rules', [])
+         strict = kwargs.get('strict', False)
+
+         validation_results = []
+         for rule in rules if isinstance(rules, list) else [rules]:
+             validation_results.append(self._validate_rule(rule, strict))
+
+         input_records = context.get('_last_result', {}).get('records_processed', 10000) if context else 10000
+         failed = sum(v.get('failures', 0) for v in validation_results)
+
+         return {
+             'validations_performed': len(validation_results),
+             'validation_details': validation_results,
+             'records_validated': input_records,
+             'records_passed': input_records - failed,
+             'records_failed': failed,
+             'success_rate': (input_records - failed) / input_records if input_records > 0 else 0,
+             'strict_mode': strict,
+             'status': 'success' if failed == 0 or not strict else 'warning'
+         }
+
+     @codex_command(syntax="@DEDUPE[strategy, keys, ...]", description="Remove duplicate records", token_cost_hint=45)
+     def execute_dedupe(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         strategy = kwargs.get('strategy', 'exact')
+         keys = kwargs.get('keys', [])
+
+         input_records = context.get('_last_result', {}).get('records_processed', 10000) if context else 10000
+         duplicates_found = int(input_records * 0.05)  # 5% duplicates
+
+         strategies = {
+             'exact': 'Exact match on all fields',
+             'fuzzy': 'Fuzzy matching with threshold',
+             'key_based': f'Based on keys: {", ".join(keys) if keys else "id"}',
+             'hash': 'Hash-based deduplication'
+         }
+
+         return {
+             'strategy': strategy,
+             'strategy_description': strategies.get(strategy, strategies['exact']),
+             'keys': keys if keys else ['id'],
+             'records_input': input_records,
+             'duplicates_found': duplicates_found,
+             'records_output': input_records - duplicates_found,
+             'deduplication_rate': duplicates_found / input_records if input_records > 0 else 0,
+             'execution_time': '0.8s',
+             'status': 'success'
+         }
+
+     @codex_command(syntax="@ENRICH[sources, fields, ...]", description="Enrich data from external sources", token_cost_hint=55)
+     def execute_enrich(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         sources = kwargs.get('sources', [])
+         fields = kwargs.get('fields', [])
+
+         input_records = context.get('_last_result', {}).get('records_processed', 10000) if context else 10000
+         enriched = int(input_records * 0.95)  # 95% successfully enriched
+
+         enrichment_details = []
+         for source in sources if isinstance(sources, list) else [sources]:
+             enrichment_details.append({
+                 'source': source,
+                 'fields_added': fields if fields else ['additional_field_1', 'additional_field_2'],
+                 'match_rate': 0.95,
+                 'api_calls': enriched
+             })
+
+         return {
+             'enrichment_sources': len(enrichment_details),
+             'enrichment_details': enrichment_details,
+             'records_input': input_records,
+             'records_enriched': enriched,
+             'records_not_enriched': input_records - enriched,
+             'fields_added': len(fields) if fields else 2,
+             'enrichment_rate': enriched / input_records if input_records > 0 else 0,
+             'execution_time': '5.2s',
+             'status': 'success'
+         }
+
+     @codex_command(syntax="@AGGREGATE[operations, group_by, ...]", description="Aggregate data with grouping", token_cost_hint=50)
+     def execute_aggregate(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         operations = kwargs.get('operations', ['sum', 'count'])
+         group_by = kwargs.get('group_by', [])
+
+         input_records = context.get('_last_result', {}).get('records_processed', 10000) if context else 10000
+         output_groups = len(group_by) * 100 if group_by else 50
+
+         aggregations = []
+         for op in operations if isinstance(operations, list) else [operations]:
+             aggregations.append({
+                 'operation': op,
+                 'field': kwargs.get('field', 'value'),
+                 'result': f'{op}_result'
+             })
+
+         return {
+             'aggregations_performed': len(aggregations),
+             'aggregation_details': aggregations,
+             'group_by_fields': group_by if group_by else ['category'],
+             'records_input': input_records,
+             'groups_output': output_groups,
+             'reduction_ratio': input_records / output_groups if output_groups > 0 else 0,
+             'execution_time': '1.2s',
+             'status': 'success'
+         }
+
+     @codex_command(syntax="@PIPELINE[steps, schedule, ...]", description="Define and execute a complete ETL pipeline", token_cost_hint=75)
+     def execute_pipeline(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         steps = kwargs.get('steps', ['extract', 'transform', 'load'])
+         schedule = kwargs.get('schedule', None)
+         retry_policy = kwargs.get('retry', {'max_attempts': 3, 'backoff': 'exponential'})
+
+         pipeline_steps = []
+         total_time = 0.0
+
+         for step in steps if isinstance(steps, list) else [steps]:
+             step_result = {
+                 'step': step,
+                 'status': 'success',
+                 'duration': f'{1.5 + len(step) * 0.1:.1f}s',
+                 'records_processed': 10000
+             }
+             total_time += float(step_result['duration'].rstrip('s'))
+             pipeline_steps.append(step_result)
+
+         return {
+             'pipeline_name': kwargs.get('name', 'etl_pipeline'),
+             'steps': len(pipeline_steps),
+             'step_details': pipeline_steps,
+             'total_execution_time': f'{total_time:.1f}s',
+             'schedule': schedule or 'manual',
+             'retry_policy': retry_policy,
+             'parallel_execution': kwargs.get('parallel', False),
+             'status': 'success',
+             'next_run': 'Not scheduled' if not schedule else 'In 24 hours',
+             'metadata': {
+                 'pipeline_id': 'pipe_' + datetime.now().strftime('%Y%m%d_%H%M%S'),
+                 'version': kwargs.get('version', '1.0.0'),
+                 'author': kwargs.get('author', 'comptext')
+             }
+         }
+
+     # Helper methods for extraction
+
+     def _extract_database(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from database."""
+         return {
+             'type': 'database',
+             'connection': kwargs.get('connection', 'postgres://localhost/db'),
+             'table': kwargs.get('table', 'users'),
+             'query': kwargs.get('query', 'SELECT * FROM users'),
+             'record_count': 10000,
+             'columns': ['id', 'name', 'email', 'created_at']
+         }
+
+     def _extract_api(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from API."""
+         return {
+             'type': 'api',
+             'endpoint': kwargs.get('endpoint', '/api/v1/data'),
+             'method': kwargs.get('method', 'GET'),
+             'auth': kwargs.get('auth', 'bearer_token'),
+             'pagination': kwargs.get('pagination', {'type': 'offset', 'page_size': 100}),
+             'record_count': 10000,
+             'rate_limit': '1000 req/hour'
+         }
+
+     def _extract_file(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from file."""
+         return {
+             'type': 'file',
+             'path': kwargs.get('path', '/data/input.csv'),
+             'format': kwargs.get('format', 'csv'),
+             'delimiter': kwargs.get('delimiter', ','),
+             'encoding': kwargs.get('encoding', 'utf-8'),
+             'record_count': 10000,
+             'file_size': '5.2 MB'
+         }
+
+     def _extract_s3(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from S3."""
+         return {
+             'type': 's3',
+             'bucket': kwargs.get('bucket', 'data-bucket'),
+             'prefix': kwargs.get('prefix', 'raw/'),
+             'format': kwargs.get('format', 'parquet'),
+             'compression': kwargs.get('compression', 'snappy'),
+             'record_count': 10000,
+             'objects': 5
+         }
+
+     def _extract_kafka(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from Kafka."""
+         return {
+             'type': 'kafka',
+             'topic': kwargs.get('topic', 'events'),
+             'consumer_group': kwargs.get('consumer_group', 'etl-group'),
+             'offset': kwargs.get('offset', 'latest'),
+             'record_count': 10000,
+             'lag': '0'
+         }
+
+     def _extract_sftp(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from SFTP."""
+         return {
+             'type': 'sftp',
+             'host': kwargs.get('host', 'sftp.example.com'),
+             'path': kwargs.get('path', '/data/'),
+             'pattern': kwargs.get('pattern', '*.csv'),
+             'record_count': 10000,
+             'files': 3
+         }
+
+     def _extract_websocket(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Extract from WebSocket."""
+         return {
+             'type': 'websocket',
+             'url': kwargs.get('url', 'wss://stream.example.com'),
+             'protocol': kwargs.get('protocol', 'json'),
+             'record_count': 10000,
+             'duration': '60s'
+         }
+
+     # Helper methods for transformation
+
+     def _transform_clean(self) -> Dict[str, Any]:
+         """Clean data transformation."""
+         return {
+             'type': 'clean',
+             'operations': ['trim_whitespace', 'remove_nulls', 'standardize_case'],
+             'records_affected': 1200
+         }
+
+     def _transform_normalize(self) -> Dict[str, Any]:
+         """Normalize data transformation."""
+         return {
+             'type': 'normalize',
+             'operations': ['scale_values', 'standardize_dates', 'format_phone_numbers'],
+             'records_affected': 10000
+         }
+
+     def _transform_dedupe(self) -> Dict[str, Any]:
+         """Dedupe transformation."""
+         return {
+             'type': 'dedupe',
+             'duplicates_removed': 500,
+             'strategy': 'key_based'
+         }
+
+     def _transform_enrich(self) -> Dict[str, Any]:
+         """Enrich transformation."""
+         return {
+             'type': 'enrich',
+             'fields_added': ['geo_location', 'timezone'],
+             'records_enriched': 9500
+         }
+
+     def _transform_aggregate(self) -> Dict[str, Any]:
+         """Aggregate transformation."""
+         return {
+             'type': 'aggregate',
+             'aggregations': ['sum', 'avg', 'count'],
+             'groups': 50
+         }
+
+     def _transform_pivot(self) -> Dict[str, Any]:
+         """Pivot transformation."""
+         return {
+             'type': 'pivot',
+             'pivot_column': 'category',
+             'value_column': 'amount',
+             'columns_created': 12
+         }
+
+     def _transform_type_cast(self, types: Dict[str, str]) -> Dict[str, Any]:
+         """Type casting transformation."""
+         return {
+             'type': 'type_cast',
+             'conversions': types,
+             'records_affected': 10000
+         }
+
+     # Helper methods for loading
+
+     def _load_warehouse(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load to data warehouse."""
+         return {
+             'type': 'warehouse',
+             'platform': kwargs.get('platform', 'snowflake'),
+             'schema': kwargs.get('schema', 'public'),
+             'table': kwargs.get('table', 'fact_table'),
+             'partitioning': kwargs.get('partition_by', 'date')
+         }
+
+     def _load_database(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load to database."""
+         return {
+             'type': 'database',
+             'connection': kwargs.get('connection', 'postgres://localhost/db'),
+             'table': kwargs.get('table', 'processed_data'),
+             'indexes': kwargs.get('indexes', ['id', 'created_at'])
+         }
+
+     def _load_s3(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load to S3."""
+         return {
+             'type': 's3',
+             'bucket': kwargs.get('bucket', 'processed-data'),
+             'prefix': kwargs.get('prefix', 'processed/'),
+             'format': kwargs.get('format', 'parquet'),
+             'compression': kwargs.get('compression', 'snappy')
+         }
+
+     def _load_data_lake(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load to data lake."""
+         return {
+             'type': 'data_lake',
+             'platform': kwargs.get('platform', 'delta_lake'),
+             'path': kwargs.get('path', '/lake/processed/'),
+             'format': kwargs.get('format', 'delta'),
+             'partition_by': kwargs.get('partition_by', ['year', 'month'])
+         }
+
+     def _load_api(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load via API."""
+         return {
+             'type': 'api',
+             'endpoint': kwargs.get('endpoint', '/api/v1/load'),
+             'method': 'POST',
+             'batch_size': kwargs.get('batch_size', 100),
+             'retry_on_failure': True
+         }
+
+     def _load_file(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+         """Load to file."""
+         return {
+             'type': 'file',
+             'path': kwargs.get('path', '/data/output.csv'),
+             'format': kwargs.get('format', 'csv'),
+             'compression': kwargs.get('compression', None)
+         }
+
+     # Helper methods for validation
+
+     def _validate_rule(self, rule: str, strict: bool) -> Dict[str, Any]:
+         """Validate a single rule."""
+         return {
+             'rule': rule,
+             'type': self._infer_rule_type(rule),
+             'records_checked': 10000,
+             'failures': 100 if not strict else 0,
+             'success_rate': 0.99 if not strict else 1.0
+         }
+
+     def _infer_rule_type(self, rule: str) -> str:
+         """Infer validation rule type."""
+         if 'email' in rule.lower():
+             return 'format'
+         elif 'not null' in rule.lower() or 'required' in rule.lower():
+             return 'presence'
+         elif 'range' in rule.lower() or '>' in rule or '<' in rule:
+             return 'range'
+         elif 'unique' in rule.lower():
+             return 'uniqueness'
+         return 'custom'
+
+     def _infer_schema(self) -> Dict[str, str]:
+         """Infer data schema."""
+         return {
+             'id': 'integer',
+             'name': 'string',
+             'email': 'string',
+             'created_at': 'timestamp',
+             'value': 'float'
+         }
+
+     def _generate_warnings(self, filtered_count: int) -> List[str]:
+         """Generate warnings based on filtered records."""
+         warnings = []
+         if filtered_count > 0:
+             warnings.append(f'{filtered_count} records filtered due to validation failures')
+         if filtered_count > 500:
+             warnings.append('High number of filtered records - review data quality')
+         return warnings
+
+
+ def get_module() -> ModuleL:
+     return ModuleL()
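
A minimal usage sketch for Module L, assuming its command methods are called directly: each result is threaded through the '_last_result' context key that execute_transform and execute_load read. The import path comptext_codex.modules.module_l is assumed for illustration only; the actual module path and the registry's @EXTRACT/@TRANSFORM/@LOAD command routing are not shown in this diff.

    # Sketch: drive Module L directly, threading results through the
    # '_last_result' context key read by execute_transform/execute_load.
    from comptext_codex.modules.module_l import get_module  # hypothetical path

    etl = get_module()
    context = {}

    extracted = etl.execute_extract(source='file', path='/data/input.csv', format='csv')
    context['_last_result'] = extracted

    transformed = etl.execute_transform(operations=['clean', 'dedupe'], context=context)
    context['_last_result'] = transformed

    loaded = etl.execute_load(destination='warehouse', mode='append', verify=True, context=context)
    print(loaded['records_loaded'], loaded['status'])  # -> 9800 success with the simulated counts above
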
@@ -0,0 +1,34 @@
+ """Module M: MCP Integration - Multi-agent messaging, tool routing, contract governance."""
+
+ from typing import Any, Dict, Optional
+
+ from comptext_codex.registry import codex_module, codex_command
+ from .base import BaseModule
+
+
+ @codex_module(
+     code="M",
+     name="MCP Integration",
+     purpose="Multi-agent messaging, tool routing, and contract governance",
+     token_priority="high",
+     security={"pii_safe": True, "threat_model": "capability_scoped"},
+     privacy={"dp_budget": "epsilon<=0.2_per_exchange", "federated_ready": True},
+ )
+ class ModuleM(BaseModule):
+     """MCP Integration module for multi-agent systems."""
+
+     @codex_command(syntax="@AGENT_ROLE[name, capabilities, ...]", description="Define agent role and capabilities", token_cost_hint=40)
+     def execute_agent_role(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         name = kwargs.get("name", "Agent")
+         caps = kwargs.get("capabilities", [])
+         return {"agent_name": name, "capabilities": caps if isinstance(caps, list) else [caps], "constraints": kwargs.get("constraints", []), "role_defined": True}
+
+     @codex_command(syntax="@TASK_ASSIGN[task, workflow, ...]", description="Assign task to agents", token_cost_hint=40)
+     def execute_task_assign(self, *args, context: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
+         task = kwargs.get("task", "default_task")
+         wf = kwargs.get("workflow", [])
+         return {"task": task, "workflow": wf if isinstance(wf, list) else [wf], "collaboration": kwargs.get("collaboration", "sequential"), "status": "assigned"}
+
+
+ def get_module() -> ModuleM:
+     return ModuleM()
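
A similar sketch for Module M, again assuming direct method calls; the MCP transport and how the registry routes @AGENT_ROLE/@TASK_ASSIGN commands are not shown in this diff, and the import path is assumed for illustration.

    # Sketch: define an agent role and assign a task via Module M directly.
    from comptext_codex.modules.module_m import get_module  # hypothetical path

    mcp = get_module()

    role = mcp.execute_agent_role(name="Planner", capabilities=["decompose", "route"], constraints=["read_only"])
    assignment = mcp.execute_task_assign(task="summarize_dataset", workflow=["extract", "summarize"], collaboration="sequential")

    print(role["role_defined"], assignment["status"])  # -> True assigned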