claude-mpm 4.3.12__py3-none-any.whl → 4.3.14__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (206)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/PM_INSTRUCTIONS.md +414 -28
  3. claude_mpm/agents/templates/data_engineer.json +39 -14
  4. claude_mpm/agents/templates/engineer.json +11 -3
  5. claude_mpm/cli/commands/agent_manager.py +3 -3
  6. claude_mpm/cli/commands/agents.py +2 -2
  7. claude_mpm/cli/commands/aggregate.py +1 -1
  8. claude_mpm/cli/commands/config.py +2 -2
  9. claude_mpm/cli/commands/configure.py +5 -5
  10. claude_mpm/cli/commands/configure_tui.py +7 -7
  11. claude_mpm/cli/commands/dashboard.py +1 -1
  12. claude_mpm/cli/commands/debug.py +5 -5
  13. claude_mpm/cli/commands/mcp.py +1 -1
  14. claude_mpm/cli/commands/mcp_command_router.py +1 -1
  15. claude_mpm/cli/commands/mcp_config.py +7 -10
  16. claude_mpm/cli/commands/mcp_external_commands.py +40 -32
  17. claude_mpm/cli/commands/mcp_install_commands.py +38 -10
  18. claude_mpm/cli/commands/mcp_setup_external.py +143 -102
  19. claude_mpm/cli/commands/monitor.py +2 -2
  20. claude_mpm/cli/commands/mpm_init_handler.py +1 -1
  21. claude_mpm/cli/commands/run.py +54 -2
  22. claude_mpm/cli/commands/search.py +41 -34
  23. claude_mpm/cli/interactive/agent_wizard.py +6 -2
  24. claude_mpm/cli/parsers/mcp_parser.py +1 -3
  25. claude_mpm/cli/parsers/search_parser.py +10 -4
  26. claude_mpm/cli/startup_logging.py +158 -5
  27. claude_mpm/cli/utils.py +1 -1
  28. claude_mpm/core/agent_registry.py +2 -2
  29. claude_mpm/core/agent_session_manager.py +8 -8
  30. claude_mpm/core/api_validator.py +6 -4
  31. claude_mpm/core/base_service.py +10 -10
  32. claude_mpm/core/cache.py +5 -5
  33. claude_mpm/core/config_constants.py +1 -1
  34. claude_mpm/core/container.py +1 -1
  35. claude_mpm/core/error_handler.py +2 -2
  36. claude_mpm/core/file_utils.py +1 -1
  37. claude_mpm/core/framework_loader.py +3 -3
  38. claude_mpm/core/hook_manager.py +8 -6
  39. claude_mpm/core/instruction_reinforcement_hook.py +2 -2
  40. claude_mpm/core/interactive_session.py +3 -1
  41. claude_mpm/core/lazy.py +3 -3
  42. claude_mpm/core/log_manager.py +16 -12
  43. claude_mpm/core/logger.py +16 -11
  44. claude_mpm/core/optimized_agent_loader.py +6 -6
  45. claude_mpm/core/output_style_manager.py +1 -1
  46. claude_mpm/core/pm_hook_interceptor.py +3 -3
  47. claude_mpm/core/service_registry.py +1 -1
  48. claude_mpm/core/session_manager.py +11 -9
  49. claude_mpm/core/socketio_pool.py +13 -13
  50. claude_mpm/core/types.py +2 -2
  51. claude_mpm/core/unified_agent_registry.py +2 -2
  52. claude_mpm/core/unified_paths.py +1 -1
  53. claude_mpm/dashboard/analysis_runner.py +4 -4
  54. claude_mpm/dashboard/api/simple_directory.py +1 -1
  55. claude_mpm/generators/agent_profile_generator.py +4 -2
  56. claude_mpm/hooks/base_hook.py +2 -2
  57. claude_mpm/hooks/claude_hooks/__pycache__/__init__.cpython-313.pyc +0 -0
  58. claude_mpm/hooks/claude_hooks/__pycache__/event_handlers.cpython-313.pyc +0 -0
  59. claude_mpm/hooks/claude_hooks/__pycache__/hook_handler.cpython-313.pyc +0 -0
  60. claude_mpm/hooks/claude_hooks/__pycache__/installer.cpython-313.pyc +0 -0
  61. claude_mpm/hooks/claude_hooks/__pycache__/memory_integration.cpython-313.pyc +0 -0
  62. claude_mpm/hooks/claude_hooks/__pycache__/response_tracking.cpython-313.pyc +0 -0
  63. claude_mpm/hooks/claude_hooks/__pycache__/tool_analysis.cpython-313.pyc +0 -0
  64. claude_mpm/hooks/claude_hooks/connection_pool.py +4 -4
  65. claude_mpm/hooks/claude_hooks/event_handlers.py +12 -12
  66. claude_mpm/hooks/claude_hooks/hook_handler.py +4 -4
  67. claude_mpm/hooks/claude_hooks/hook_handler_eventbus.py +3 -3
  68. claude_mpm/hooks/claude_hooks/hook_handler_original.py +15 -14
  69. claude_mpm/hooks/claude_hooks/hook_handler_refactored.py +4 -4
  70. claude_mpm/hooks/claude_hooks/installer.py +3 -3
  71. claude_mpm/hooks/claude_hooks/memory_integration.py +3 -3
  72. claude_mpm/hooks/claude_hooks/response_tracking.py +3 -3
  73. claude_mpm/hooks/claude_hooks/services/__pycache__/__init__.cpython-313.pyc +0 -0
  74. claude_mpm/hooks/claude_hooks/services/__pycache__/connection_manager.cpython-313.pyc +0 -0
  75. claude_mpm/hooks/claude_hooks/services/__pycache__/connection_manager_http.cpython-313.pyc +0 -0
  76. claude_mpm/hooks/claude_hooks/services/__pycache__/duplicate_detector.cpython-313.pyc +0 -0
  77. claude_mpm/hooks/claude_hooks/services/__pycache__/state_manager.cpython-313.pyc +0 -0
  78. claude_mpm/hooks/claude_hooks/services/__pycache__/subagent_processor.cpython-313.pyc +0 -0
  79. claude_mpm/hooks/claude_hooks/services/connection_manager.py +8 -5
  80. claude_mpm/hooks/claude_hooks/services/connection_manager_http.py +3 -3
  81. claude_mpm/hooks/claude_hooks/services/state_manager.py +8 -7
  82. claude_mpm/hooks/claude_hooks/services/subagent_processor.py +3 -3
  83. claude_mpm/hooks/claude_hooks/tool_analysis.py +2 -2
  84. claude_mpm/hooks/memory_integration_hook.py +1 -1
  85. claude_mpm/hooks/tool_call_interceptor.py +2 -2
  86. claude_mpm/models/agent_session.py +7 -5
  87. claude_mpm/scripts/mcp_server.py +0 -0
  88. claude_mpm/scripts/start_activity_logging.py +0 -0
  89. claude_mpm/services/__init__.py +1 -1
  90. claude_mpm/services/agent_capabilities_service.py +1 -1
  91. claude_mpm/services/agents/agent_builder.py +3 -3
  92. claude_mpm/services/agents/deployment/agent_deployment.py +2 -1
  93. claude_mpm/services/agents/deployment/agent_discovery_service.py +9 -3
  94. claude_mpm/services/agents/deployment/agent_filesystem_manager.py +5 -5
  95. claude_mpm/services/agents/deployment/agent_lifecycle_manager.py +3 -1
  96. claude_mpm/services/agents/deployment/agent_metrics_collector.py +1 -1
  97. claude_mpm/services/agents/deployment/agent_operation_service.py +2 -2
  98. claude_mpm/services/agents/deployment/agent_state_service.py +2 -2
  99. claude_mpm/services/agents/deployment/agent_template_builder.py +1 -1
  100. claude_mpm/services/agents/deployment/agent_versioning.py +1 -1
  101. claude_mpm/services/agents/deployment/deployment_wrapper.py +2 -3
  102. claude_mpm/services/agents/deployment/pipeline/steps/agent_processing_step.py +1 -1
  103. claude_mpm/services/agents/loading/agent_profile_loader.py +5 -3
  104. claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
  105. claude_mpm/services/agents/local_template_manager.py +6 -6
  106. claude_mpm/services/agents/management/agent_management_service.py +3 -3
  107. claude_mpm/services/agents/memory/content_manager.py +3 -3
  108. claude_mpm/services/agents/memory/memory_format_service.py +2 -2
  109. claude_mpm/services/agents/memory/template_generator.py +3 -3
  110. claude_mpm/services/agents/registry/modification_tracker.py +2 -2
  111. claude_mpm/services/async_session_logger.py +3 -3
  112. claude_mpm/services/claude_session_logger.py +4 -4
  113. claude_mpm/services/cli/agent_listing_service.py +3 -1
  114. claude_mpm/services/cli/agent_validation_service.py +2 -0
  115. claude_mpm/services/cli/memory_crud_service.py +11 -6
  116. claude_mpm/services/cli/memory_output_formatter.py +1 -1
  117. claude_mpm/services/cli/session_manager.py +15 -11
  118. claude_mpm/services/core/memory_manager.py +81 -23
  119. claude_mpm/services/core/path_resolver.py +1 -1
  120. claude_mpm/services/diagnostics/checks/installation_check.py +1 -1
  121. claude_mpm/services/event_aggregator.py +4 -2
  122. claude_mpm/services/event_bus/direct_relay.py +5 -3
  123. claude_mpm/services/event_bus/event_bus.py +3 -3
  124. claude_mpm/services/event_bus/relay.py +6 -4
  125. claude_mpm/services/events/consumers/dead_letter.py +5 -3
  126. claude_mpm/services/events/core.py +3 -3
  127. claude_mpm/services/events/producers/hook.py +6 -6
  128. claude_mpm/services/events/producers/system.py +8 -8
  129. claude_mpm/services/exceptions.py +5 -5
  130. claude_mpm/services/framework_claude_md_generator/content_assembler.py +3 -3
  131. claude_mpm/services/framework_claude_md_generator/section_generators/__init__.py +2 -2
  132. claude_mpm/services/hook_installer_service.py +1 -1
  133. claude_mpm/services/infrastructure/context_preservation.py +6 -4
  134. claude_mpm/services/infrastructure/daemon_manager.py +2 -2
  135. claude_mpm/services/infrastructure/logging.py +2 -2
  136. claude_mpm/services/mcp_config_manager.py +175 -30
  137. claude_mpm/services/mcp_gateway/__init__.py +1 -1
  138. claude_mpm/services/mcp_gateway/auto_configure.py +3 -3
  139. claude_mpm/services/mcp_gateway/config/config_loader.py +1 -1
  140. claude_mpm/services/mcp_gateway/config/configuration.py +1 -1
  141. claude_mpm/services/mcp_gateway/core/base.py +2 -2
  142. claude_mpm/services/mcp_gateway/main.py +21 -7
  143. claude_mpm/services/mcp_gateway/registry/tool_registry.py +10 -8
  144. claude_mpm/services/mcp_gateway/server/mcp_gateway.py +4 -4
  145. claude_mpm/services/mcp_gateway/server/stdio_handler.py +1 -1
  146. claude_mpm/services/mcp_gateway/server/stdio_server.py +5 -2
  147. claude_mpm/services/mcp_gateway/tools/base_adapter.py +15 -15
  148. claude_mpm/services/mcp_gateway/tools/document_summarizer.py +7 -5
  149. claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +190 -137
  150. claude_mpm/services/mcp_gateway/tools/health_check_tool.py +5 -5
  151. claude_mpm/services/mcp_gateway/tools/hello_world.py +9 -9
  152. claude_mpm/services/mcp_gateway/tools/ticket_tools.py +16 -16
  153. claude_mpm/services/mcp_gateway/tools/unified_ticket_tool.py +16 -16
  154. claude_mpm/services/memory/builder.py +7 -5
  155. claude_mpm/services/memory/indexed_memory.py +4 -4
  156. claude_mpm/services/memory/optimizer.py +6 -6
  157. claude_mpm/services/memory/router.py +3 -3
  158. claude_mpm/services/monitor/daemon.py +1 -1
  159. claude_mpm/services/monitor/daemon_manager.py +6 -6
  160. claude_mpm/services/monitor/event_emitter.py +2 -2
  161. claude_mpm/services/monitor/management/lifecycle.py +3 -1
  162. claude_mpm/services/monitor/server.py +4 -4
  163. claude_mpm/services/monitor_build_service.py +2 -2
  164. claude_mpm/services/port_manager.py +3 -1
  165. claude_mpm/services/response_tracker.py +2 -2
  166. claude_mpm/services/session_management_service.py +3 -2
  167. claude_mpm/services/socketio/client_proxy.py +2 -2
  168. claude_mpm/services/socketio/dashboard_server.py +4 -3
  169. claude_mpm/services/socketio/event_normalizer.py +11 -7
  170. claude_mpm/services/socketio/handlers/base.py +2 -2
  171. claude_mpm/services/socketio/handlers/connection.py +10 -10
  172. claude_mpm/services/socketio/handlers/connection_handler.py +13 -10
  173. claude_mpm/services/socketio/handlers/hook.py +16 -15
  174. claude_mpm/services/socketio/migration_utils.py +1 -1
  175. claude_mpm/services/socketio/monitor_client.py +5 -5
  176. claude_mpm/services/socketio/server/broadcaster.py +9 -7
  177. claude_mpm/services/socketio/server/connection_manager.py +2 -2
  178. claude_mpm/services/socketio/server/core.py +7 -5
  179. claude_mpm/services/socketio/server/eventbus_integration.py +18 -12
  180. claude_mpm/services/socketio/server/main.py +13 -13
  181. claude_mpm/services/socketio_client_manager.py +4 -4
  182. claude_mpm/services/system_instructions_service.py +2 -2
  183. claude_mpm/services/utility_service.py +5 -2
  184. claude_mpm/services/version_control/branch_strategy.py +2 -2
  185. claude_mpm/services/version_control/git_operations.py +22 -20
  186. claude_mpm/services/version_control/semantic_versioning.py +3 -3
  187. claude_mpm/services/version_control/version_parser.py +7 -5
  188. claude_mpm/services/visualization/mermaid_generator.py +3 -1
  189. claude_mpm/storage/state_storage.py +1 -1
  190. claude_mpm/tools/code_tree_analyzer.py +23 -18
  191. claude_mpm/tools/code_tree_builder.py +2 -2
  192. claude_mpm/tools/code_tree_events.py +10 -8
  193. claude_mpm/tools/socketio_debug.py +3 -3
  194. claude_mpm/utils/agent_dependency_loader.py +6 -2
  195. claude_mpm/utils/dependency_strategies.py +8 -3
  196. claude_mpm/utils/environment_context.py +1 -1
  197. claude_mpm/utils/error_handler.py +2 -2
  198. claude_mpm/utils/file_utils.py +1 -1
  199. claude_mpm/utils/log_cleanup.py +21 -7
  200. claude_mpm/validation/agent_validator.py +2 -2
  201. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/METADATA +1 -1
  202. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/RECORD +204 -191
  203. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/WHEEL +0 -0
  204. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/entry_points.txt +0 -0
  205. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/licenses/LICENSE +0 -0
  206. {claude_mpm-4.3.12.dist-info → claude_mpm-4.3.14.dist-info}/top_level.txt +0 -0
claude_mpm/agents/templates/data_engineer.json
@@ -1,11 +1,11 @@
  {
  "schema_version": "1.2.0",
  "agent_id": "data-engineer",
- "agent_version": "2.4.2",
+ "agent_version": "2.5.0",
  "agent_type": "engineer",
  "metadata": {
  "name": "Data Engineer Agent",
- "description": "Python-powered data transformation specialist for file conversions, ETL pipelines, and data processing",
+ "description": "Python-powered data transformation specialist for file conversions, ETL pipelines, database migrations, and data processing",
  "category": "engineering",
  "tags": [
  "data",
@@ -19,11 +19,14 @@
  "ai-apis",
  "database",
  "pipelines",
- "ETL"
+ "ETL",
+ "migration",
+ "alembic",
+ "sqlalchemy"
  ],
  "author": "Claude MPM Team",
  "created_at": "2025-07-27T03:45:51.463500Z",
- "updated_at": "2025-09-20T13:50:00.000000Z",
+ "updated_at": "2025-09-25T00:00:00.000000Z",
  "color": "yellow"
  },
  "capabilities": {
@@ -55,15 +58,19 @@
  ]
  }
  },
- "instructions": "# Data Engineer Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Python data transformation specialist with expertise in file conversions, data processing, and ETL pipelines\n\n## Core Expertise\n\n**PRIMARY MANDATE**: Use Python scripting and data tools (pandas, openpyxl, xlsxwriter, etc.) to perform data transformations, file conversions, and processing tasks.\n\n### Python Data Transformation Specialties\n\n**File Conversion Expertise**:\n- CSV ↔ Excel (XLS/XLSX) conversions with formatting preservation\n- JSON ↔ CSV/Excel transformations\n- Parquet ↔ CSV for big data workflows\n- XML ↔ JSON/CSV parsing and conversion\n- Fixed-width to delimited formats\n- TSV/PSV and custom delimited files\n\n**Data Processing Capabilities**:\n```python\n# Example: CSV to Excel with formatting\nimport pandas as pd\nfrom openpyxl.styles import Font, Alignment, PatternFill\n\n# Read CSV\ndf = pd.read_csv('input.csv')\n\n# Data transformations\ndf['date'] = pd.to_datetime(df['date'])\ndf['amount'] = df['amount'].astype(float)\n\n# Write to Excel with formatting\nwith pd.ExcelWriter('output.xlsx', engine='openpyxl') as writer:\n df.to_excel(writer, sheet_name='Data', index=False)\n worksheet = writer.sheets['Data']\n \n # Apply formatting\n for cell in worksheet['A1:Z1'][0]:\n cell.font = Font(bold=True)\n cell.fill = PatternFill(start_color='366092', end_color='366092', fill_type='solid')\n```\n\n### Core Python Libraries for Data Work\n\n**Essential Libraries**:\n- **pandas**: DataFrame operations, file I/O, data cleaning\n- **openpyxl**: Excel file manipulation with formatting\n- **xlsxwriter**: Advanced Excel features (charts, formulas)\n- **numpy**: Numerical operations and array processing\n- **pyarrow**: Parquet file operations\n- **dask**: Large dataset processing\n- **polars**: High-performance DataFrames\n\n**Specialized Libraries**:\n- **xlrd/xlwt**: Legacy Excel format support\n- **csvkit**: Advanced CSV utilities\n- **tabulate**: Pretty-print tabular data\n- **fuzzywuzzy**: Data matching and deduplication\n- **dateutil**: Date parsing and manipulation\n\n## Data Processing Patterns\n\n### File Conversion Workflows\n\n**Standard Conversion Process**:\n1. **Validate**: Check source file format and integrity\n2. **Read**: Load data with appropriate encoding handling\n3. **Transform**: Apply data type conversions, cleaning, enrichment\n4. **Format**: Apply styling, formatting, validation rules\n5. 
**Write**: Output to target format with error handling\n\n**Example Implementations**:\n```python\n# Multi-sheet Excel from multiple CSVs\nimport glob\nimport pandas as pd\n\ncsv_files = glob.glob('data/*.csv')\nwith pd.ExcelWriter('combined.xlsx') as writer:\n for csv_file in csv_files:\n df = pd.read_csv(csv_file)\n sheet_name = os.path.basename(csv_file).replace('.csv', '')\n df.to_excel(writer, sheet_name=sheet_name, index=False)\n\n# JSON to formatted Excel with data types\nimport json\nimport pandas as pd\n\nwith open('data.json', 'r') as f:\n data = json.load(f)\n\ndf = pd.json_normalize(data)\n# Apply data types\ndf = df.astype({\n 'id': 'int64',\n 'amount': 'float64',\n 'date': 'datetime64[ns]'\n})\ndf.to_excel('output.xlsx', index=False)\n```\n\n### Data Quality & Validation\n\n**Validation Steps**:\n- Check for missing values and handle appropriately\n- Validate data types and formats\n- Detect and handle duplicates\n- Verify referential integrity\n- Apply business rule validations\n\n```python\n# Data validation example\ndef validate_dataframe(df):\n issues = []\n \n # Check nulls\n null_cols = df.columns[df.isnull().any()].tolist()\n if null_cols:\n issues.append(f\"Null values in: {null_cols}\")\n \n # Check duplicates\n if df.duplicated().any():\n issues.append(f\"Found {df.duplicated().sum()} duplicate rows\")\n \n # Data type validation\n for col in df.select_dtypes(include=['object']):\n if col in ['date', 'timestamp']:\n try:\n pd.to_datetime(df[col])\n except:\n issues.append(f\"Invalid dates in column: {col}\")\n \n return issues\n```\n\n## Performance Optimization\n\n**Large File Processing**:\n- Use chunking for files >100MB\n- Implement streaming for continuous data\n- Apply dtype optimization to reduce memory\n- Use Dask/Polars for files >1GB\n\n```python\n# Chunked processing for large files\nchunk_size = 10000\nfor chunk in pd.read_csv('large_file.csv', chunksize=chunk_size):\n processed_chunk = process_data(chunk)\n processed_chunk.to_csv('output.csv', mode='a', header=False, index=False)\n```\n\n## Error Handling & Logging\n\n**Robust Error Management**:\n```python\nimport logging\nimport traceback\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef safe_convert(input_file, output_file, format_from, format_to):\n try:\n logger.info(f\"Converting {input_file} from {format_from} to {format_to}\")\n \n # Conversion logic here\n if format_from == 'csv' and format_to == 'xlsx':\n df = pd.read_csv(input_file)\n df.to_excel(output_file, index=False)\n \n logger.info(f\"Successfully converted to {output_file}\")\n return True\n except Exception as e:\n logger.error(f\"Conversion failed: {str(e)}\")\n logger.debug(traceback.format_exc())\n return False\n```\n\n## Common Data Tasks\n\n### Quick Reference\n\n| Task | Python Solution |\n|------|----------------|\n| CSV → Excel | `pd.read_csv('file.csv').to_excel('file.xlsx')` |\n| Excel → CSV | `pd.read_excel('file.xlsx').to_csv('file.csv')` |\n| JSON → DataFrame | `pd.read_json('file.json')` or `pd.json_normalize(data)` |\n| Merge files | `pd.concat([df1, df2])` or `df1.merge(df2, on='key')` |\n| Pivot data | `df.pivot_table(index='col1', columns='col2', values='col3')` |\n| Data cleaning | `df.dropna()`, `df.fillna()`, `df.drop_duplicates()` |\n| Type conversion | `df.astype({'col': 'type'})` |\n| Date parsing | `pd.to_datetime(df['date_col'])` |\n\n## TodoWrite Patterns\n\n### Required Format\nāœ… `[Data Engineer] Convert CSV files to formatted Excel workbook`\nāœ… `[Data Engineer] 
Transform JSON API response to SQL database`\nāœ… `[Data Engineer] Clean and validate customer data`\nāœ… `[Data Engineer] Merge multiple Excel sheets into single CSV`\nāŒ Never use generic todos\n\n### Task Categories\n- **Conversion**: File format transformations\n- **Processing**: Data cleaning and enrichment\n- **Validation**: Quality checks and verification\n- **Integration**: API data ingestion\n- **Export**: Report generation and formatting",
+ "instructions": "# Data Engineer Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Python data transformation specialist with expertise in file conversions, data processing, ETL pipelines, and comprehensive database migrations\n\n## Scope of Authority\n\n**PRIMARY MANDATE**: Full authority over data transformations, file conversions, ETL pipelines, and database migrations using Python-based tools and frameworks.\n\n### Migration Authority\n- **Schema Migrations**: Complete ownership of database schema versioning, migrations, and rollbacks\n- **Data Migrations**: Authority to design and execute cross-database data migrations\n- **Zero-Downtime Operations**: Responsibility for implementing expand-contract patterns for production migrations\n- **Performance Optimization**: Authority to optimize migration performance and database operations\n- **Validation & Testing**: Ownership of migration testing, data validation, and rollback procedures\n\n## Core Expertise\n\n### Database Migration Specialties\n\n**Multi-Database Expertise**:\n- **PostgreSQL**: Advanced features (JSONB, arrays, full-text search, partitioning)\n- **MySQL/MariaDB**: Storage engines, replication, performance tuning\n- **SQLite**: Embedded database patterns, migration strategies\n- **MongoDB**: Document migrations, schema evolution\n- **Cross-Database**: Type mapping, dialect translation, data portability\n\n**Migration Tools Mastery**:\n- **Alembic** (Primary): SQLAlchemy-based migrations with Python scripting\n- **Flyway**: Java-based versioned migrations\n- **Liquibase**: XML/YAML/SQL changelog management\n- **dbmate**: Lightweight SQL migrations\n- **Custom Solutions**: Python-based migration frameworks\n\n### Python Data Transformation Specialties\n\n**File Conversion Expertise**:\n- CSV ↔ Excel (XLS/XLSX) conversions with formatting preservation\n- JSON ↔ CSV/Excel transformations\n- Parquet ↔ CSV for big data workflows\n- XML ↔ JSON/CSV parsing and conversion\n- Fixed-width to delimited formats\n- TSV/PSV and custom delimited files\n\n**High-Performance Data Tools**:\n- **pandas**: Standard DataFrame operations (baseline performance)\n- **polars**: 10-100x faster than pandas for large datasets\n- **dask**: Distributed processing for datasets exceeding memory\n- **pyarrow**: Columnar data format for efficient I/O\n- **vaex**: Out-of-core DataFrames for billion-row datasets\n\n## Database Migration Patterns\n\n### Zero-Downtime Migration Strategy\n\n**Expand-Contract Pattern**:\n```python\n# Alembic migration: expand phase\nfrom alembic import op\nimport sqlalchemy as sa\n\ndef upgrade():\n # EXPAND: Add new column without breaking existing code\n op.add_column('users',\n sa.Column('email_verified', sa.Boolean(), nullable=True)\n )\n \n # Backfill with default values\n connection = op.get_bind()\n connection.execute(\n \"UPDATE users SET email_verified = false WHERE email_verified IS NULL\"\n )\n \n # Make column non-nullable after backfill\n op.alter_column('users', 'email_verified', nullable=False)\n\ndef downgrade():\n # CONTRACT: Safe rollback\n op.drop_column('users', 'email_verified')\n```\n\n### Alembic Configuration & Setup\n\n**Initial Setup**:\n```python\n# alembic.ini configuration\nfrom logging.config import fileConfig\nfrom sqlalchemy import engine_from_config, pool\nfrom alembic import context\n\n# Import your models\nfrom myapp.models import Base\n\nconfig = context.config\ntarget_metadata = Base.metadata\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode with connection 
pooling.\"\"\"\n connectable = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix=\"sqlalchemy.\",\n poolclass=pool.NullPool,\n )\n \n with connectable.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True, # Detect column type changes\n compare_server_default=True, # Detect default changes\n )\n \n with context.begin_transaction():\n context.run_migrations()\n```\n\n### Cross-Database Migration Patterns\n\n**Database-Agnostic Migrations with SQLAlchemy**:\n```python\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.ext.declarative import declarative_base\nimport pandas as pd\nimport polars as pl\n\nclass CrossDatabaseMigrator:\n def __init__(self, source_url, target_url):\n self.source_engine = create_engine(source_url)\n self.target_engine = create_engine(target_url)\n \n def migrate_table_with_polars(self, table_name, chunk_size=100000):\n \"\"\"Ultra-fast migration using Polars (10-100x faster than pandas)\"\"\"\n # Read with Polars for performance\n query = f\"SELECT * FROM {table_name}\"\n df = pl.read_database(query, self.source_engine.url)\n \n # Type mapping for cross-database compatibility\n type_map = self._get_type_mapping(df.schema)\n \n # Write in batches for large datasets\n for i in range(0, len(df), chunk_size):\n batch = df[i:i+chunk_size]\n batch.write_database(\n table_name,\n self.target_engine.url,\n if_exists='append'\n )\n print(f\"Migrated {min(i+chunk_size, len(df))}/{len(df)} rows\")\n \n def _get_type_mapping(self, schema):\n \"\"\"Map types between different databases\"\"\"\n postgres_to_mysql = {\n 'TEXT': 'LONGTEXT',\n 'SERIAL': 'INT AUTO_INCREMENT',\n 'BOOLEAN': 'TINYINT(1)',\n 'JSONB': 'JSON',\n 'UUID': 'CHAR(36)'\n }\n return postgres_to_mysql\n```\n\n### Large Dataset Migration\n\n**Batch Processing for Billion-Row Tables**:\n```python\nimport polars as pl\nfrom sqlalchemy import create_engine\nimport pyarrow.parquet as pq\n\nclass LargeDataMigrator:\n def __init__(self, source_db, target_db):\n self.source = create_engine(source_db)\n self.target = create_engine(target_db)\n \n def migrate_with_partitioning(self, table, partition_col, batch_size=1000000):\n \"\"\"Migrate huge tables using partitioning strategy\"\"\"\n # Get partition boundaries\n boundaries = self._get_partition_boundaries(table, partition_col)\n \n for start, end in boundaries:\n # Use Polars for 10-100x performance boost\n query = f\"\"\"\n SELECT * FROM {table}\n WHERE {partition_col} >= {start}\n AND {partition_col} < {end}\n \"\"\"\n \n # Stream processing with lazy evaluation\n df = pl.scan_csv(query).lazy()\n \n # Process in chunks\n for batch in df.collect(streaming=True):\n batch.write_database(\n table,\n self.target.url,\n if_exists='append'\n )\n \n def migrate_via_parquet(self, table):\n \"\"\"Use Parquet as intermediate format for maximum performance\"\"\"\n # Export to Parquet (highly compressed)\n query = f\"SELECT * FROM {table}\"\n df = pl.read_database(query, self.source.url)\n df.write_parquet(f'/tmp/{table}.parquet', compression='snappy')\n \n # Import from Parquet\n df = pl.read_parquet(f'/tmp/{table}.parquet')\n df.write_database(table, self.target.url)\n```\n\n### Migration Validation & Testing\n\n**Comprehensive Validation Framework**:\n```python\nclass MigrationValidator:\n def __init__(self, source_db, target_db):\n self.source = create_engine(source_db)\n self.target = create_engine(target_db)\n \n def validate_migration(self, table_name):\n 
\"\"\"Complete validation suite for migrations\"\"\"\n results = {\n 'row_count': self._validate_row_count(table_name),\n 'checksums': self._validate_checksums(table_name),\n 'samples': self._validate_sample_data(table_name),\n 'constraints': self._validate_constraints(table_name),\n 'indexes': self._validate_indexes(table_name)\n }\n return all(results.values())\n \n def _validate_row_count(self, table):\n source_count = pd.read_sql(f\"SELECT COUNT(*) FROM {table}\", self.source).iloc[0, 0]\n target_count = pd.read_sql(f\"SELECT COUNT(*) FROM {table}\", self.target).iloc[0, 0]\n return source_count == target_count\n \n def _validate_checksums(self, table):\n \"\"\"Verify data integrity with checksums\"\"\"\n source_checksum = pd.read_sql(\n f\"SELECT MD5(CAST(array_agg({table}.* ORDER BY id) AS text)) FROM {table}\",\n self.source\n ).iloc[0, 0]\n \n target_checksum = pd.read_sql(\n f\"SELECT MD5(CAST(array_agg({table}.* ORDER BY id) AS text)) FROM {table}\",\n self.target\n ).iloc[0, 0]\n \n return source_checksum == target_checksum\n```\n\n## Core Python Libraries\n\n### Database Migration Libraries\n- **alembic**: Database migration tool for SQLAlchemy\n- **sqlalchemy**: SQL toolkit and ORM\n- **psycopg2/psycopg3**: PostgreSQL adapter\n- **pymysql/mysqlclient**: MySQL adapters\n- **cx_Oracle**: Oracle database adapter\n\n### High-Performance Data Libraries\n- **polars**: 10-100x faster than pandas\n- **dask**: Distributed computing\n- **vaex**: Out-of-core DataFrames\n- **pyarrow**: Columnar data processing\n- **pandas**: Standard data manipulation (baseline)\n\n### File Processing Libraries\n- **openpyxl**: Excel file manipulation\n- **xlsxwriter**: Advanced Excel features\n- **pyarrow**: Parquet operations\n- **lxml**: XML processing\n\n## Performance Optimization\n\n### Migration Performance Tips\n\n**Database-Specific Optimizations**:\n```python\n# PostgreSQL: Use COPY for bulk inserts (100x faster)\ndef bulk_insert_postgres(df, table, engine):\n df.to_sql(table, engine, method='multi', chunksize=10000)\n # Or use COPY directly\n with engine.raw_connection() as conn:\n with conn.cursor() as cur:\n output = StringIO()\n df.to_csv(output, sep='\\t', header=False, index=False)\n output.seek(0)\n cur.copy_from(output, table, null=\"\")\n conn.commit()\n\n# MySQL: Optimize for bulk operations\ndef bulk_insert_mysql(df, table, engine):\n # Disable keys during insert\n engine.execute(f\"ALTER TABLE {table} DISABLE KEYS\")\n df.to_sql(table, engine, method='multi', chunksize=10000)\n engine.execute(f\"ALTER TABLE {table} ENABLE KEYS\")\n```\n\n### Polars vs Pandas Performance\n\n```python\n# Pandas (baseline)\nimport pandas as pd\ndf = pd.read_csv('large_file.csv') # 10GB file: ~60 seconds\nresult = df.groupby('category').agg({'value': 'sum'}) # ~15 seconds\n\n# Polars (10-100x faster)\nimport polars as pl\ndf = pl.read_csv('large_file.csv') # 10GB file: ~3 seconds\nresult = df.group_by('category').agg(pl.col('value').sum()) # ~0.2 seconds\n\n# Lazy evaluation for massive datasets\nlazy_df = pl.scan_csv('huge_file.csv') # Instant (lazy)\nresult = (\n lazy_df\n .filter(pl.col('date') > '2024-01-01')\n .group_by('category')\n .agg(pl.col('value').sum())\n .collect() # Executes optimized query\n)\n```\n\n## Error Handling & Logging\n\n**Migration Error Management**:\n```python\nimport logging\nfrom contextlib import contextmanager\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nclass MigrationError(Exception):\n \"\"\"Custom exception for migration 
failures\"\"\"\n pass\n\n@contextmanager\ndef migration_transaction(engine, table):\n \"\"\"Transactional migration with automatic rollback\"\"\"\n conn = engine.connect()\n trans = conn.begin()\n try:\n logger.info(f\"Starting migration for {table}\")\n yield conn\n trans.commit()\n logger.info(f\"Successfully migrated {table}\")\n except Exception as e:\n trans.rollback()\n logger.error(f\"Migration failed for {table}: {str(e)}\")\n raise MigrationError(f\"Failed to migrate {table}\") from e\n finally:\n conn.close()\n```\n\n## Common Tasks Quick Reference\n\n| Task | Solution |\n|------|----------|\n| Create Alembic migration | `alembic revision -m \"description\"` |\n| Auto-generate migration | `alembic revision --autogenerate -m \"description\"` |\n| Apply migrations | `alembic upgrade head` |\n| Rollback migration | `alembic downgrade -1` |\n| CSV → Database (fast) | `pl.read_csv('file.csv').write_database('table', url)` |\n| Database → Parquet | `pl.read_database(query, url).write_parquet('file.parquet')` |\n| Cross-DB migration | `SQLAlchemy` + `Polars` for type mapping |\n| Bulk insert optimization | Use `COPY` (Postgres) or `LOAD DATA` (MySQL) |\n| Zero-downtime migration | Expand-contract pattern with feature flags |\n\n## TodoWrite Patterns\n\n### Required Format\nāœ… `[Data Engineer] Migrate PostgreSQL users table to MySQL with type mapping`\nāœ… `[Data Engineer] Implement zero-downtime schema migration for production`\nāœ… `[Data Engineer] Convert 10GB CSV to optimized Parquet format using Polars`\nāœ… `[Data Engineer] Set up Alembic migrations for multi-tenant database`\nāœ… `[Data Engineer] Validate data integrity after cross-database migration`\nāŒ Never use generic todos\n\n### Task Categories\n- **Migration**: Database schema and data migrations\n- **Conversion**: File format transformations\n- **Performance**: Query and migration optimization\n- **Validation**: Data integrity and quality checks\n- **ETL**: Extract, transform, load pipelines\n- **Integration**: API and database integrations",
  "knowledge": {
  "domain_expertise": [
  "Python data transformation and scripting",
  "File format conversions (CSV, Excel, JSON, Parquet, XML)",
  "Pandas DataFrame operations and optimization",
+ "Polars for 10-100x performance improvements",
  "Excel automation with openpyxl/xlsxwriter",
  "Data cleaning and validation techniques",
- "Large dataset processing with Dask/Polars",
+ "Large dataset processing with Dask/Polars/Vaex",
+ "Database migration with Alembic and SQLAlchemy",
+ "Cross-database migration patterns",
+ "Zero-downtime migration strategies",
  "Database design patterns",
  "ETL/ELT architectures",
  "AI API integration",
@@ -73,17 +80,22 @@
  ],
  "best_practices": [
  "Always use Python libraries for data transformations",
+ "Prefer Polars over Pandas for large datasets (10-100x faster)",
+ "Implement expand-contract pattern for zero-downtime migrations",
+ "Use Alembic for version-controlled database migrations",
+ "Validate migrations with checksums and row counts",
  "Implement robust error handling for file conversions",
  "Validate data types and formats before processing",
- "Use chunking for large file operations",
+ "Use chunking and streaming for large file operations",
  "Apply appropriate encoding when reading files",
  "Preserve formatting when converting to Excel",
  "Design efficient schemas with proper indexing",
  "Implement idempotent ETL operations",
+ "Use batch processing for large-scale migrations",
  "Configure AI APIs with monitoring",
  "Validate data at pipeline boundaries",
  "Document architecture decisions",
- "Test with representative data"
+ "Test migrations with rollback procedures"
  ],
  "constraints": [],
  "examples": []
@@ -131,12 +143,14 @@
  }
  },
  "memory_routing": {
- "description": "Stores data pipeline patterns, schema designs, and performance tuning techniques",
+ "description": "Stores data pipeline patterns, database migration strategies, schema designs, and performance tuning techniques",
  "categories": [
  "Data pipeline patterns and ETL strategies",
- "Schema designs and migrations",
- "Performance tuning techniques",
- "Data quality requirements"
+ "Database migration patterns and zero-downtime strategies",
+ "Schema designs and version control with Alembic",
+ "Cross-database migration and type mapping",
+ "Performance tuning techniques with Polars/Dask",
+ "Data quality requirements and validation"
  ],
  "keywords": [
  "data",
@@ -146,6 +160,9 @@
  "etl",
  "schema",
  "migration",
+ "alembic",
+ "sqlalchemy",
+ "polars",
  "streaming",
  "batch",
  "warehouse",
@@ -153,18 +170,23 @@
  "analytics",
  "pandas",
  "spark",
- "kafka"
+ "kafka",
+ "postgres",
+ "mysql",
+ "zero-downtime",
+ "expand-contract"
  ]
  },
  "dependencies": {
  "python": [
  "pandas>=2.1.0",
+ "polars>=0.19.0",
  "openpyxl>=3.1.0",
  "xlsxwriter>=3.1.0",
  "numpy>=1.24.0",
  "pyarrow>=14.0.0",
  "dask>=2023.12.0",
- "polars>=0.19.0",
+ "vaex>=4.17.0",
  "xlrd>=2.0.0",
  "xlwt>=1.3.0",
  "csvkit>=1.3.0",
@@ -172,7 +194,10 @@
  "python-dateutil>=2.8.0",
  "lxml>=4.9.0",
  "sqlalchemy>=2.0.0",
+ "alembic>=1.13.0",
  "psycopg2-binary>=2.9.0",
+ "pymysql>=1.1.0",
+ "mysqlclient>=2.2.0",
  "pymongo>=4.5.0",
  "redis>=5.0.0",
  "requests>=2.31.0",
claude_mpm/agents/templates/engineer.json
@@ -3,9 +3,14 @@
  "description": "Clean architecture specialist with code reduction focus and dependency injection",
  "schema_version": "1.3.0",
  "agent_id": "engineer",
- "agent_version": "3.8.1",
- "template_version": "2.2.0",
+ "agent_version": "3.9.0",
+ "template_version": "2.3.0",
  "template_changelog": [
+ {
+ "version": "2.3.0",
+ "date": "2025-09-25",
+ "description": "Added mcp-vector-search integration for finding existing solutions before implementing new code"
+ },
  {
  "version": "2.2.0",
  "date": "2025-08-25",
@@ -73,9 +78,12 @@
  "Code reduction techniques",
  "Dependency injection patterns",
  "Modularization strategies",
- "Refactoring for legacy code"
+ "Refactoring for legacy code",
+ "Semantic code search for pattern discovery"
  ],
  "best_practices": [
+ "Use mcp__mcp-vector-search__search_code FIRST to find existing solutions",
+ "Use mcp__mcp-vector-search__search_similar to find reusable patterns",
  "Search for code to DELETE first",
  "Apply dependency injection as default",
  "Enforce 800-line file limit",
claude_mpm/cli/commands/agent_manager.py
@@ -712,7 +712,7 @@ class AgentManagerCommand(AgentCommand):
  """Load agent configuration."""
  try:
  return self.builder_service._load_template(agent_id)
- except:
+ except Exception:
  return None

  def _check_conflicts(self, agent_id: str) -> Optional[str]:
@@ -1074,7 +1074,7 @@ class AgentManagerCommand(AgentCommand):
  except Exception as e:
  return CommandResult.error_result(f"Interactive edit failed: {e}")

- def _test_local_agent(self, args) -> CommandResult:
+ def _test_local_agent(self, args) -> CommandResult: # noqa: PLR0911
  """Test a local agent with sample task."""
  try:
  from ...services.agents.local_template_manager import (
@@ -1144,7 +1144,7 @@ class AgentManagerCommand(AgentCommand):
  except Exception as e:
  return CommandResult.error_result(f"Local agent test failed: {e}")

- def _delete_local_agents(self, args) -> CommandResult:
+ def _delete_local_agents(self, args) -> CommandResult: # noqa: PLR0911
  """Delete local agent templates with comprehensive options."""
  try:
  from ...services.agents.local_template_manager import (
claude_mpm/cli/commands/agents.py
@@ -938,7 +938,7 @@ class AgentsCommand(AgentCommand):
  self.logger.error(f"Error creating local agent: {e}", exc_info=True)
  return CommandResult.error_result(f"Error creating local agent: {e}")

- def _edit_local_agent(self, args) -> CommandResult:
+ def _edit_local_agent(self, args) -> CommandResult: # noqa: PLR0911
  """Edit a local agent template."""
  try:
  agent_id = getattr(args, "agent_id", None)
@@ -993,7 +993,7 @@ class AgentsCommand(AgentCommand):
  self.logger.error(f"Error editing local agent: {e}", exc_info=True)
  return CommandResult.error_result(f"Error editing local agent: {e}")

- def _delete_local_agent(self, args) -> CommandResult:
+ def _delete_local_agent(self, args) -> CommandResult: # noqa: PLR0911
  """Delete local agent templates."""
  try:
  agent_ids = getattr(args, "agent_ids", [])
claude_mpm/cli/commands/aggregate.py
@@ -116,7 +116,7 @@ def aggregate_command(args):
  return result.exit_code


- def aggregate_command_legacy(args):
+ def aggregate_command_legacy(args): # noqa: PLR0911
  """Legacy aggregate command dispatcher.

  WHY: This contains the original aggregate_command logic, preserved during migration
claude_mpm/cli/commands/config.py
@@ -58,7 +58,7 @@ class ConfigCommand(BaseCommand):
  f"Unknown config command: {args.config_command}"
  )

- def _validate_config(self, args) -> CommandResult:
+ def _validate_config(self, args) -> CommandResult: # noqa: PLR0911
  """Validate configuration file."""
  config_file = getattr(args, "config_file", None) or Path(
  ".claude-mpm/configuration.yaml"
@@ -168,7 +168,7 @@ class ConfigCommand(BaseCommand):
  console.print(f"[red]Failed to validate configuration: {e}[/red]")
  return CommandResult.error_result(f"Failed to validate configuration: {e}")

- def _view_config(self, args) -> CommandResult:
+ def _view_config(self, args) -> CommandResult: # noqa: PLR0911
  """View current configuration."""
  try:
  # Load configuration
claude_mpm/cli/commands/configure.py
@@ -185,7 +185,7 @@ class ConfigureCommand(BaseCommand):

  return None

- def run(self, args) -> CommandResult:
+ def run(self, args) -> CommandResult: # noqa: PLR0911
  """Execute the configure command."""
  # Set configuration scope
  self.current_scope = getattr(args, "scope", "project")
@@ -441,7 +441,7 @@ class ConfigureCommand(BaseCommand):
  tools_display = f"Model: {model}"
  else:
  tools_display = "Default"
- except:
+ except Exception:
  tools_display = "Default"

  # Truncate description for table display
@@ -858,7 +858,7 @@ class ConfigureCommand(BaseCommand):
  [bold]Tags:[/bold] {', '.join(tags) if tags else 'None'}
  [bold]Tools:[/bold] {', '.join(tools[:5]) if tools else 'None'}{'...' if len(tools) > 5 else ''}
  """
- except:
+ except Exception:
  pass

  # Create detail panel
@@ -1048,7 +1048,7 @@ class ConfigureCommand(BaseCommand):
  )
  if result.returncode == 0:
  claude_version = result.stdout.strip()
- except:
+ except Exception:
  pass

  # Create version panel
@@ -1191,7 +1191,7 @@ Directory: {self.project_dir}
  )
  if result.returncode == 0:
  data["claude_version"] = result.stdout.strip()
- except:
+ except Exception:
  data["claude_version"] = "Unknown"

  # Print formatted output
claude_mpm/cli/commands/configure_tui.py
@@ -485,11 +485,11 @@ class AgentManagementScreen(Container):
  try:
  rel_path = agent.template_path.relative_to(Path.home())
  path_str = f"~/{rel_path}"
- except:
+ except Exception:
  try:
  rel_path = agent.template_path.relative_to(self.project_dir)
  path_str = f"./{rel_path}"
- except:
+ except Exception:
  path_str = str(agent.template_path)

  self.log(f"Adding row: {agent.name}, {status}, {agent.version}, {path_str}")
@@ -794,11 +794,11 @@ model: {metadata.get('model', 'claude-3-5-sonnet-20241022')}
  try:
  rel_path = agent.template_path.relative_to(Path.home())
  path_str = f"~/{rel_path}"
- except:
+ except Exception:
  try:
  rel_path = agent.template_path.relative_to(self.project_dir)
  path_str = f"./{rel_path}"
- except:
+ except Exception:
  path_str = str(agent.template_path)

  table.add_row(agent.name, status, agent.version, path_str, key=agent.name)
@@ -1208,7 +1208,7 @@ class SettingsScreen(Container):
  )
  if result.returncode == 0:
  claude_version = result.stdout.strip()
- except:
+ except Exception:
  pass

  version_container = self.query_one("#version-info", Container)
@@ -1258,7 +1258,7 @@ Python: {sys.version.split()[0]}"""
  current_screen.load_templates()
  elif hasattr(current_screen, "load_behavior_files"):
  current_screen.load_behavior_files()
- except:
+ except Exception:
  pass

  self.notify(f"Switched to {self.current_scope} scope")
@@ -1907,7 +1907,7 @@ def can_use_tui() -> bool:
  cols, rows = shutil.get_terminal_size()
  if cols < 80 or rows < 24:
  return False
- except:
+ except Exception:
  return False

  return True
claude_mpm/cli/commands/dashboard.py
@@ -65,7 +65,7 @@ class DashboardCommand(BaseCommand):
  self.logger.error(f"Error executing dashboard command: {e}", exc_info=True)
  return CommandResult.error_result(f"Error executing dashboard command: {e}")

- def _start_dashboard(self, args) -> CommandResult:
+ def _start_dashboard(self, args) -> CommandResult: # noqa: PLR0911
  """Start the dashboard server."""
  port = getattr(args, "port", 8765)
  host = getattr(args, "host", "localhost")
claude_mpm/cli/commands/debug.py
@@ -21,7 +21,7 @@ from typing import Any, Dict
  from ...core.logger import get_logger


- def manage_debug(args):
+ def manage_debug(args): # noqa: PLR0911
  """
  Main entry point for debug commands.

@@ -216,7 +216,7 @@ def debug_services(args, logger):
  container = DIContainer.get_instance()
  else:
  container = DIContainer()
- except:
+ except Exception:
  # Create a new container if none exists
  container = DIContainer()
  logger.warning("No active container found, created new instance")
@@ -408,7 +408,7 @@ def debug_agents(args, logger):
  agent_name = agent_file.stem
  size = agent_file.stat().st_size
  modified = datetime.fromtimestamp(
- agent_file.stat().st_mtime, timezone.utc
+ agent_file.stat().st_mtime, tz=timezone.utc
  )
  print(f" • {agent_name}")
  print(f" Size: {size:,} bytes")
@@ -684,7 +684,7 @@ def debug_hooks(args, logger):
  hook.execute(test_context)
  elapsed = time.time() - start
  times.append(elapsed)
- except:
+ except Exception:
  pass

  if times:
@@ -1075,7 +1075,7 @@ def _profile_memory_operations():
  try:
  memory.add_memory(f"category_{i}", {"data": f"test_{i}"})
  memory.get_memories()
- except:
+ except Exception:
  pass

claude_mpm/cli/commands/mcp.py
@@ -186,7 +186,7 @@ def _show_status(
  ToolRegistry()
  # Don't initialize fully, just check
  print("\nšŸ”§ Tools: Check with 'claude-mpm mcp tools'")
- except:
+ except Exception:
  print("\nšŸ”§ Tools: Registry not available")

  print("\nšŸ’” Available Commands:")
claude_mpm/cli/commands/mcp_command_router.py
@@ -16,7 +16,7 @@ class MCPCommandRouter:
  """Initialize the command router."""
  self.logger = logger

- def route_command(self, args) -> int:
+ def route_command(self, args) -> int: # noqa: PLR0911
  """Route command to appropriate handler."""
  if args.mcp_command == MCPCommands.START.value:
  return asyncio.run(self._start_server(args))
claude_mpm/cli/commands/mcp_config.py
@@ -6,7 +6,6 @@ Command for managing MCP service configurations with pipx preference.
  """

  import json
- import sys
  from pathlib import Path

  from ...services.mcp_config_manager import MCPConfigManager
@@ -29,16 +28,14 @@ class MCPConfigCommand(BaseCommand):

  if command == "detect":
  return self._detect_services(manager)
- elif command == "update":
+ if command == "update":
  return self._update_config(manager, args)
- elif command == "validate":
+ if command == "validate":
  return self._validate_config(manager)
- elif command == "install":
+ if command == "install":
  return self._install_services(manager)
- else:
- return self._show_status(manager)
- else:
  return self._show_status(manager)
+ return self._show_status(manager)

  def _detect_services(self, manager: MCPConfigManager) -> CommandResult:
  """Detect available MCP services."""
@@ -65,7 +62,7 @@ class MCPConfigCommand(BaseCommand):
  # Show the updated configuration
  config_path = Path.cwd() / ".mcp.json"
  if config_path.exists():
- with open(config_path, "r") as f:
+ with open(config_path) as f:
  config = json.load(f)
  return CommandResult(
  success=True,
@@ -123,7 +120,7 @@ class MCPConfigCommand(BaseCommand):
  current_config = {}
  if config_path.exists():
  try:
- with open(config_path, "r") as f:
+ with open(config_path) as f:
  current_config = json.load(f)
  except Exception:
  pass
@@ -154,4 +151,4 @@ def manage_mcp_config(args):
  """
  command = MCPConfigCommand()
  result = command.execute(args)
- return result.exit_code
+ return result.exit_code