claude-mpm 4.3.11__py3-none-any.whl → 4.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/PM_INSTRUCTIONS.md +390 -28
- claude_mpm/agents/templates/data_engineer.json +39 -14
- claude_mpm/agents/templates/research.json +20 -8
- claude_mpm/agents/templates/web_qa.json +25 -10
- claude_mpm/cli/__init__.py +1 -0
- claude_mpm/cli/commands/agent_manager.py +3 -3
- claude_mpm/cli/commands/agents.py +2 -2
- claude_mpm/cli/commands/aggregate.py +1 -1
- claude_mpm/cli/commands/config.py +2 -2
- claude_mpm/cli/commands/configure.py +5 -5
- claude_mpm/cli/commands/configure_tui.py +7 -7
- claude_mpm/cli/commands/dashboard.py +1 -1
- claude_mpm/cli/commands/debug.py +5 -5
- claude_mpm/cli/commands/mcp.py +1 -1
- claude_mpm/cli/commands/mcp_command_router.py +12 -1
- claude_mpm/cli/commands/mcp_config.py +154 -0
- claude_mpm/cli/commands/mcp_external_commands.py +249 -0
- claude_mpm/cli/commands/mcp_install_commands.py +93 -24
- claude_mpm/cli/commands/mcp_setup_external.py +870 -0
- claude_mpm/cli/commands/monitor.py +2 -2
- claude_mpm/cli/commands/mpm_init_handler.py +1 -1
- claude_mpm/cli/commands/run.py +114 -0
- claude_mpm/cli/commands/search.py +292 -0
- claude_mpm/cli/interactive/agent_wizard.py +2 -2
- claude_mpm/cli/parsers/base_parser.py +13 -0
- claude_mpm/cli/parsers/mcp_parser.py +15 -0
- claude_mpm/cli/parsers/run_parser.py +5 -0
- claude_mpm/cli/parsers/search_parser.py +245 -0
- claude_mpm/cli/startup_logging.py +3 -5
- claude_mpm/cli/utils.py +1 -1
- claude_mpm/constants.py +1 -0
- claude_mpm/core/agent_registry.py +12 -8
- claude_mpm/core/agent_session_manager.py +8 -8
- claude_mpm/core/api_validator.py +4 -4
- claude_mpm/core/base_service.py +10 -10
- claude_mpm/core/cache.py +5 -5
- claude_mpm/core/config_constants.py +1 -1
- claude_mpm/core/container.py +1 -1
- claude_mpm/core/error_handler.py +2 -2
- claude_mpm/core/file_utils.py +1 -1
- claude_mpm/core/framework_loader.py +3 -3
- claude_mpm/core/hook_manager.py +8 -6
- claude_mpm/core/instruction_reinforcement_hook.py +2 -2
- claude_mpm/core/interactive_session.py +1 -1
- claude_mpm/core/lazy.py +3 -3
- claude_mpm/core/log_manager.py +16 -12
- claude_mpm/core/logger.py +16 -11
- claude_mpm/core/logging_config.py +4 -2
- claude_mpm/core/oneshot_session.py +1 -1
- claude_mpm/core/optimized_agent_loader.py +6 -6
- claude_mpm/core/output_style_manager.py +1 -1
- claude_mpm/core/pm_hook_interceptor.py +3 -3
- claude_mpm/core/service_registry.py +1 -1
- claude_mpm/core/session_manager.py +11 -9
- claude_mpm/core/socketio_pool.py +13 -13
- claude_mpm/core/types.py +2 -2
- claude_mpm/core/unified_agent_registry.py +9 -2
- claude_mpm/core/unified_paths.py +1 -1
- claude_mpm/dashboard/analysis_runner.py +4 -4
- claude_mpm/dashboard/api/simple_directory.py +1 -1
- claude_mpm/generators/agent_profile_generator.py +4 -2
- claude_mpm/hooks/base_hook.py +2 -2
- claude_mpm/hooks/claude_hooks/connection_pool.py +4 -4
- claude_mpm/hooks/claude_hooks/event_handlers.py +12 -12
- claude_mpm/hooks/claude_hooks/hook_handler.py +4 -4
- claude_mpm/hooks/claude_hooks/hook_handler_eventbus.py +3 -3
- claude_mpm/hooks/claude_hooks/hook_handler_original.py +15 -14
- claude_mpm/hooks/claude_hooks/hook_handler_refactored.py +4 -4
- claude_mpm/hooks/claude_hooks/installer.py +3 -3
- claude_mpm/hooks/claude_hooks/memory_integration.py +3 -3
- claude_mpm/hooks/claude_hooks/response_tracking.py +3 -3
- claude_mpm/hooks/claude_hooks/services/connection_manager.py +5 -5
- claude_mpm/hooks/claude_hooks/services/connection_manager_http.py +3 -3
- claude_mpm/hooks/claude_hooks/services/state_manager.py +8 -7
- claude_mpm/hooks/claude_hooks/services/subagent_processor.py +3 -3
- claude_mpm/hooks/claude_hooks/tool_analysis.py +2 -2
- claude_mpm/hooks/memory_integration_hook.py +1 -1
- claude_mpm/hooks/tool_call_interceptor.py +2 -2
- claude_mpm/models/agent_session.py +5 -5
- claude_mpm/services/__init__.py +1 -1
- claude_mpm/services/agent_capabilities_service.py +1 -1
- claude_mpm/services/agents/agent_builder.py +3 -3
- claude_mpm/services/agents/deployment/agent_deployment.py +29 -13
- claude_mpm/services/agents/deployment/agent_discovery_service.py +22 -6
- claude_mpm/services/agents/deployment/agent_filesystem_manager.py +7 -5
- claude_mpm/services/agents/deployment/agent_lifecycle_manager.py +3 -1
- claude_mpm/services/agents/deployment/agent_metrics_collector.py +1 -1
- claude_mpm/services/agents/deployment/agent_operation_service.py +2 -2
- claude_mpm/services/agents/deployment/agent_state_service.py +2 -2
- claude_mpm/services/agents/deployment/agent_template_builder.py +1 -1
- claude_mpm/services/agents/deployment/agent_versioning.py +1 -1
- claude_mpm/services/agents/deployment/deployment_wrapper.py +2 -3
- claude_mpm/services/agents/deployment/multi_source_deployment_service.py +6 -4
- claude_mpm/services/agents/deployment/pipeline/steps/agent_processing_step.py +1 -1
- claude_mpm/services/agents/loading/agent_profile_loader.py +5 -3
- claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
- claude_mpm/services/agents/local_template_manager.py +6 -6
- claude_mpm/services/agents/management/agent_management_service.py +3 -3
- claude_mpm/services/agents/memory/content_manager.py +3 -3
- claude_mpm/services/agents/memory/memory_format_service.py +2 -2
- claude_mpm/services/agents/memory/template_generator.py +3 -3
- claude_mpm/services/agents/registry/__init__.py +1 -1
- claude_mpm/services/agents/registry/modification_tracker.py +2 -2
- claude_mpm/services/async_session_logger.py +3 -3
- claude_mpm/services/claude_session_logger.py +4 -4
- claude_mpm/services/cli/agent_cleanup_service.py +5 -0
- claude_mpm/services/cli/agent_listing_service.py +1 -1
- claude_mpm/services/cli/agent_validation_service.py +1 -0
- claude_mpm/services/cli/memory_crud_service.py +11 -6
- claude_mpm/services/cli/memory_output_formatter.py +1 -1
- claude_mpm/services/cli/session_manager.py +15 -11
- claude_mpm/services/cli/unified_dashboard_manager.py +1 -1
- claude_mpm/services/core/memory_manager.py +81 -23
- claude_mpm/services/core/path_resolver.py +2 -2
- claude_mpm/services/diagnostics/checks/installation_check.py +1 -1
- claude_mpm/services/event_aggregator.py +4 -2
- claude_mpm/services/event_bus/direct_relay.py +5 -3
- claude_mpm/services/event_bus/event_bus.py +3 -3
- claude_mpm/services/event_bus/relay.py +6 -4
- claude_mpm/services/events/consumers/dead_letter.py +5 -3
- claude_mpm/services/events/core.py +3 -3
- claude_mpm/services/events/producers/hook.py +6 -6
- claude_mpm/services/events/producers/system.py +8 -8
- claude_mpm/services/exceptions.py +5 -5
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +3 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/__init__.py +2 -2
- claude_mpm/services/hook_installer_service.py +1 -1
- claude_mpm/services/infrastructure/context_preservation.py +6 -4
- claude_mpm/services/infrastructure/daemon_manager.py +2 -2
- claude_mpm/services/infrastructure/logging.py +2 -2
- claude_mpm/services/mcp_config_manager.py +439 -0
- claude_mpm/services/mcp_gateway/__init__.py +1 -1
- claude_mpm/services/mcp_gateway/auto_configure.py +3 -3
- claude_mpm/services/mcp_gateway/config/config_loader.py +1 -1
- claude_mpm/services/mcp_gateway/config/configuration.py +18 -1
- claude_mpm/services/mcp_gateway/core/base.py +2 -2
- claude_mpm/services/mcp_gateway/main.py +52 -0
- claude_mpm/services/mcp_gateway/registry/tool_registry.py +10 -8
- claude_mpm/services/mcp_gateway/server/mcp_gateway.py +4 -4
- claude_mpm/services/mcp_gateway/server/stdio_handler.py +1 -1
- claude_mpm/services/mcp_gateway/server/stdio_server.py +4 -3
- claude_mpm/services/mcp_gateway/tools/base_adapter.py +15 -15
- claude_mpm/services/mcp_gateway/tools/document_summarizer.py +7 -5
- claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +443 -0
- claude_mpm/services/mcp_gateway/tools/health_check_tool.py +5 -5
- claude_mpm/services/mcp_gateway/tools/hello_world.py +9 -9
- claude_mpm/services/mcp_gateway/tools/ticket_tools.py +16 -16
- claude_mpm/services/mcp_gateway/tools/unified_ticket_tool.py +17 -17
- claude_mpm/services/memory/builder.py +7 -5
- claude_mpm/services/memory/indexed_memory.py +4 -4
- claude_mpm/services/memory/optimizer.py +6 -6
- claude_mpm/services/memory/router.py +3 -3
- claude_mpm/services/monitor/daemon.py +1 -1
- claude_mpm/services/monitor/daemon_manager.py +6 -6
- claude_mpm/services/monitor/event_emitter.py +2 -2
- claude_mpm/services/monitor/handlers/file.py +1 -1
- claude_mpm/services/monitor/management/lifecycle.py +1 -1
- claude_mpm/services/monitor/server.py +4 -4
- claude_mpm/services/monitor_build_service.py +2 -2
- claude_mpm/services/port_manager.py +2 -2
- claude_mpm/services/response_tracker.py +2 -2
- claude_mpm/services/session_management_service.py +3 -2
- claude_mpm/services/socketio/client_proxy.py +2 -2
- claude_mpm/services/socketio/dashboard_server.py +4 -3
- claude_mpm/services/socketio/event_normalizer.py +12 -8
- claude_mpm/services/socketio/handlers/base.py +2 -2
- claude_mpm/services/socketio/handlers/connection.py +10 -10
- claude_mpm/services/socketio/handlers/connection_handler.py +13 -10
- claude_mpm/services/socketio/handlers/file.py +1 -1
- claude_mpm/services/socketio/handlers/git.py +1 -1
- claude_mpm/services/socketio/handlers/hook.py +16 -15
- claude_mpm/services/socketio/migration_utils.py +1 -1
- claude_mpm/services/socketio/monitor_client.py +5 -5
- claude_mpm/services/socketio/server/broadcaster.py +9 -7
- claude_mpm/services/socketio/server/connection_manager.py +2 -2
- claude_mpm/services/socketio/server/core.py +7 -5
- claude_mpm/services/socketio/server/eventbus_integration.py +18 -11
- claude_mpm/services/socketio/server/main.py +13 -13
- claude_mpm/services/socketio_client_manager.py +4 -4
- claude_mpm/services/system_instructions_service.py +2 -2
- claude_mpm/services/ticket_services/validation_service.py +1 -1
- claude_mpm/services/utility_service.py +5 -2
- claude_mpm/services/version_control/branch_strategy.py +2 -2
- claude_mpm/services/version_control/git_operations.py +22 -20
- claude_mpm/services/version_control/semantic_versioning.py +3 -3
- claude_mpm/services/version_control/version_parser.py +7 -5
- claude_mpm/services/visualization/mermaid_generator.py +1 -1
- claude_mpm/storage/state_storage.py +1 -1
- claude_mpm/tools/code_tree_analyzer.py +19 -18
- claude_mpm/tools/code_tree_builder.py +2 -2
- claude_mpm/tools/code_tree_events.py +10 -8
- claude_mpm/tools/socketio_debug.py +3 -3
- claude_mpm/utils/agent_dependency_loader.py +2 -2
- claude_mpm/utils/dependency_strategies.py +8 -3
- claude_mpm/utils/environment_context.py +2 -2
- claude_mpm/utils/error_handler.py +2 -2
- claude_mpm/utils/file_utils.py +1 -1
- claude_mpm/utils/imports.py +1 -1
- claude_mpm/utils/log_cleanup.py +21 -7
- claude_mpm/validation/agent_validator.py +2 -2
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/METADATA +4 -1
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/RECORD +207 -200
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.11.dist-info → claude_mpm-4.3.13.dist-info}/top_level.txt +0 -0
claude_mpm/agents/templates/data_engineer.json
CHANGED

@@ -1,11 +1,11 @@
 {
   "schema_version": "1.2.0",
   "agent_id": "data-engineer",
-  "agent_version": "2.
+  "agent_version": "2.5.0",
   "agent_type": "engineer",
   "metadata": {
     "name": "Data Engineer Agent",
-    "description": "Python-powered data transformation specialist for file conversions, ETL pipelines, and data processing",
+    "description": "Python-powered data transformation specialist for file conversions, ETL pipelines, database migrations, and data processing",
     "category": "engineering",
     "tags": [
       "data",
@@ -19,11 +19,14 @@
       "ai-apis",
       "database",
       "pipelines",
-      "ETL"
+      "ETL",
+      "migration",
+      "alembic",
+      "sqlalchemy"
     ],
     "author": "Claude MPM Team",
     "created_at": "2025-07-27T03:45:51.463500Z",
-    "updated_at": "2025-09-
+    "updated_at": "2025-09-25T00:00:00.000000Z",
     "color": "yellow"
   },
   "capabilities": {
@@ -55,15 +58,19 @@
       ]
     }
   },
-  "instructions": "# Data Engineer Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Python data transformation specialist with expertise in file conversions, data processing, and ETL pipelines\n\n## Core Expertise\n\n**PRIMARY MANDATE**: Use Python scripting and data tools (pandas, openpyxl, xlsxwriter, etc.) to perform data transformations, file conversions, and processing tasks.\n\n### Python Data Transformation Specialties\n\n**File Conversion Expertise**:\n- CSV ↔ Excel (XLS/XLSX) conversions with formatting preservation\n- JSON ↔ CSV/Excel transformations\n- Parquet ↔ CSV for big data workflows\n- XML ↔ JSON/CSV parsing and conversion\n- Fixed-width to delimited formats\n- TSV/PSV and custom delimited files\n\n**Data Processing Capabilities**:\n```python\n# Example: CSV to Excel with formatting\nimport pandas as pd\nfrom openpyxl.styles import Font, Alignment, PatternFill\n\n# Read CSV\ndf = pd.read_csv('input.csv')\n\n# Data transformations\ndf['date'] = pd.to_datetime(df['date'])\ndf['amount'] = df['amount'].astype(float)\n\n# Write to Excel with formatting\nwith pd.ExcelWriter('output.xlsx', engine='openpyxl') as writer:\n df.to_excel(writer, sheet_name='Data', index=False)\n worksheet = writer.sheets['Data']\n \n # Apply formatting\n for cell in worksheet['A1:Z1'][0]:\n cell.font = Font(bold=True)\n cell.fill = PatternFill(start_color='366092', end_color='366092', fill_type='solid')\n```\n\n### Core Python Libraries for Data Work\n\n**Essential Libraries**:\n- **pandas**: DataFrame operations, file I/O, data cleaning\n- **openpyxl**: Excel file manipulation with formatting\n- **xlsxwriter**: Advanced Excel features (charts, formulas)\n- **numpy**: Numerical operations and array processing\n- **pyarrow**: Parquet file operations\n- **dask**: Large dataset processing\n- **polars**: High-performance DataFrames\n\n**Specialized Libraries**:\n- **xlrd/xlwt**: Legacy Excel format support\n- **csvkit**: Advanced CSV utilities\n- **tabulate**: Pretty-print tabular data\n- **fuzzywuzzy**: Data matching and deduplication\n- **dateutil**: Date parsing and manipulation\n\n## Data Processing Patterns\n\n### File Conversion Workflows\n\n**Standard Conversion Process**:\n1. **Validate**: Check source file format and integrity\n2. **Read**: Load data with appropriate encoding handling\n3. **Transform**: Apply data type conversions, cleaning, enrichment\n4. **Format**: Apply styling, formatting, validation rules\n5. **Write**: Output to target format with error handling\n\n**Example Implementations**:\n```python\n# Multi-sheet Excel from multiple CSVs\nimport glob\nimport pandas as pd\n\ncsv_files = glob.glob('data/*.csv')\nwith pd.ExcelWriter('combined.xlsx') as writer:\n for csv_file in csv_files:\n df = pd.read_csv(csv_file)\n sheet_name = os.path.basename(csv_file).replace('.csv', '')\n df.to_excel(writer, sheet_name=sheet_name, index=False)\n\n# JSON to formatted Excel with data types\nimport json\nimport pandas as pd\n\nwith open('data.json', 'r') as f:\n data = json.load(f)\n\ndf = pd.json_normalize(data)\n# Apply data types\ndf = df.astype({\n 'id': 'int64',\n 'amount': 'float64',\n 'date': 'datetime64[ns]'\n})\ndf.to_excel('output.xlsx', index=False)\n```\n\n### Data Quality & Validation\n\n**Validation Steps**:\n- Check for missing values and handle appropriately\n- Validate data types and formats\n- Detect and handle duplicates\n- Verify referential integrity\n- Apply business rule validations\n\n```python\n# Data validation example\ndef validate_dataframe(df):\n issues = []\n \n # Check nulls\n null_cols = df.columns[df.isnull().any()].tolist()\n if null_cols:\n issues.append(f\"Null values in: {null_cols}\")\n \n # Check duplicates\n if df.duplicated().any():\n issues.append(f\"Found {df.duplicated().sum()} duplicate rows\")\n \n # Data type validation\n for col in df.select_dtypes(include=['object']):\n if col in ['date', 'timestamp']:\n try:\n pd.to_datetime(df[col])\n except:\n issues.append(f\"Invalid dates in column: {col}\")\n \n return issues\n```\n\n## Performance Optimization\n\n**Large File Processing**:\n- Use chunking for files >100MB\n- Implement streaming for continuous data\n- Apply dtype optimization to reduce memory\n- Use Dask/Polars for files >1GB\n\n```python\n# Chunked processing for large files\nchunk_size = 10000\nfor chunk in pd.read_csv('large_file.csv', chunksize=chunk_size):\n processed_chunk = process_data(chunk)\n processed_chunk.to_csv('output.csv', mode='a', header=False, index=False)\n```\n\n## Error Handling & Logging\n\n**Robust Error Management**:\n```python\nimport logging\nimport traceback\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef safe_convert(input_file, output_file, format_from, format_to):\n try:\n logger.info(f\"Converting {input_file} from {format_from} to {format_to}\")\n \n # Conversion logic here\n if format_from == 'csv' and format_to == 'xlsx':\n df = pd.read_csv(input_file)\n df.to_excel(output_file, index=False)\n \n logger.info(f\"Successfully converted to {output_file}\")\n return True\n except Exception as e:\n logger.error(f\"Conversion failed: {str(e)}\")\n logger.debug(traceback.format_exc())\n return False\n```\n\n## Common Data Tasks\n\n### Quick Reference\n\n| Task | Python Solution |\n|------|----------------|\n| CSV → Excel | `pd.read_csv('file.csv').to_excel('file.xlsx')` |\n| Excel → CSV | `pd.read_excel('file.xlsx').to_csv('file.csv')` |\n| JSON → DataFrame | `pd.read_json('file.json')` or `pd.json_normalize(data)` |\n| Merge files | `pd.concat([df1, df2])` or `df1.merge(df2, on='key')` |\n| Pivot data | `df.pivot_table(index='col1', columns='col2', values='col3')` |\n| Data cleaning | `df.dropna()`, `df.fillna()`, `df.drop_duplicates()` |\n| Type conversion | `df.astype({'col': 'type'})` |\n| Date parsing | `pd.to_datetime(df['date_col'])` |\n\n## TodoWrite Patterns\n\n### Required Format\n✅ `[Data Engineer] Convert CSV files to formatted Excel workbook`\n✅ `[Data Engineer] Transform JSON API response to SQL database`\n✅ `[Data Engineer] Clean and validate customer data`\n✅ `[Data Engineer] Merge multiple Excel sheets into single CSV`\n❌ Never use generic todos\n\n### Task Categories\n- **Conversion**: File format transformations\n- **Processing**: Data cleaning and enrichment\n- **Validation**: Quality checks and verification\n- **Integration**: API data ingestion\n- **Export**: Report generation and formatting",
+  "instructions": "# Data Engineer Agent\n\n**Inherits from**: BASE_AGENT_TEMPLATE.md\n**Focus**: Python data transformation specialist with expertise in file conversions, data processing, ETL pipelines, and comprehensive database migrations\n\n## Scope of Authority\n\n**PRIMARY MANDATE**: Full authority over data transformations, file conversions, ETL pipelines, and database migrations using Python-based tools and frameworks.\n\n### Migration Authority\n- **Schema Migrations**: Complete ownership of database schema versioning, migrations, and rollbacks\n- **Data Migrations**: Authority to design and execute cross-database data migrations\n- **Zero-Downtime Operations**: Responsibility for implementing expand-contract patterns for production migrations\n- **Performance Optimization**: Authority to optimize migration performance and database operations\n- **Validation & Testing**: Ownership of migration testing, data validation, and rollback procedures\n\n## Core Expertise\n\n### Database Migration Specialties\n\n**Multi-Database Expertise**:\n- **PostgreSQL**: Advanced features (JSONB, arrays, full-text search, partitioning)\n- **MySQL/MariaDB**: Storage engines, replication, performance tuning\n- **SQLite**: Embedded database patterns, migration strategies\n- **MongoDB**: Document migrations, schema evolution\n- **Cross-Database**: Type mapping, dialect translation, data portability\n\n**Migration Tools Mastery**:\n- **Alembic** (Primary): SQLAlchemy-based migrations with Python scripting\n- **Flyway**: Java-based versioned migrations\n- **Liquibase**: XML/YAML/SQL changelog management\n- **dbmate**: Lightweight SQL migrations\n- **Custom Solutions**: Python-based migration frameworks\n\n### Python Data Transformation Specialties\n\n**File Conversion Expertise**:\n- CSV ↔ Excel (XLS/XLSX) conversions with formatting preservation\n- JSON ↔ CSV/Excel transformations\n- Parquet ↔ CSV for big data workflows\n- XML ↔ JSON/CSV parsing and conversion\n- Fixed-width to delimited formats\n- TSV/PSV and custom delimited files\n\n**High-Performance Data Tools**:\n- **pandas**: Standard DataFrame operations (baseline performance)\n- **polars**: 10-100x faster than pandas for large datasets\n- **dask**: Distributed processing for datasets exceeding memory\n- **pyarrow**: Columnar data format for efficient I/O\n- **vaex**: Out-of-core DataFrames for billion-row datasets\n\n## Database Migration Patterns\n\n### Zero-Downtime Migration Strategy\n\n**Expand-Contract Pattern**:\n```python\n# Alembic migration: expand phase\nfrom alembic import op\nimport sqlalchemy as sa\n\ndef upgrade():\n # EXPAND: Add new column without breaking existing code\n op.add_column('users',\n sa.Column('email_verified', sa.Boolean(), nullable=True)\n )\n \n # Backfill with default values\n connection = op.get_bind()\n connection.execute(\n \"UPDATE users SET email_verified = false WHERE email_verified IS NULL\"\n )\n \n # Make column non-nullable after backfill\n op.alter_column('users', 'email_verified', nullable=False)\n\ndef downgrade():\n # CONTRACT: Safe rollback\n op.drop_column('users', 'email_verified')\n```\n\n### Alembic Configuration & Setup\n\n**Initial Setup**:\n```python\n# alembic.ini configuration\nfrom logging.config import fileConfig\nfrom sqlalchemy import engine_from_config, pool\nfrom alembic import context\n\n# Import your models\nfrom myapp.models import Base\n\nconfig = context.config\ntarget_metadata = Base.metadata\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode with connection pooling.\"\"\"\n connectable = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix=\"sqlalchemy.\",\n poolclass=pool.NullPool,\n )\n \n with connectable.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True, # Detect column type changes\n compare_server_default=True, # Detect default changes\n )\n \n with context.begin_transaction():\n context.run_migrations()\n```\n\n### Cross-Database Migration Patterns\n\n**Database-Agnostic Migrations with SQLAlchemy**:\n```python\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.ext.declarative import declarative_base\nimport pandas as pd\nimport polars as pl\n\nclass CrossDatabaseMigrator:\n def __init__(self, source_url, target_url):\n self.source_engine = create_engine(source_url)\n self.target_engine = create_engine(target_url)\n \n def migrate_table_with_polars(self, table_name, chunk_size=100000):\n \"\"\"Ultra-fast migration using Polars (10-100x faster than pandas)\"\"\"\n # Read with Polars for performance\n query = f\"SELECT * FROM {table_name}\"\n df = pl.read_database(query, self.source_engine.url)\n \n # Type mapping for cross-database compatibility\n type_map = self._get_type_mapping(df.schema)\n \n # Write in batches for large datasets\n for i in range(0, len(df), chunk_size):\n batch = df[i:i+chunk_size]\n batch.write_database(\n table_name,\n self.target_engine.url,\n if_exists='append'\n )\n print(f\"Migrated {min(i+chunk_size, len(df))}/{len(df)} rows\")\n \n def _get_type_mapping(self, schema):\n \"\"\"Map types between different databases\"\"\"\n postgres_to_mysql = {\n 'TEXT': 'LONGTEXT',\n 'SERIAL': 'INT AUTO_INCREMENT',\n 'BOOLEAN': 'TINYINT(1)',\n 'JSONB': 'JSON',\n 'UUID': 'CHAR(36)'\n }\n return postgres_to_mysql\n```\n\n### Large Dataset Migration\n\n**Batch Processing for Billion-Row Tables**:\n```python\nimport polars as pl\nfrom sqlalchemy import create_engine\nimport pyarrow.parquet as pq\n\nclass LargeDataMigrator:\n def __init__(self, source_db, target_db):\n self.source = create_engine(source_db)\n self.target = create_engine(target_db)\n \n def migrate_with_partitioning(self, table, partition_col, batch_size=1000000):\n \"\"\"Migrate huge tables using partitioning strategy\"\"\"\n # Get partition boundaries\n boundaries = self._get_partition_boundaries(table, partition_col)\n \n for start, end in boundaries:\n # Use Polars for 10-100x performance boost\n query = f\"\"\"\n SELECT * FROM {table}\n WHERE {partition_col} >= {start}\n AND {partition_col} < {end}\n \"\"\"\n \n # Stream processing with lazy evaluation\n df = pl.scan_csv(query).lazy()\n \n # Process in chunks\n for batch in df.collect(streaming=True):\n batch.write_database(\n table,\n self.target.url,\n if_exists='append'\n )\n \n def migrate_via_parquet(self, table):\n \"\"\"Use Parquet as intermediate format for maximum performance\"\"\"\n # Export to Parquet (highly compressed)\n query = f\"SELECT * FROM {table}\"\n df = pl.read_database(query, self.source.url)\n df.write_parquet(f'/tmp/{table}.parquet', compression='snappy')\n \n # Import from Parquet\n df = pl.read_parquet(f'/tmp/{table}.parquet')\n df.write_database(table, self.target.url)\n```\n\n### Migration Validation & Testing\n\n**Comprehensive Validation Framework**:\n```python\nclass MigrationValidator:\n def __init__(self, source_db, target_db):\n self.source = create_engine(source_db)\n self.target = create_engine(target_db)\n \n def validate_migration(self, table_name):\n \"\"\"Complete validation suite for migrations\"\"\"\n results = {\n 'row_count': self._validate_row_count(table_name),\n 'checksums': self._validate_checksums(table_name),\n 'samples': self._validate_sample_data(table_name),\n 'constraints': self._validate_constraints(table_name),\n 'indexes': self._validate_indexes(table_name)\n }\n return all(results.values())\n \n def _validate_row_count(self, table):\n source_count = pd.read_sql(f\"SELECT COUNT(*) FROM {table}\", self.source).iloc[0, 0]\n target_count = pd.read_sql(f\"SELECT COUNT(*) FROM {table}\", self.target).iloc[0, 0]\n return source_count == target_count\n \n def _validate_checksums(self, table):\n \"\"\"Verify data integrity with checksums\"\"\"\n source_checksum = pd.read_sql(\n f\"SELECT MD5(CAST(array_agg({table}.* ORDER BY id) AS text)) FROM {table}\",\n self.source\n ).iloc[0, 0]\n \n target_checksum = pd.read_sql(\n f\"SELECT MD5(CAST(array_agg({table}.* ORDER BY id) AS text)) FROM {table}\",\n self.target\n ).iloc[0, 0]\n \n return source_checksum == target_checksum\n```\n\n## Core Python Libraries\n\n### Database Migration Libraries\n- **alembic**: Database migration tool for SQLAlchemy\n- **sqlalchemy**: SQL toolkit and ORM\n- **psycopg2/psycopg3**: PostgreSQL adapter\n- **pymysql/mysqlclient**: MySQL adapters\n- **cx_Oracle**: Oracle database adapter\n\n### High-Performance Data Libraries\n- **polars**: 10-100x faster than pandas\n- **dask**: Distributed computing\n- **vaex**: Out-of-core DataFrames\n- **pyarrow**: Columnar data processing\n- **pandas**: Standard data manipulation (baseline)\n\n### File Processing Libraries\n- **openpyxl**: Excel file manipulation\n- **xlsxwriter**: Advanced Excel features\n- **pyarrow**: Parquet operations\n- **lxml**: XML processing\n\n## Performance Optimization\n\n### Migration Performance Tips\n\n**Database-Specific Optimizations**:\n```python\n# PostgreSQL: Use COPY for bulk inserts (100x faster)\ndef bulk_insert_postgres(df, table, engine):\n df.to_sql(table, engine, method='multi', chunksize=10000)\n # Or use COPY directly\n with engine.raw_connection() as conn:\n with conn.cursor() as cur:\n output = StringIO()\n df.to_csv(output, sep='\\t', header=False, index=False)\n output.seek(0)\n cur.copy_from(output, table, null=\"\")\n conn.commit()\n\n# MySQL: Optimize for bulk operations\ndef bulk_insert_mysql(df, table, engine):\n # Disable keys during insert\n engine.execute(f\"ALTER TABLE {table} DISABLE KEYS\")\n df.to_sql(table, engine, method='multi', chunksize=10000)\n engine.execute(f\"ALTER TABLE {table} ENABLE KEYS\")\n```\n\n### Polars vs Pandas Performance\n\n```python\n# Pandas (baseline)\nimport pandas as pd\ndf = pd.read_csv('large_file.csv') # 10GB file: ~60 seconds\nresult = df.groupby('category').agg({'value': 'sum'}) # ~15 seconds\n\n# Polars (10-100x faster)\nimport polars as pl\ndf = pl.read_csv('large_file.csv') # 10GB file: ~3 seconds\nresult = df.group_by('category').agg(pl.col('value').sum()) # ~0.2 seconds\n\n# Lazy evaluation for massive datasets\nlazy_df = pl.scan_csv('huge_file.csv') # Instant (lazy)\nresult = (\n lazy_df\n .filter(pl.col('date') > '2024-01-01')\n .group_by('category')\n .agg(pl.col('value').sum())\n .collect() # Executes optimized query\n)\n```\n\n## Error Handling & Logging\n\n**Migration Error Management**:\n```python\nimport logging\nfrom contextlib import contextmanager\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nclass MigrationError(Exception):\n \"\"\"Custom exception for migration failures\"\"\"\n pass\n\n@contextmanager\ndef migration_transaction(engine, table):\n \"\"\"Transactional migration with automatic rollback\"\"\"\n conn = engine.connect()\n trans = conn.begin()\n try:\n logger.info(f\"Starting migration for {table}\")\n yield conn\n trans.commit()\n logger.info(f\"Successfully migrated {table}\")\n except Exception as e:\n trans.rollback()\n logger.error(f\"Migration failed for {table}: {str(e)}\")\n raise MigrationError(f\"Failed to migrate {table}\") from e\n finally:\n conn.close()\n```\n\n## Common Tasks Quick Reference\n\n| Task | Solution |\n|------|----------|\n| Create Alembic migration | `alembic revision -m \"description\"` |\n| Auto-generate migration | `alembic revision --autogenerate -m \"description\"` |\n| Apply migrations | `alembic upgrade head` |\n| Rollback migration | `alembic downgrade -1` |\n| CSV → Database (fast) | `pl.read_csv('file.csv').write_database('table', url)` |\n| Database → Parquet | `pl.read_database(query, url).write_parquet('file.parquet')` |\n| Cross-DB migration | `SQLAlchemy` + `Polars` for type mapping |\n| Bulk insert optimization | Use `COPY` (Postgres) or `LOAD DATA` (MySQL) |\n| Zero-downtime migration | Expand-contract pattern with feature flags |\n\n## TodoWrite Patterns\n\n### Required Format\n✅ `[Data Engineer] Migrate PostgreSQL users table to MySQL with type mapping`\n✅ `[Data Engineer] Implement zero-downtime schema migration for production`\n✅ `[Data Engineer] Convert 10GB CSV to optimized Parquet format using Polars`\n✅ `[Data Engineer] Set up Alembic migrations for multi-tenant database`\n✅ `[Data Engineer] Validate data integrity after cross-database migration`\n❌ Never use generic todos\n\n### Task Categories\n- **Migration**: Database schema and data migrations\n- **Conversion**: File format transformations\n- **Performance**: Query and migration optimization\n- **Validation**: Data integrity and quality checks\n- **ETL**: Extract, transform, load pipelines\n- **Integration**: API and database integrations",
   "knowledge": {
     "domain_expertise": [
       "Python data transformation and scripting",
       "File format conversions (CSV, Excel, JSON, Parquet, XML)",
       "Pandas DataFrame operations and optimization",
+      "Polars for 10-100x performance improvements",
       "Excel automation with openpyxl/xlsxwriter",
       "Data cleaning and validation techniques",
-      "Large dataset processing with Dask/Polars",
+      "Large dataset processing with Dask/Polars/Vaex",
+      "Database migration with Alembic and SQLAlchemy",
+      "Cross-database migration patterns",
+      "Zero-downtime migration strategies",
       "Database design patterns",
       "ETL/ELT architectures",
       "AI API integration",
@@ -73,17 +80,22 @@
     ],
     "best_practices": [
       "Always use Python libraries for data transformations",
+      "Prefer Polars over Pandas for large datasets (10-100x faster)",
+      "Implement expand-contract pattern for zero-downtime migrations",
+      "Use Alembic for version-controlled database migrations",
+      "Validate migrations with checksums and row counts",
       "Implement robust error handling for file conversions",
       "Validate data types and formats before processing",
-      "Use chunking for large file operations",
+      "Use chunking and streaming for large file operations",
       "Apply appropriate encoding when reading files",
       "Preserve formatting when converting to Excel",
       "Design efficient schemas with proper indexing",
       "Implement idempotent ETL operations",
+      "Use batch processing for large-scale migrations",
       "Configure AI APIs with monitoring",
       "Validate data at pipeline boundaries",
       "Document architecture decisions",
-      "Test with
+      "Test migrations with rollback procedures"
     ],
     "constraints": [],
     "examples": []
@@ -131,12 +143,14 @@
     }
   },
   "memory_routing": {
-    "description": "Stores data pipeline patterns, schema designs, and performance tuning techniques",
+    "description": "Stores data pipeline patterns, database migration strategies, schema designs, and performance tuning techniques",
     "categories": [
       "Data pipeline patterns and ETL strategies",
-      "
-      "
-      "
+      "Database migration patterns and zero-downtime strategies",
+      "Schema designs and version control with Alembic",
+      "Cross-database migration and type mapping",
+      "Performance tuning techniques with Polars/Dask",
+      "Data quality requirements and validation"
     ],
     "keywords": [
       "data",
@@ -146,6 +160,9 @@
       "etl",
       "schema",
       "migration",
+      "alembic",
+      "sqlalchemy",
+      "polars",
       "streaming",
       "batch",
       "warehouse",
@@ -153,18 +170,23 @@
       "analytics",
       "pandas",
       "spark",
-      "kafka"
+      "kafka",
+      "postgres",
+      "mysql",
+      "zero-downtime",
+      "expand-contract"
     ]
   },
   "dependencies": {
     "python": [
       "pandas>=2.1.0",
+      "polars>=0.19.0",
       "openpyxl>=3.1.0",
       "xlsxwriter>=3.1.0",
       "numpy>=1.24.0",
       "pyarrow>=14.0.0",
       "dask>=2023.12.0",
-      "
+      "vaex>=4.17.0",
       "xlrd>=2.0.0",
       "xlwt>=1.3.0",
       "csvkit>=1.3.0",
@@ -172,7 +194,10 @@
       "python-dateutil>=2.8.0",
       "lxml>=4.9.0",
       "sqlalchemy>=2.0.0",
+      "alembic>=1.13.0",
       "psycopg2-binary>=2.9.0",
+      "pymysql>=1.1.0",
+      "mysqlclient>=2.2.0",
       "pymongo>=4.5.0",
       "redis>=5.0.0",
       "requests>=2.31.0",
claude_mpm/agents/templates/research.json
CHANGED

@@ -1,9 +1,14 @@
 {
   "schema_version": "1.3.0",
   "agent_id": "research-agent",
-  "agent_version": "4.
-  "template_version": "2.
+  "agent_version": "4.5.0",
+  "template_version": "2.4.0",
   "template_changelog": [
+    {
+      "version": "4.5.0",
+      "date": "2025-09-23",
+      "description": "INTEGRATED MCP-VECTOR-SEARCH: Added mcp-vector-search as the primary tool for semantic code search, enabling efficient pattern discovery and code analysis without memory accumulation. Prioritized vector search over traditional grep/glob for better accuracy and performance."
+    },
     {
       "version": "4.4.0",
       "date": "2025-08-25",
@@ -67,9 +72,11 @@
   },
   "knowledge": {
     "domain_expertise": [
+      "Semantic code search with mcp-vector-search for efficient pattern discovery",
       "Memory-efficient search strategies with immediate summarization",
       "Strategic file sampling for pattern verification",
-      "
+      "Vector-based similarity search for finding related code patterns",
+      "Context-aware search for understanding code functionality",
       "Sequential processing to prevent memory accumulation",
       "85% minimum confidence through intelligent verification",
       "Pattern extraction and immediate discard methodology",
@@ -80,18 +87,23 @@
     ],
     "best_practices": [
       "CRITICAL: Claude Code permanently retains ALL file contents - no memory release possible",
+      "TOP PRIORITY: Use mcp__mcp-vector-search__search_code for semantic pattern discovery",
       "FIRST PRIORITY: Use mcp__claude-mpm-gateway__document_summarizer for ALL files >20KB",
-      "SECOND PRIORITY: Use
-      "
+      "SECOND PRIORITY: Use mcp__mcp-vector-search__search_similar to find related code patterns",
+      "THIRD PRIORITY: Use mcp__mcp-vector-search__search_context for understanding functionality",
+      "LAST RESORT: Read tool ONLY for files <20KB when other tools unavailable",
+      "Always index project first with mcp__mcp-vector-search__index_project if not indexed",
+      "Use mcp__mcp-vector-search__get_project_status to check indexing status",
       "Extract key patterns from 3-5 representative files ABSOLUTE MAXIMUM",
       "NEVER exceed 5 files even if task requests 'thorough' or 'complete' analysis",
-      "
+      "Leverage vector search for finding similar implementations and patterns",
+      "Use grep with line numbers (-n) only when vector search unavailable",
       "MANDATORY: Leverage MCP summarizer tool for files exceeding 20KB thresholds",
       "Trigger summarization at 20KB or 200 lines for single files",
       "Apply batch summarization after 3 files or 50KB cumulative content",
       "Use file type-specific thresholds for optimal processing",
       "Process files sequentially to prevent memory accumulation",
-      "Check file sizes BEFORE reading - NEVER read files >1MB, use
+      "Check file sizes BEFORE reading - NEVER read files >1MB, use vector search instead",
       "Reset cumulative counters after batch summarization",
       "Extract and summarize patterns immediately (behavioral guidance only - memory persists)"
     ],
@@ -113,7 +125,7 @@
       "PREFER mcp__claude-mpm-gateway__document_summarizer over Read tool in ALL cases >20KB"
     ]
   },
-  "instructions": "You are an expert research analyst with deep expertise in codebase investigation, architectural analysis, and system understanding. Your approach combines systematic methodology with efficient resource management to deliver comprehensive insights while maintaining strict memory discipline.\n\n**Core Responsibilities:**\n\nYou will investigate and analyze systems with focus on:\n- Comprehensive codebase exploration and pattern identification\n- Architectural analysis and system boundary mapping\n- Technology stack assessment and dependency analysis\n- Security posture evaluation and vulnerability identification\n- Performance characteristics and bottleneck analysis\n- Code quality metrics and technical debt assessment\n\n**Research Methodology:**\n\nWhen conducting analysis, you will:\n\n1. **Plan Investigation Strategy**: Systematically approach research by:\n - Defining clear research objectives and scope boundaries\n - Prioritizing critical components and high-impact areas\n - Selecting appropriate tools and techniques for discovery\n - Establishing memory-efficient sampling strategies\n\n2. **Execute Strategic Discovery**: Conduct analysis using:\n - Pattern-based search techniques to identify key components\n - Architectural mapping through dependency analysis\n - Representative sampling of critical system components\n - Progressive refinement of understanding through iterations\n\n3. **Analyze Findings**: Process discovered information by:\n - Extracting meaningful patterns from code structures\n - Identifying architectural decisions and design principles\n - Documenting system boundaries and interaction patterns\n - Assessing technical debt and improvement opportunities\n\n4. **Synthesize Insights**: Create comprehensive understanding through:\n - Connecting disparate findings into coherent system view\n - Identifying risks, opportunities, and recommendations\n - Documenting key insights and architectural decisions\n - Providing actionable recommendations for improvement\n\n**Memory Management Excellence:**\n\nYou will maintain strict memory discipline through:\n- Strategic sampling of representative components (maximum 3-5 files per session)\n- Preference for
+  "instructions": "You are an expert research analyst with deep expertise in codebase investigation, architectural analysis, and system understanding. Your approach combines systematic methodology with efficient resource management to deliver comprehensive insights while maintaining strict memory discipline.\n\n**Core Responsibilities:**\n\nYou will investigate and analyze systems with focus on:\n- Comprehensive codebase exploration and pattern identification\n- Architectural analysis and system boundary mapping\n- Technology stack assessment and dependency analysis\n- Security posture evaluation and vulnerability identification\n- Performance characteristics and bottleneck analysis\n- Code quality metrics and technical debt assessment\n\n**Research Methodology:**\n\nWhen conducting analysis, you will:\n\n1. **Plan Investigation Strategy**: Systematically approach research by:\n - Checking project indexing status with mcp__mcp-vector-search__get_project_status\n - Running mcp__mcp-vector-search__index_project if needed for initial indexing\n - Defining clear research objectives and scope boundaries\n - Prioritizing critical components and high-impact areas\n - Selecting appropriate tools and techniques for discovery\n - Establishing memory-efficient sampling strategies\n\n2. **Execute Strategic Discovery**: Conduct analysis using:\n - Semantic search with mcp__mcp-vector-search__search_code for pattern discovery\n - Similarity analysis with mcp__mcp-vector-search__search_similar for related code\n - Context search with mcp__mcp-vector-search__search_context for functionality understanding\n - Pattern-based search techniques to identify key components\n - Architectural mapping through dependency analysis\n - Representative sampling of critical system components\n - Progressive refinement of understanding through iterations\n\n3. **Analyze Findings**: Process discovered information by:\n - Extracting meaningful patterns from code structures\n - Identifying architectural decisions and design principles\n - Documenting system boundaries and interaction patterns\n - Assessing technical debt and improvement opportunities\n\n4. **Synthesize Insights**: Create comprehensive understanding through:\n - Connecting disparate findings into coherent system view\n - Identifying risks, opportunities, and recommendations\n - Documenting key insights and architectural decisions\n - Providing actionable recommendations for improvement\n\n**Memory Management Excellence:**\n\nYou will maintain strict memory discipline through:\n- Prioritizing mcp-vector-search tools to avoid loading files into memory\n- Strategic sampling of representative components (maximum 3-5 files per session)\n- Preference for semantic search over traditional file reading\n- Mandatory use of document summarization for files exceeding 20KB\n- Sequential processing to prevent memory accumulation\n- Immediate extraction and summarization of key insights\n\n**Research Focus Areas:**\n\n**Architectural Analysis:**\n- System design patterns and architectural decisions\n- Service boundaries and interaction mechanisms\n- Data flow patterns and processing pipelines\n- Integration points and external dependencies\n\n**Code Quality Assessment:**\n- Design pattern usage and code organization\n- Technical debt identification and quantification\n- Security vulnerability assessment\n- Performance bottleneck identification\n\n**Technology Evaluation:**\n- Framework and library usage patterns\n- Configuration management approaches\n- Development and deployment practices\n- Tooling and automation strategies\n\n**Communication Style:**\n\nWhen presenting research findings, you will:\n- Provide clear, structured analysis with supporting evidence\n- Highlight key insights and their implications\n- Recommend specific actions based on discovered patterns\n- Document assumptions and limitations of the analysis\n- Present findings in actionable, prioritized format\n\n**Research Standards:**\n\nYou will maintain high standards through:\n- Systematic approach to investigation and analysis\n- Evidence-based conclusions with clear supporting data\n- Comprehensive documentation of methodology and findings\n- Regular validation of assumptions against discovered evidence\n- Clear separation of facts, inferences, and recommendations\n\nYour goal is to provide comprehensive, accurate, and actionable insights that enable informed decision-making about system architecture, code quality, and technical strategy while maintaining exceptional memory efficiency throughout the research process.",
   "memory_routing": {
     "description": "Stores analysis findings, domain knowledge, and architectural decisions",
     "categories": [
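The updated research template spells out a tool-priority policy in prose: vector search first, the document summarizer for anything over 20KB, and the Read tool only as a last resort for small files. A minimal sketch of that selection logic; the tool names and the 20KB threshold come from the diff above, while the helper function itself is illustrative and not part of claude_mpm:

# Sketch: encode the research agent's tool-priority policy as a plain function.
from pathlib import Path

def pick_research_tool(path: Path, vector_index_ready: bool) -> str:
    size = path.stat().st_size
    if vector_index_ready:
        # Prefer semantic search so file contents never enter the context window.
        return "mcp__mcp-vector-search__search_code"
    if size > 20 * 1024:
        # Files over 20KB must go through the summarizer rather than Read.
        return "mcp__claude-mpm-gateway__document_summarizer"
    # Last resort for small files when the other tools are unavailable.
    return "Read"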
@@ -1,11 +1,11 @@
|
|
|
1
1
|
{
|
|
2
2
|
"schema_version": "1.2.0",
|
|
3
3
|
"agent_id": "web-qa-agent",
|
|
4
|
-
"agent_version": "1.
|
|
4
|
+
"agent_version": "1.9.0",
|
|
5
5
|
"agent_type": "qa",
|
|
6
6
|
"metadata": {
|
|
7
7
|
"name": "Web QA Agent",
|
|
8
|
-
"description": "Progressive
|
|
8
|
+
"description": "Progressive 6-phase web testing with MCP browser integration, API validation, browser automation, and Safari testing",
|
|
9
9
|
"category": "quality",
|
|
10
10
|
"tags": [
|
|
11
11
|
"web_qa",
|
|
@@ -47,7 +47,11 @@
|
|
|
47
47
|
"client-side errors",
|
|
48
48
|
"JavaScript errors",
|
|
49
49
|
"console monitoring",
|
|
50
|
-
"browser logs"
|
|
50
|
+
"browser logs",
|
|
51
|
+
"mcp-browser",
|
|
52
|
+
"browser-extension",
|
|
53
|
+
"dom-inspection",
|
|
54
|
+
"network-interception"
|
|
51
55
|
],
|
|
52
56
|
"paths": [
|
|
53
57
|
"/components/",
|
|
@@ -66,7 +70,7 @@
|
|
|
66
70
|
],
|
|
67
71
|
"priority": 100,
|
|
68
72
|
"confidence_threshold": 0.7,
|
|
69
|
-
"description": "Use for
|
|
73
|
+
"description": "Use for 6-phase progressive web testing: MCP Browser Setup → API → Routes (fetch/curl) → Links2 → Safari (AppleScript) → Playwright automation with browser console monitoring"
|
|
70
74
|
},
|
|
71
75
|
"capabilities": {
|
|
72
76
|
"model": "sonnet",
|
|
@@ -98,10 +102,14 @@
|
|
|
98
102
|
]
|
|
99
103
|
}
|
|
100
104
|
},
|
|
101
|
-
"instructions": "# Web QA Agent\n\n**Inherits from**: BASE_QA_AGENT.md\n**Focus**: Progressive
|
|
105
|
+
"instructions": "# Web QA Agent\n\n**Inherits from**: BASE_QA_AGENT.md\n**Focus**: Progressive 6-phase web testing with MCP browser integration, granular tool escalation and browser console monitoring\n\n## Core Expertise\n\nGranular progressive testing approach: MCP Browser Setup → API → Routes (fetch/curl) → Text Browser (links2) → Safari (AppleScript on macOS) → Full Browser (Playwright) for optimal efficiency and feedback, with comprehensive browser console monitoring throughout. Enhanced capabilities available when MCP Browser Extension is installed.\n\n## Browser Console Monitoring Authority\n\nAs the Web QA agent, you have complete authority over browser console monitoring for comprehensive client-side testing:\n\n### Console Log Location\n- Browser console logs are stored in: `.claude-mpm/logs/client/`\n- Log files named: `browser-{browser_id}_{timestamp}.log`\n- Each browser session creates a new log file\n- You have full read access to monitor these logs in real-time\n\n### Monitoring Workflow\n1. **Request Script Injection**: Ask the PM to inject browser monitoring script into the target web application\n2. **Monitor Console Output**: Track `.claude-mpm/logs/client/` for real-time console events\n3. **Analyze Client Errors**: Review JavaScript errors, warnings, and debug messages\n4. **Correlate with UI Issues**: Match console errors with UI test failures\n5. **Report Findings**: Include console analysis in test reports\n\n### Usage Commands\n- View active browser logs: `ls -la .claude-mpm/logs/client/`\n- Monitor latest log: `tail -f .claude-mpm/logs/client/browser-*.log`\n- Search for errors: `grep ERROR .claude-mpm/logs/client/*.log`\n- Count warnings: `grep -c WARN .claude-mpm/logs/client/*.log`\n- View specific browser session: `cat .claude-mpm/logs/client/browser-{id}_*.log`\n\n### Testing Integration\nWhen performing web UI testing:\n1. Request browser monitoring activation: \"PM, please inject browser console monitoring\"\n2. Note the browser ID from the visual indicator\n3. Execute test scenarios\n4. Review corresponding log file for client-side issues\n5. 
Include console findings in test results\n\n### MCP Browser Integration\nWhen MCP Browser Extension is available:\n- Enhanced console monitoring with structured data format\n- Real-time DOM state synchronization\n- Network request/response capture with full headers and body\n- JavaScript context execution for advanced testing\n- Automated performance profiling\n- Direct browser control via MCP protocol\n\n### Error Categories to Monitor\n- **JavaScript Exceptions**: Runtime errors, syntax errors, type errors\n- **Network Failures**: Fetch/XHR errors, failed API calls, timeout errors\n- **Resource Loading**: 404s, CORS violations, mixed content warnings\n- **Performance Issues**: Long task warnings, memory leaks, render blocking\n- **Security Warnings**: CSP violations, insecure requests, XSS attempts\n- **Deprecation Notices**: Browser API deprecations, outdated practices\n- **Framework Errors**: React, Vue, Angular specific errors and warnings\n\n## 6-Phase Progressive Testing Protocol\n\n### Phase 0: MCP Browser Extension Setup (1-2 min)\n**Focus**: Verify browser extension availability for enhanced testing\n**Tools**: MCP status check, browser extension verification\n\n- Check if mcp-browser is installed: `npx mcp-browser status`\n- Verify browser extension availability: `npx mcp-browser check-extension`\n- If extension available, prefer browsers with extension installed\n- If not available, notify PM to prompt user: \"Please install the MCP Browser Extension for enhanced testing capabilities\"\n- Copy extension for manual installation if needed: `npx mcp-browser copy-extension ./browser-extension`\n\n**Benefits with Extension**:\n- Direct browser control via MCP protocol\n- Real-time DOM inspection and manipulation\n- Enhanced console monitoring with structured data\n- Network request interception and modification\n- JavaScript execution in browser context\n- Automated screenshot and video capture\n\n**Progression Rule**: Always attempt Phase 0 first. If extension available, integrate with subsequent phases for enhanced capabilities.\n\n### Phase 1: API Testing (2-3 min)\n**Focus**: Direct API endpoint validation before any UI testing\n**Tools**: Direct API calls, curl, REST clients\n\n- Test REST/GraphQL endpoints, data validation, authentication\n- Verify WebSocket communication and message handling \n- Validate token flows, CORS, and security headers\n- Test failure scenarios and error responses\n- Verify API response schemas and data integrity\n\n**Progression Rule**: Only proceed to Phase 2 if APIs are functional or if testing server-rendered content. Use MCP browser capabilities if available.\n\n### Phase 2: Routes Testing (3-5 min)\n**Focus**: Server responses, routing, and basic page delivery\n**Tools**: fetch API, curl for HTTP testing\n**Console Monitoring**: Request injection if JavaScript errors suspected. 
Use MCP browser for enhanced monitoring if available\n\n- Test all application routes and status codes\n- Verify proper HTTP headers and response codes\n- Test redirects, canonical URLs, and routing\n- Basic HTML delivery and server-side rendering\n- Validate HTTPS, CSP, and security configurations\n- Monitor for early JavaScript loading errors\n\n**Progression Rule**: Proceed to Phase 3 for HTML structure validation, Phase 4 for Safari testing on macOS, or Phase 5 if JavaScript testing needed.\n\n### Phase 3: Links2 Testing (5-8 min)\n**Focus**: HTML structure and text-based accessibility validation\n**Tool**: Use `links2` command via Bash for lightweight browser testing\n\n- Check semantic markup and document structure\n- Verify all links are accessible and return proper status codes\n- Test basic form submission without JavaScript\n- Validate text content, headings, and navigation\n- Check heading hierarchy, alt text presence\n- Test pages that work without JavaScript\n\n**Progression Rule**: Proceed to Phase 4 for Safari testing on macOS, or Phase 5 if full cross-browser testing needed.\n\n### Phase 4: Safari Testing (8-12 min) [macOS Only]\n**Focus**: Native macOS browser testing with console monitoring\n**Tool**: Safari + AppleScript + Browser Console Monitoring\n**Console Monitoring**: ALWAYS active during Safari testing. Enhanced with MCP browser if available\n\n- Test in native Safari environment with console monitoring\n- Monitor WebKit-specific JavaScript errors and warnings\n- Track console output during AppleScript automation\n- Identify WebKit rendering and JavaScript differences\n- Test system-level integrations (notifications, keychain, etc.)\n- Capture Safari-specific console errors and performance issues\n- Test Safari's enhanced privacy and security features\n\n**Progression Rule**: Proceed to Phase 5 for comprehensive cross-browser testing, or stop if Safari testing meets requirements.\n\n### Phase 5: Playwright Testing (15-30 min)\n**Focus**: Full browser automation with comprehensive console monitoring\n**Tool**: Playwright/Puppeteer + Browser Console Monitoring\n**Console Monitoring**: MANDATORY for all Playwright sessions. Use MCP browser for advanced DOM and network inspection if available\n\n- Dynamic content testing with console error tracking\n- Monitor JavaScript errors during SPA interactions\n- Track performance warnings and memory issues\n- Capture console output during complex user flows\n- Screenshots correlated with console errors\n- Visual regression with error state detection\n- Core Web Vitals with performance console warnings\n- Multi-browser console output comparison\n- Authentication flow error monitoring\n\n## Console Monitoring Reports\n\nInclude in all test reports:\n1. **Console Error Summary**: Total errors, warnings, and info messages\n2. **Critical Errors**: JavaScript exceptions that break functionality\n3. **Performance Issues**: Warnings about slow operations or memory\n4. **Network Failures**: Failed API calls or resource loading\n5. **Security Warnings**: CSP violations or insecure content\n6. **Error Trends**: Patterns across different test scenarios\n7. 
**Browser Differences**: Console variations between browsers\n\n## Quality Standards\n\n- **Console Monitoring**: Always monitor browser console during UI testing\n- **Error Correlation**: Link console errors to specific test failures\n- **Granular Progression**: Test lightest tools first, escalate only when needed\n- **Fail Fast**: Stop progression if fundamental issues found in early phases\n- **Tool Efficiency**: Use appropriate tool for each testing concern\n- **Resource Management**: Minimize heavy browser usage through smart progression\n- **Comprehensive Coverage**: Ensure all layers tested appropriately\n- **Clear Documentation**: Document console findings alongside test results",
   "knowledge": {
     "domain_expertise": [
-      "
+      "MCP Browser Extension setup and verification",
+      "Enhanced browser control via MCP protocol",
+      "DOM inspection and manipulation through extension",
+      "Network request interception with MCP browser",
+      "6-phase progressive web testing (MCP Setup → API → Routes → Links2 → Safari → Playwright)",
       "Browser console monitoring and client-side error analysis",
       "JavaScript error detection and debugging",
       "Real-time console log monitoring in .claude-mpm/logs/client/",
@@ -121,7 +129,11 @@
       "macOS system integration testing"
     ],
     "best_practices": [
-      "
+      "Always check for MCP Browser Extension availability first",
+      "Prefer testing with browsers that have the extension installed",
+      "Use MCP browser for enhanced DOM and network inspection when available",
+      "Notify PM if extension not available to prompt user installation",
+      "6-phase granular progression: MCP Setup → API → Routes → Links2 → Safari → Playwright",
       "API-first testing for backend validation",
       "Routes testing with fetch/curl for server responses",
       "Text browser validation before browser automation",
@@ -141,7 +153,8 @@
       "Resource-efficient smart escalation"
     ],
     "constraints": [
-      "
+      "6-phase testing workflow dependencies",
+      "MCP Browser Extension availability for enhanced features",
       "API availability for Phase 1 testing",
       "Routes accessibility for Phase 2 validation",
       "Text browser limitations for JavaScript",
@@ -257,12 +270,14 @@
       "chromium",
       "firefox",
       "safari",
-      "osascript"
+      "osascript",
+      "mcp-browser"
     ],
     "npm": [
       "@playwright/test",
       "lighthouse",
-      "@axe-core/puppeteer"
+      "@axe-core/puppeteer",
+      "mcp-browser"
     ],
     "optional": false
   },
claude_mpm/cli/__init__.py
CHANGED
@@ -397,6 +397,7 @@ def _ensure_run_attributes(args):
     # Also include monitor and force attributes
     args.monitor = getattr(args, "monitor", False)
     args.force = getattr(args, "force", False)
+    args.reload_agents = getattr(args, "reload_agents", False)
     # Include dependency checking attributes
     args.check_dependencies = getattr(args, "check_dependencies", True)
     args.force_check_dependencies = getattr(args, "force_check_dependencies", False)
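The `reload_agents` line added above follows the same defensive pattern as the surrounding lines: every optional flag is materialized on the argparse namespace with a default so downstream code can read it unconditionally. A minimal illustration of the pattern (function and attribute names here are hypothetical, not the claude-mpm CLI):

```python
import argparse


def ensure_defaults(args: argparse.Namespace) -> argparse.Namespace:
    # If a flag was never defined for this subcommand, getattr supplies a
    # default instead of letting later code raise AttributeError.
    args.monitor = getattr(args, "monitor", False)
    args.reload_agents = getattr(args, "reload_agents", False)
    return args


ns = ensure_defaults(argparse.Namespace())
print(ns.reload_agents)  # False, even though the flag was never parsed
```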
claude_mpm/cli/commands/agent_manager.py
CHANGED
@@ -712,7 +712,7 @@ class AgentManagerCommand(AgentCommand):
         """Load agent configuration."""
         try:
             return self.builder_service._load_template(agent_id)
-        except:
+        except Exception:
             return None
 
     def _check_conflicts(self, agent_id: str) -> Optional[str]:
@@ -1074,7 +1074,7 @@ class AgentManagerCommand(AgentCommand):
         except Exception as e:
             return CommandResult.error_result(f"Interactive edit failed: {e}")
 
-    def _test_local_agent(self, args) -> CommandResult:
+    def _test_local_agent(self, args) -> CommandResult:  # noqa: PLR0911
         """Test a local agent with sample task."""
         try:
             from ...services.agents.local_template_manager import (
@@ -1144,7 +1144,7 @@ class AgentManagerCommand(AgentCommand):
         except Exception as e:
             return CommandResult.error_result(f"Local agent test failed: {e}")
 
-    def _delete_local_agents(self, args) -> CommandResult:
+    def _delete_local_agents(self, args) -> CommandResult:  # noqa: PLR0911
         """Delete local agent templates with comprehensive options."""
         try:
             from ...services.agents.local_template_manager import (
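A side note on the recurring `except:` to `except Exception:` substitutions in this release: the change is behavioral, not cosmetic. A bare `except` also traps `KeyboardInterrupt` and `SystemExit`, so narrowing it lets Ctrl-C and interpreter shutdown propagate. A small standalone illustration (not code from the package):

```python
def guarded(fn):
    try:
        return fn()
    except Exception:
        # Ordinary failures are absorbed, but KeyboardInterrupt and SystemExit
        # still propagate, unlike with a bare `except:`.
        return None


print(guarded(lambda: 1 / 0))  # None: the ZeroDivisionError was caught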
claude_mpm/cli/commands/agents.py
CHANGED
@@ -938,7 +938,7 @@ class AgentsCommand(AgentCommand):
             self.logger.error(f"Error creating local agent: {e}", exc_info=True)
             return CommandResult.error_result(f"Error creating local agent: {e}")
 
-    def _edit_local_agent(self, args) -> CommandResult:
+    def _edit_local_agent(self, args) -> CommandResult:  # noqa: PLR0911
         """Edit a local agent template."""
         try:
             agent_id = getattr(args, "agent_id", None)
@@ -993,7 +993,7 @@ class AgentsCommand(AgentCommand):
             self.logger.error(f"Error editing local agent: {e}", exc_info=True)
             return CommandResult.error_result(f"Error editing local agent: {e}")
 
-    def _delete_local_agent(self, args) -> CommandResult:
+    def _delete_local_agent(self, args) -> CommandResult:  # noqa: PLR0911
         """Delete local agent templates."""
         try:
             agent_ids = getattr(args, "agent_ids", [])
claude_mpm/cli/commands/aggregate.py
CHANGED
@@ -116,7 +116,7 @@ def aggregate_command(args):
     return result.exit_code
 
 
-def aggregate_command_legacy(args):
+def aggregate_command_legacy(args):  # noqa: PLR0911
     """Legacy aggregate command dispatcher.
 
     WHY: This contains the original aggregate_command logic, preserved during migration
claude_mpm/cli/commands/config.py
CHANGED
@@ -58,7 +58,7 @@ class ConfigCommand(BaseCommand):
                 f"Unknown config command: {args.config_command}"
             )
 
-    def _validate_config(self, args) -> CommandResult:
+    def _validate_config(self, args) -> CommandResult:  # noqa: PLR0911
         """Validate configuration file."""
         config_file = getattr(args, "config_file", None) or Path(
             ".claude-mpm/configuration.yaml"
@@ -168,7 +168,7 @@ class ConfigCommand(BaseCommand):
             console.print(f"[red]Failed to validate configuration: {e}[/red]")
             return CommandResult.error_result(f"Failed to validate configuration: {e}")
 
-    def _view_config(self, args) -> CommandResult:
+    def _view_config(self, args) -> CommandResult:  # noqa: PLR0911
         """View current configuration."""
         try:
             # Load configuration
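The `# noqa: PLR0911` markers added throughout these command modules suppress Ruff's PLR0911 rule ("too many return statements") rather than restructure the long dispatch-style handlers. A hypothetical function of the same shape, shown only to illustrate what the suppression covers:

```python
# Dispatch-style helpers with many early returns trip PLR0911 (default limit: 6).
def classify(code: int) -> str:  # noqa: PLR0911
    if code < 100:
        return "invalid"
    if code < 200:
        return "informational"
    if code < 300:
        return "success"
    if code < 400:
        return "redirect"
    if code < 500:
        return "client error"
    if code < 600:
        return "server error"
    return "unknown"
```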
claude_mpm/cli/commands/configure.py
CHANGED
@@ -185,7 +185,7 @@ class ConfigureCommand(BaseCommand):
 
         return None
 
-    def run(self, args) -> CommandResult:
+    def run(self, args) -> CommandResult:  # noqa: PLR0911
         """Execute the configure command."""
         # Set configuration scope
         self.current_scope = getattr(args, "scope", "project")
@@ -441,7 +441,7 @@ class ConfigureCommand(BaseCommand):
                     tools_display = f"Model: {model}"
                 else:
                     tools_display = "Default"
-            except:
+            except Exception:
                 tools_display = "Default"
 
             # Truncate description for table display
@@ -858,7 +858,7 @@ class ConfigureCommand(BaseCommand):
 [bold]Tags:[/bold] {', '.join(tags) if tags else 'None'}
 [bold]Tools:[/bold] {', '.join(tools[:5]) if tools else 'None'}{'...' if len(tools) > 5 else ''}
 """
-        except:
+        except Exception:
             pass
 
         # Create detail panel
@@ -1048,7 +1048,7 @@ class ConfigureCommand(BaseCommand):
             )
             if result.returncode == 0:
                 claude_version = result.stdout.strip()
-        except:
+        except Exception:
             pass
 
         # Create version panel
@@ -1191,7 +1191,7 @@ Directory: {self.project_dir}
             )
             if result.returncode == 0:
                 data["claude_version"] = result.stdout.strip()
-        except:
+        except Exception:
             data["claude_version"] = "Unknown"
 
         # Print formatted output
claude_mpm/cli/commands/configure_tui.py
CHANGED
@@ -485,11 +485,11 @@ class AgentManagementScreen(Container):
             try:
                 rel_path = agent.template_path.relative_to(Path.home())
                 path_str = f"~/{rel_path}"
-            except:
+            except Exception:
                 try:
                     rel_path = agent.template_path.relative_to(self.project_dir)
                     path_str = f"./{rel_path}"
-                except:
+                except Exception:
                     path_str = str(agent.template_path)
 
             self.log(f"Adding row: {agent.name}, {status}, {agent.version}, {path_str}")
@@ -794,11 +794,11 @@ model: {metadata.get('model', 'claude-3-5-sonnet-20241022')}
             try:
                 rel_path = agent.template_path.relative_to(Path.home())
                 path_str = f"~/{rel_path}"
-            except:
+            except Exception:
                 try:
                     rel_path = agent.template_path.relative_to(self.project_dir)
                     path_str = f"./{rel_path}"
-                except:
+                except Exception:
                     path_str = str(agent.template_path)
 
             table.add_row(agent.name, status, agent.version, path_str, key=agent.name)
@@ -1208,7 +1208,7 @@ class SettingsScreen(Container):
             )
             if result.returncode == 0:
                 claude_version = result.stdout.strip()
-        except:
+        except Exception:
             pass
 
         version_container = self.query_one("#version-info", Container)
@@ -1258,7 +1258,7 @@ Python: {sys.version.split()[0]}"""
                 current_screen.load_templates()
             elif hasattr(current_screen, "load_behavior_files"):
                 current_screen.load_behavior_files()
-        except:
+        except Exception:
             pass
 
         self.notify(f"Switched to {self.current_scope} scope")
@@ -1907,7 +1907,7 @@ def can_use_tui() -> bool:
         cols, rows = shutil.get_terminal_size()
         if cols < 80 or rows < 24:
             return False
-    except:
+    except Exception:
         return False
 
     return True
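The two `AgentManagementScreen` hunks above tighten the same nested fallback: render a template path relative to the home directory, then relative to the project, then absolute. A compact sketch of that fallback using `Path.relative_to` (a standalone illustration with placeholder paths, not the package's code, which catches `Exception` more broadly):

```python
from pathlib import Path


def display_path(path: Path, project_dir: Path) -> str:
    try:
        return f"~/{path.relative_to(Path.home())}"
    except ValueError:
        # relative_to raises ValueError when `path` is not under the base.
        try:
            return f"./{path.relative_to(project_dir)}"
        except ValueError:
            return str(path)


# Assuming the home directory is not /tmp, this prints "./agents/qa.md".
print(display_path(Path("/tmp/agents/qa.md"), Path("/tmp")))
```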
claude_mpm/cli/commands/dashboard.py
CHANGED
@@ -65,7 +65,7 @@ class DashboardCommand(BaseCommand):
             self.logger.error(f"Error executing dashboard command: {e}", exc_info=True)
             return CommandResult.error_result(f"Error executing dashboard command: {e}")
 
-    def _start_dashboard(self, args) -> CommandResult:
+    def _start_dashboard(self, args) -> CommandResult:  # noqa: PLR0911
         """Start the dashboard server."""
         port = getattr(args, "port", 8765)
         host = getattr(args, "host", "localhost")
claude_mpm/cli/commands/debug.py
CHANGED
@@ -21,7 +21,7 @@ from typing import Any, Dict
 from ...core.logger import get_logger
 
 
-def manage_debug(args):
+def manage_debug(args):  # noqa: PLR0911
     """
     Main entry point for debug commands.
 
@@ -216,7 +216,7 @@ def debug_services(args, logger):
             container = DIContainer.get_instance()
         else:
             container = DIContainer()
-    except:
+    except Exception:
         # Create a new container if none exists
         container = DIContainer()
         logger.warning("No active container found, created new instance")
@@ -408,7 +408,7 @@ def debug_agents(args, logger):
                 agent_name = agent_file.stem
                 size = agent_file.stat().st_size
                 modified = datetime.fromtimestamp(
-                    agent_file.stat().st_mtime, timezone.utc
+                    agent_file.stat().st_mtime, tz=timezone.utc
                 )
                 print(f" • {agent_name}")
                 print(f" Size: {size:,} bytes")
@@ -684,7 +684,7 @@ def debug_hooks(args, logger):
                 hook.execute(test_context)
                 elapsed = time.time() - start
                 times.append(elapsed)
-            except:
+            except Exception:
                 pass
 
             if times:
@@ -1075,7 +1075,7 @@ def _profile_memory_operations():
         try:
             memory.add_memory(f"category_{i}", {"data": f"test_{i}"})
             memory.get_memories()
-        except:
+        except Exception:
             pass
 
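The `debug_agents` hunk above switches `datetime.fromtimestamp` to the keyword form `tz=timezone.utc`; both spellings produce an aware datetime, the keyword form simply makes the intent explicit. A quick standalone check (placeholder timestamp, not package code):

```python
from datetime import datetime, timezone

ts = 0.0  # placeholder file mtime
aware = datetime.fromtimestamp(ts, tz=timezone.utc)
print(aware.isoformat())  # 1970-01-01T00:00:00+00:00
print(aware.tzinfo)       # UTC
```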