claude-mpm 3.9.11__py3-none-any.whl → 4.0.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- claude_mpm/VERSION +1 -1
- claude_mpm/__init__.py +2 -2
- claude_mpm/__main__.py +3 -2
- claude_mpm/agents/__init__.py +85 -79
- claude_mpm/agents/agent_loader.py +464 -1003
- claude_mpm/agents/agent_loader_integration.py +45 -45
- claude_mpm/agents/agents_metadata.py +29 -30
- claude_mpm/agents/async_agent_loader.py +156 -138
- claude_mpm/agents/base_agent.json +1 -1
- claude_mpm/agents/base_agent_loader.py +179 -151
- claude_mpm/agents/frontmatter_validator.py +229 -130
- claude_mpm/agents/schema/agent_schema.json +1 -1
- claude_mpm/agents/system_agent_config.py +213 -147
- claude_mpm/agents/templates/__init__.py +13 -13
- claude_mpm/agents/templates/code_analyzer.json +2 -2
- claude_mpm/agents/templates/data_engineer.json +1 -1
- claude_mpm/agents/templates/documentation.json +23 -11
- claude_mpm/agents/templates/engineer.json +22 -6
- claude_mpm/agents/templates/memory_manager.json +1 -1
- claude_mpm/agents/templates/ops.json +2 -2
- claude_mpm/agents/templates/project_organizer.json +1 -1
- claude_mpm/agents/templates/qa.json +1 -1
- claude_mpm/agents/templates/refactoring_engineer.json +222 -0
- claude_mpm/agents/templates/research.json +20 -14
- claude_mpm/agents/templates/security.json +1 -1
- claude_mpm/agents/templates/ticketing.json +2 -2
- claude_mpm/agents/templates/version_control.json +1 -1
- claude_mpm/agents/templates/web_qa.json +3 -1
- claude_mpm/agents/templates/web_ui.json +2 -2
- claude_mpm/cli/__init__.py +79 -51
- claude_mpm/cli/__main__.py +3 -2
- claude_mpm/cli/commands/__init__.py +20 -20
- claude_mpm/cli/commands/agents.py +279 -247
- claude_mpm/cli/commands/aggregate.py +138 -157
- claude_mpm/cli/commands/cleanup.py +147 -147
- claude_mpm/cli/commands/config.py +93 -76
- claude_mpm/cli/commands/info.py +17 -16
- claude_mpm/cli/commands/mcp.py +140 -905
- claude_mpm/cli/commands/mcp_command_router.py +139 -0
- claude_mpm/cli/commands/mcp_config_commands.py +20 -0
- claude_mpm/cli/commands/mcp_install_commands.py +20 -0
- claude_mpm/cli/commands/mcp_server_commands.py +175 -0
- claude_mpm/cli/commands/mcp_tool_commands.py +34 -0
- claude_mpm/cli/commands/memory.py +239 -203
- claude_mpm/cli/commands/monitor.py +330 -86
- claude_mpm/cli/commands/run.py +380 -429
- claude_mpm/cli/commands/run_config_checker.py +160 -0
- claude_mpm/cli/commands/socketio_monitor.py +235 -0
- claude_mpm/cli/commands/tickets.py +363 -220
- claude_mpm/cli/parser.py +24 -1156
- claude_mpm/cli/parsers/__init__.py +29 -0
- claude_mpm/cli/parsers/agents_parser.py +136 -0
- claude_mpm/cli/parsers/base_parser.py +331 -0
- claude_mpm/cli/parsers/config_parser.py +85 -0
- claude_mpm/cli/parsers/mcp_parser.py +152 -0
- claude_mpm/cli/parsers/memory_parser.py +138 -0
- claude_mpm/cli/parsers/monitor_parser.py +124 -0
- claude_mpm/cli/parsers/run_parser.py +147 -0
- claude_mpm/cli/parsers/tickets_parser.py +203 -0
- claude_mpm/cli/ticket_cli.py +7 -3
- claude_mpm/cli/utils.py +55 -37
- claude_mpm/cli_module/__init__.py +6 -6
- claude_mpm/cli_module/args.py +188 -140
- claude_mpm/cli_module/commands.py +79 -70
- claude_mpm/cli_module/migration_example.py +38 -60
- claude_mpm/config/__init__.py +32 -25
- claude_mpm/config/agent_config.py +151 -119
- claude_mpm/config/experimental_features.py +71 -73
- claude_mpm/config/paths.py +94 -208
- claude_mpm/config/socketio_config.py +84 -73
- claude_mpm/constants.py +35 -18
- claude_mpm/core/__init__.py +9 -6
- claude_mpm/core/agent_name_normalizer.py +68 -71
- claude_mpm/core/agent_registry.py +372 -521
- claude_mpm/core/agent_session_manager.py +74 -63
- claude_mpm/core/base_service.py +116 -87
- claude_mpm/core/cache.py +119 -153
- claude_mpm/core/claude_runner.py +425 -1120
- claude_mpm/core/config.py +263 -168
- claude_mpm/core/config_aliases.py +69 -61
- claude_mpm/core/config_constants.py +292 -0
- claude_mpm/core/constants.py +57 -99
- claude_mpm/core/container.py +211 -178
- claude_mpm/core/exceptions.py +233 -89
- claude_mpm/core/factories.py +92 -54
- claude_mpm/core/framework_loader.py +378 -220
- claude_mpm/core/hook_manager.py +198 -83
- claude_mpm/core/hook_performance_config.py +136 -0
- claude_mpm/core/injectable_service.py +61 -55
- claude_mpm/core/interactive_session.py +165 -155
- claude_mpm/core/interfaces.py +221 -195
- claude_mpm/core/lazy.py +96 -96
- claude_mpm/core/logger.py +133 -107
- claude_mpm/core/logging_config.py +185 -157
- claude_mpm/core/minimal_framework_loader.py +20 -15
- claude_mpm/core/mixins.py +30 -29
- claude_mpm/core/oneshot_session.py +215 -181
- claude_mpm/core/optimized_agent_loader.py +134 -138
- claude_mpm/core/optimized_startup.py +159 -157
- claude_mpm/core/pm_hook_interceptor.py +85 -72
- claude_mpm/core/service_registry.py +103 -101
- claude_mpm/core/session_manager.py +97 -87
- claude_mpm/core/socketio_pool.py +212 -158
- claude_mpm/core/tool_access_control.py +58 -51
- claude_mpm/core/types.py +46 -24
- claude_mpm/core/typing_utils.py +166 -82
- claude_mpm/core/unified_agent_registry.py +721 -0
- claude_mpm/core/unified_config.py +550 -0
- claude_mpm/core/unified_paths.py +549 -0
- claude_mpm/dashboard/index.html +1 -1
- claude_mpm/dashboard/open_dashboard.py +51 -17
- claude_mpm/dashboard/static/built/components/agent-inference.js +2 -0
- claude_mpm/dashboard/static/built/components/event-processor.js +2 -0
- claude_mpm/dashboard/static/built/components/event-viewer.js +2 -0
- claude_mpm/dashboard/static/built/components/export-manager.js +2 -0
- claude_mpm/dashboard/static/built/components/file-tool-tracker.js +2 -0
- claude_mpm/dashboard/static/built/components/hud-library-loader.js +2 -0
- claude_mpm/dashboard/static/built/components/hud-manager.js +2 -0
- claude_mpm/dashboard/static/built/components/hud-visualizer.js +2 -0
- claude_mpm/dashboard/static/built/components/module-viewer.js +2 -0
- claude_mpm/dashboard/static/built/components/session-manager.js +2 -0
- claude_mpm/dashboard/static/built/components/socket-manager.js +2 -0
- claude_mpm/dashboard/static/built/components/ui-state-manager.js +2 -0
- claude_mpm/dashboard/static/built/components/working-directory.js +2 -0
- claude_mpm/dashboard/static/built/dashboard.js +2 -0
- claude_mpm/dashboard/static/built/socket-client.js +2 -0
- claude_mpm/dashboard/static/css/dashboard.css +27 -8
- claude_mpm/dashboard/static/dist/components/agent-inference.js +2 -0
- claude_mpm/dashboard/static/dist/components/event-processor.js +2 -0
- claude_mpm/dashboard/static/dist/components/event-viewer.js +2 -0
- claude_mpm/dashboard/static/dist/components/export-manager.js +2 -0
- claude_mpm/dashboard/static/dist/components/file-tool-tracker.js +2 -0
- claude_mpm/dashboard/static/dist/components/hud-library-loader.js +2 -0
- claude_mpm/dashboard/static/dist/components/hud-manager.js +2 -0
- claude_mpm/dashboard/static/dist/components/hud-visualizer.js +2 -0
- claude_mpm/dashboard/static/dist/components/module-viewer.js +2 -0
- claude_mpm/dashboard/static/dist/components/session-manager.js +2 -0
- claude_mpm/dashboard/static/dist/components/socket-manager.js +2 -0
- claude_mpm/dashboard/static/dist/components/ui-state-manager.js +2 -0
- claude_mpm/dashboard/static/dist/components/working-directory.js +2 -0
- claude_mpm/dashboard/static/dist/dashboard.js +2 -0
- claude_mpm/dashboard/static/dist/socket-client.js +2 -0
- claude_mpm/dashboard/static/js/components/agent-inference.js +80 -76
- claude_mpm/dashboard/static/js/components/event-processor.js +71 -67
- claude_mpm/dashboard/static/js/components/event-viewer.js +93 -72
- claude_mpm/dashboard/static/js/components/export-manager.js +31 -28
- claude_mpm/dashboard/static/js/components/file-tool-tracker.js +110 -96
- claude_mpm/dashboard/static/js/components/hud-library-loader.js +11 -11
- claude_mpm/dashboard/static/js/components/hud-manager.js +73 -73
- claude_mpm/dashboard/static/js/components/hud-visualizer.js +163 -163
- claude_mpm/dashboard/static/js/components/module-viewer.js +305 -233
- claude_mpm/dashboard/static/js/components/session-manager.js +32 -29
- claude_mpm/dashboard/static/js/components/socket-manager.js +27 -20
- claude_mpm/dashboard/static/js/components/ui-state-manager.js +21 -18
- claude_mpm/dashboard/static/js/components/working-directory.js +74 -71
- claude_mpm/dashboard/static/js/dashboard.js +178 -453
- claude_mpm/dashboard/static/js/extension-error-handler.js +164 -0
- claude_mpm/dashboard/static/js/socket-client.js +133 -53
- claude_mpm/dashboard/templates/index.html +40 -50
- claude_mpm/experimental/cli_enhancements.py +60 -58
- claude_mpm/generators/__init__.py +1 -1
- claude_mpm/generators/agent_profile_generator.py +75 -65
- claude_mpm/hooks/__init__.py +1 -1
- claude_mpm/hooks/base_hook.py +33 -28
- claude_mpm/hooks/claude_hooks/__init__.py +1 -1
- claude_mpm/hooks/claude_hooks/connection_pool.py +120 -0
- claude_mpm/hooks/claude_hooks/event_handlers.py +743 -0
- claude_mpm/hooks/claude_hooks/hook_handler.py +415 -1331
- claude_mpm/hooks/claude_hooks/hook_wrapper.sh +4 -4
- claude_mpm/hooks/claude_hooks/memory_integration.py +221 -0
- claude_mpm/hooks/claude_hooks/response_tracking.py +348 -0
- claude_mpm/hooks/claude_hooks/tool_analysis.py +230 -0
- claude_mpm/hooks/memory_integration_hook.py +140 -100
- claude_mpm/hooks/tool_call_interceptor.py +89 -76
- claude_mpm/hooks/validation_hooks.py +57 -49
- claude_mpm/init.py +145 -121
- claude_mpm/models/__init__.py +9 -9
- claude_mpm/models/agent_definition.py +33 -23
- claude_mpm/models/agent_session.py +228 -200
- claude_mpm/scripts/__init__.py +1 -1
- claude_mpm/scripts/socketio_daemon.py +192 -75
- claude_mpm/scripts/socketio_server_manager.py +328 -0
- claude_mpm/scripts/start_activity_logging.py +25 -22
- claude_mpm/services/__init__.py +68 -43
- claude_mpm/services/agent_capabilities_service.py +271 -0
- claude_mpm/services/agents/__init__.py +23 -32
- claude_mpm/services/agents/deployment/__init__.py +3 -3
- claude_mpm/services/agents/deployment/agent_config_provider.py +310 -0
- claude_mpm/services/agents/deployment/agent_configuration_manager.py +359 -0
- claude_mpm/services/agents/deployment/agent_definition_factory.py +84 -0
- claude_mpm/services/agents/deployment/agent_deployment.py +415 -2113
- claude_mpm/services/agents/deployment/agent_discovery_service.py +387 -0
- claude_mpm/services/agents/deployment/agent_environment_manager.py +293 -0
- claude_mpm/services/agents/deployment/agent_filesystem_manager.py +387 -0
- claude_mpm/services/agents/deployment/agent_format_converter.py +453 -0
- claude_mpm/services/agents/deployment/agent_frontmatter_validator.py +161 -0
- claude_mpm/services/agents/deployment/agent_lifecycle_manager.py +345 -495
- claude_mpm/services/agents/deployment/agent_metrics_collector.py +279 -0
- claude_mpm/services/agents/deployment/agent_restore_handler.py +88 -0
- claude_mpm/services/agents/deployment/agent_template_builder.py +406 -0
- claude_mpm/services/agents/deployment/agent_validator.py +352 -0
- claude_mpm/services/agents/deployment/agent_version_manager.py +313 -0
- claude_mpm/services/agents/deployment/agent_versioning.py +6 -9
- claude_mpm/services/agents/deployment/agents_directory_resolver.py +79 -0
- claude_mpm/services/agents/deployment/async_agent_deployment.py +298 -234
- claude_mpm/services/agents/deployment/config/__init__.py +13 -0
- claude_mpm/services/agents/deployment/config/deployment_config.py +182 -0
- claude_mpm/services/agents/deployment/config/deployment_config_manager.py +200 -0
- claude_mpm/services/agents/deployment/deployment_config_loader.py +54 -0
- claude_mpm/services/agents/deployment/deployment_type_detector.py +124 -0
- claude_mpm/services/agents/deployment/facade/__init__.py +18 -0
- claude_mpm/services/agents/deployment/facade/async_deployment_executor.py +159 -0
- claude_mpm/services/agents/deployment/facade/deployment_executor.py +73 -0
- claude_mpm/services/agents/deployment/facade/deployment_facade.py +270 -0
- claude_mpm/services/agents/deployment/facade/sync_deployment_executor.py +178 -0
- claude_mpm/services/agents/deployment/interface_adapter.py +227 -0
- claude_mpm/services/agents/deployment/lifecycle_health_checker.py +85 -0
- claude_mpm/services/agents/deployment/lifecycle_performance_tracker.py +100 -0
- claude_mpm/services/agents/deployment/pipeline/__init__.py +32 -0
- claude_mpm/services/agents/deployment/pipeline/pipeline_builder.py +158 -0
- claude_mpm/services/agents/deployment/pipeline/pipeline_context.py +159 -0
- claude_mpm/services/agents/deployment/pipeline/pipeline_executor.py +169 -0
- claude_mpm/services/agents/deployment/pipeline/steps/__init__.py +19 -0
- claude_mpm/services/agents/deployment/pipeline/steps/agent_processing_step.py +195 -0
- claude_mpm/services/agents/deployment/pipeline/steps/base_step.py +119 -0
- claude_mpm/services/agents/deployment/pipeline/steps/configuration_step.py +79 -0
- claude_mpm/services/agents/deployment/pipeline/steps/target_directory_step.py +90 -0
- claude_mpm/services/agents/deployment/pipeline/steps/validation_step.py +100 -0
- claude_mpm/services/agents/deployment/processors/__init__.py +15 -0
- claude_mpm/services/agents/deployment/processors/agent_deployment_context.py +98 -0
- claude_mpm/services/agents/deployment/processors/agent_deployment_result.py +235 -0
- claude_mpm/services/agents/deployment/processors/agent_processor.py +258 -0
- claude_mpm/services/agents/deployment/refactored_agent_deployment_service.py +318 -0
- claude_mpm/services/agents/deployment/results/__init__.py +13 -0
- claude_mpm/services/agents/deployment/results/deployment_metrics.py +200 -0
- claude_mpm/services/agents/deployment/results/deployment_result_builder.py +249 -0
- claude_mpm/services/agents/deployment/strategies/__init__.py +25 -0
- claude_mpm/services/agents/deployment/strategies/base_strategy.py +119 -0
- claude_mpm/services/agents/deployment/strategies/project_strategy.py +150 -0
- claude_mpm/services/agents/deployment/strategies/strategy_selector.py +117 -0
- claude_mpm/services/agents/deployment/strategies/system_strategy.py +116 -0
- claude_mpm/services/agents/deployment/strategies/user_strategy.py +137 -0
- claude_mpm/services/agents/deployment/system_instructions_deployer.py +108 -0
- claude_mpm/services/agents/deployment/validation/__init__.py +19 -0
- claude_mpm/services/agents/deployment/validation/agent_validator.py +323 -0
- claude_mpm/services/agents/deployment/validation/deployment_validator.py +238 -0
- claude_mpm/services/agents/deployment/validation/template_validator.py +299 -0
- claude_mpm/services/agents/deployment/validation/validation_result.py +226 -0
- claude_mpm/services/agents/loading/__init__.py +2 -2
- claude_mpm/services/agents/loading/agent_profile_loader.py +259 -229
- claude_mpm/services/agents/loading/base_agent_manager.py +90 -81
- claude_mpm/services/agents/loading/framework_agent_loader.py +154 -129
- claude_mpm/services/agents/management/__init__.py +2 -2
- claude_mpm/services/agents/management/agent_capabilities_generator.py +72 -58
- claude_mpm/services/agents/management/agent_management_service.py +209 -156
- claude_mpm/services/agents/memory/__init__.py +9 -6
- claude_mpm/services/agents/memory/agent_memory_manager.py +218 -1152
- claude_mpm/services/agents/memory/agent_persistence_service.py +20 -16
- claude_mpm/services/agents/memory/analyzer.py +430 -0
- claude_mpm/services/agents/memory/content_manager.py +376 -0
- claude_mpm/services/agents/memory/template_generator.py +468 -0
- claude_mpm/services/agents/registry/__init__.py +7 -10
- claude_mpm/services/agents/registry/deployed_agent_discovery.py +122 -97
- claude_mpm/services/agents/registry/modification_tracker.py +351 -285
- claude_mpm/services/async_session_logger.py +187 -153
- claude_mpm/services/claude_session_logger.py +87 -72
- claude_mpm/services/command_handler_service.py +217 -0
- claude_mpm/services/communication/__init__.py +3 -2
- claude_mpm/services/core/__init__.py +50 -97
- claude_mpm/services/core/base.py +60 -53
- claude_mpm/services/core/interfaces/__init__.py +188 -0
- claude_mpm/services/core/interfaces/agent.py +351 -0
- claude_mpm/services/core/interfaces/communication.py +343 -0
- claude_mpm/services/core/interfaces/infrastructure.py +413 -0
- claude_mpm/services/core/interfaces/service.py +434 -0
- claude_mpm/services/core/interfaces.py +19 -944
- claude_mpm/services/event_aggregator.py +208 -170
- claude_mpm/services/exceptions.py +387 -308
- claude_mpm/services/framework_claude_md_generator/__init__.py +75 -79
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +69 -60
- claude_mpm/services/framework_claude_md_generator/content_validator.py +65 -61
- claude_mpm/services/framework_claude_md_generator/deployment_manager.py +68 -49
- claude_mpm/services/framework_claude_md_generator/section_generators/__init__.py +34 -34
- claude_mpm/services/framework_claude_md_generator/section_generators/agents.py +25 -22
- claude_mpm/services/framework_claude_md_generator/section_generators/claude_pm_init.py +10 -10
- claude_mpm/services/framework_claude_md_generator/section_generators/core_responsibilities.py +4 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/delegation_constraints.py +4 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/environment_config.py +4 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/footer.py +6 -5
- claude_mpm/services/framework_claude_md_generator/section_generators/header.py +8 -7
- claude_mpm/services/framework_claude_md_generator/section_generators/orchestration_principles.py +4 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/role_designation.py +6 -5
- claude_mpm/services/framework_claude_md_generator/section_generators/subprocess_validation.py +9 -8
- claude_mpm/services/framework_claude_md_generator/section_generators/todo_task_tools.py +4 -3
- claude_mpm/services/framework_claude_md_generator/section_generators/troubleshooting.py +5 -4
- claude_mpm/services/framework_claude_md_generator/section_manager.py +28 -27
- claude_mpm/services/framework_claude_md_generator/version_manager.py +30 -28
- claude_mpm/services/hook_service.py +106 -114
- claude_mpm/services/infrastructure/__init__.py +7 -5
- claude_mpm/services/infrastructure/context_preservation.py +233 -199
- claude_mpm/services/infrastructure/daemon_manager.py +279 -0
- claude_mpm/services/infrastructure/logging.py +83 -76
- claude_mpm/services/infrastructure/monitoring.py +547 -404
- claude_mpm/services/mcp_gateway/__init__.py +30 -13
- claude_mpm/services/mcp_gateway/config/__init__.py +2 -2
- claude_mpm/services/mcp_gateway/config/config_loader.py +61 -56
- claude_mpm/services/mcp_gateway/config/config_schema.py +50 -41
- claude_mpm/services/mcp_gateway/config/configuration.py +82 -75
- claude_mpm/services/mcp_gateway/core/__init__.py +13 -20
- claude_mpm/services/mcp_gateway/core/base.py +80 -67
- claude_mpm/services/mcp_gateway/core/exceptions.py +60 -46
- claude_mpm/services/mcp_gateway/core/interfaces.py +87 -84
- claude_mpm/services/mcp_gateway/main.py +287 -137
- claude_mpm/services/mcp_gateway/registry/__init__.py +1 -1
- claude_mpm/services/mcp_gateway/registry/service_registry.py +97 -94
- claude_mpm/services/mcp_gateway/registry/tool_registry.py +135 -126
- claude_mpm/services/mcp_gateway/server/__init__.py +2 -2
- claude_mpm/services/mcp_gateway/server/mcp_gateway.py +105 -110
- claude_mpm/services/mcp_gateway/server/stdio_handler.py +105 -107
- claude_mpm/services/mcp_gateway/server/stdio_server.py +691 -0
- claude_mpm/services/mcp_gateway/tools/__init__.py +4 -2
- claude_mpm/services/mcp_gateway/tools/base_adapter.py +109 -119
- claude_mpm/services/mcp_gateway/tools/document_summarizer.py +283 -215
- claude_mpm/services/mcp_gateway/tools/hello_world.py +122 -120
- claude_mpm/services/mcp_gateway/tools/ticket_tools.py +652 -0
- claude_mpm/services/mcp_gateway/tools/unified_ticket_tool.py +606 -0
- claude_mpm/services/memory/__init__.py +2 -2
- claude_mpm/services/memory/builder.py +451 -362
- claude_mpm/services/memory/cache/__init__.py +2 -2
- claude_mpm/services/memory/cache/shared_prompt_cache.py +232 -194
- claude_mpm/services/memory/cache/simple_cache.py +107 -93
- claude_mpm/services/memory/indexed_memory.py +195 -193
- claude_mpm/services/memory/optimizer.py +267 -234
- claude_mpm/services/memory/router.py +571 -263
- claude_mpm/services/memory_hook_service.py +237 -0
- claude_mpm/services/port_manager.py +575 -0
- claude_mpm/services/project/__init__.py +3 -3
- claude_mpm/services/project/analyzer.py +451 -305
- claude_mpm/services/project/registry.py +262 -240
- claude_mpm/services/recovery_manager.py +287 -231
- claude_mpm/services/response_tracker.py +87 -67
- claude_mpm/services/runner_configuration_service.py +587 -0
- claude_mpm/services/session_management_service.py +304 -0
- claude_mpm/services/socketio/__init__.py +4 -4
- claude_mpm/services/socketio/client_proxy.py +174 -0
- claude_mpm/services/socketio/handlers/__init__.py +3 -3
- claude_mpm/services/socketio/handlers/base.py +44 -30
- claude_mpm/services/socketio/handlers/connection.py +166 -64
- claude_mpm/services/socketio/handlers/file.py +123 -108
- claude_mpm/services/socketio/handlers/git.py +607 -373
- claude_mpm/services/socketio/handlers/hook.py +185 -0
- claude_mpm/services/socketio/handlers/memory.py +4 -4
- claude_mpm/services/socketio/handlers/project.py +4 -4
- claude_mpm/services/socketio/handlers/registry.py +53 -38
- claude_mpm/services/socketio/server/__init__.py +18 -0
- claude_mpm/services/socketio/server/broadcaster.py +252 -0
- claude_mpm/services/socketio/server/core.py +399 -0
- claude_mpm/services/socketio/server/main.py +323 -0
- claude_mpm/services/socketio_client_manager.py +160 -133
- claude_mpm/services/socketio_server.py +36 -1885
- claude_mpm/services/subprocess_launcher_service.py +316 -0
- claude_mpm/services/system_instructions_service.py +258 -0
- claude_mpm/services/ticket_manager.py +19 -533
- claude_mpm/services/utility_service.py +285 -0
- claude_mpm/services/version_control/__init__.py +18 -21
- claude_mpm/services/version_control/branch_strategy.py +20 -10
- claude_mpm/services/version_control/conflict_resolution.py +37 -13
- claude_mpm/services/version_control/git_operations.py +52 -21
- claude_mpm/services/version_control/semantic_versioning.py +92 -53
- claude_mpm/services/version_control/version_parser.py +145 -125
- claude_mpm/services/version_service.py +270 -0
- claude_mpm/storage/__init__.py +2 -2
- claude_mpm/storage/state_storage.py +177 -181
- claude_mpm/ticket_wrapper.py +2 -2
- claude_mpm/utils/__init__.py +2 -2
- claude_mpm/utils/agent_dependency_loader.py +453 -243
- claude_mpm/utils/config_manager.py +157 -118
- claude_mpm/utils/console.py +1 -1
- claude_mpm/utils/dependency_cache.py +102 -107
- claude_mpm/utils/dependency_manager.py +52 -47
- claude_mpm/utils/dependency_strategies.py +131 -96
- claude_mpm/utils/environment_context.py +110 -102
- claude_mpm/utils/error_handler.py +75 -55
- claude_mpm/utils/file_utils.py +80 -67
- claude_mpm/utils/framework_detection.py +12 -11
- claude_mpm/utils/import_migration_example.py +12 -60
- claude_mpm/utils/imports.py +48 -45
- claude_mpm/utils/path_operations.py +100 -93
- claude_mpm/utils/robust_installer.py +172 -164
- claude_mpm/utils/session_logging.py +30 -23
- claude_mpm/utils/subprocess_utils.py +99 -61
- claude_mpm/validation/__init__.py +1 -1
- claude_mpm/validation/agent_validator.py +151 -111
- claude_mpm/validation/frontmatter_validator.py +92 -71
- {claude_mpm-3.9.11.dist-info → claude_mpm-4.0.4.dist-info}/METADATA +90 -22
- claude_mpm-4.0.4.dist-info/RECORD +417 -0
- {claude_mpm-3.9.11.dist-info → claude_mpm-4.0.4.dist-info}/entry_points.txt +1 -0
- {claude_mpm-3.9.11.dist-info → claude_mpm-4.0.4.dist-info}/licenses/LICENSE +1 -1
- claude_mpm/cli/commands/run_guarded.py +0 -511
- claude_mpm/config/memory_guardian_config.py +0 -325
- claude_mpm/config/memory_guardian_yaml.py +0 -335
- claude_mpm/core/config_paths.py +0 -150
- claude_mpm/core/memory_aware_runner.py +0 -353
- claude_mpm/dashboard/static/js/dashboard-original.js +0 -4134
- claude_mpm/deployment_paths.py +0 -261
- claude_mpm/hooks/claude_hooks/hook_handler_fixed.py +0 -454
- claude_mpm/models/state_models.py +0 -433
- claude_mpm/services/agent/__init__.py +0 -24
- claude_mpm/services/agent/deployment.py +0 -2548
- claude_mpm/services/agent/management.py +0 -598
- claude_mpm/services/agent/registry.py +0 -813
- claude_mpm/services/agents/registry/agent_registry.py +0 -813
- claude_mpm/services/communication/socketio.py +0 -1935
- claude_mpm/services/communication/websocket.py +0 -479
- claude_mpm/services/framework_claude_md_generator.py +0 -624
- claude_mpm/services/health_monitor.py +0 -893
- claude_mpm/services/infrastructure/graceful_degradation.py +0 -616
- claude_mpm/services/infrastructure/health_monitor.py +0 -775
- claude_mpm/services/infrastructure/memory_dashboard.py +0 -479
- claude_mpm/services/infrastructure/memory_guardian.py +0 -944
- claude_mpm/services/infrastructure/restart_protection.py +0 -642
- claude_mpm/services/infrastructure/state_manager.py +0 -774
- claude_mpm/services/mcp_gateway/manager.py +0 -334
- claude_mpm/services/optimized_hook_service.py +0 -542
- claude_mpm/services/project_analyzer.py +0 -864
- claude_mpm/services/project_registry.py +0 -608
- claude_mpm/services/standalone_socketio_server.py +0 -1300
- claude_mpm/services/ticket_manager_di.py +0 -318
- claude_mpm/services/ticketing_service_original.py +0 -510
- claude_mpm/utils/paths.py +0 -395
- claude_mpm/utils/platform_memory.py +0 -524
- claude_mpm-3.9.11.dist-info/RECORD +0 -306
- {claude_mpm-3.9.11.dist-info → claude_mpm-4.0.4.dist-info}/WHEEL +0 -0
- {claude_mpm-3.9.11.dist-info → claude_mpm-4.0.4.dist-info}/top_level.txt +0 -0
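One pattern visible throughout this release is the consolidation of path handling into `claude_mpm/core/unified_paths.py` (note the removal of `claude_mpm/utils/paths.py`, `claude_mpm/core/config_paths.py`, and `claude_mpm/deployment_paths.py` above). The per-file diff below, for `claude_mpm/services/memory/optimizer.py`, shows that migration plus a Black-style reformat. The following sketch is illustrative only: it drives the optimizer using the constructor, method names, and result keys visible in that diff; the agent name and the script around the calls are hypothetical, not part of the package.

```python
# Illustrative sketch based on the 4.0.x signatures shown in the diff below.
from pathlib import Path

from claude_mpm.core.unified_paths import get_path_manager
from claude_mpm.services.memory.optimizer import MemoryOptimizer

# Path resolution now goes through the unified path manager
# (see the import swap and the project_root assignment in the diff).
project_root = get_path_manager().get_project_root()

# working_directory is optional; the optimizer defaults to the current working
# directory and reads <working_directory>/.claude-mpm/memories/<agent>_agent.md.
optimizer = MemoryOptimizer(working_directory=Path.cwd())

# Report-only analysis first (no files are modified).
analysis = optimizer.analyze_optimization_opportunities(agent_id="engineer")

# Optimize a single agent; the result dict carries the stats built in the diff.
result = optimizer.optimize_agent_memory("engineer")
if result.get("success"):
    print(
        f"{result['agent_id']}: saved {result['size_reduction']} bytes "
        f"({result['size_reduction_percent']}%), backup at {result['backup_created']}"
    )

# Or sweep every *_agent.md file in the memories directory at once.
summary = optimizer.optimize_all_memories()
```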
| @@ -8,7 +8,7 @@ and reorganizing by priority/relevance. | |
| 8 8 |  | 
| 9 9 | 
             
            This service provides:
         | 
| 10 10 | 
             
            - Duplicate detection and removal
         | 
| 11 | 
            -
            - Related item consolidation | 
| 11 | 
            +
            - Related item consolidation
         | 
| 12 12 | 
             
            - Priority-based reorganization
         | 
| 13 13 | 
             
            - Per-agent optimization strategies
         | 
| 14 14 | 
             
            - Size optimization within limits
         | 
| @@ -22,192 +22,212 @@ rather than aggressively removing content. Better to keep potentially useful | |
| 22 22 | 
             
            information than lose important insights.
         | 
| 23 23 | 
             
            """
         | 
| 24 24 |  | 
| 25 | 
            -
            import re
         | 
| 26 25 | 
             
            import os
         | 
| 27 | 
            -
             | 
| 28 | 
            -
            from typing import Dict, List, Optional, Any, Set, Tuple
         | 
| 26 | 
            +
            import re
         | 
| 29 27 | 
             
            from datetime import datetime
         | 
| 30 28 | 
             
            from difflib import SequenceMatcher
         | 
| 29 | 
            +
            from pathlib import Path
         | 
| 30 | 
            +
            from typing import Any, Dict, List, Optional, Set, Tuple
         | 
| 31 31 |  | 
| 32 | 
            -
            from claude_mpm.core.mixins import LoggerMixin
         | 
| 33 32 | 
             
            from claude_mpm.core.config import Config
         | 
| 34 | 
            -
            from claude_mpm. | 
| 33 | 
            +
            from claude_mpm.core.mixins import LoggerMixin
         | 
| 34 | 
            +
            from claude_mpm.core.unified_paths import get_path_manager
         | 
| 35 35 |  | 
| 36 36 |  | 
| 37 37 | 
             
            class MemoryOptimizer(LoggerMixin):
         | 
| 38 38 | 
             
                """Optimizes agent memory files through deduplication and reorganization.
         | 
| 39 | 
            -
             | 
| 39 | 
            +
             | 
| 40 40 | 
             
                WHY: Memory files need maintenance to stay useful. This service provides
         | 
| 41 41 | 
             
                automated cleanup while preserving valuable information and maintaining
         | 
| 42 42 | 
             
                the structured format agents expect.
         | 
| 43 | 
            -
             | 
| 43 | 
            +
             | 
| 44 44 | 
             
                DESIGN DECISION: Uses similarity thresholds and conservative merging to
         | 
| 45 45 | 
             
                avoid losing important nuances in learnings while removing clear duplicates.
         | 
| 46 46 | 
             
                """
         | 
| 47 | 
            -
             | 
| 47 | 
            +
             | 
| 48 48 | 
             
                # Similarity threshold for considering items duplicates
         | 
| 49 49 | 
             
                SIMILARITY_THRESHOLD = 0.85
         | 
| 50 | 
            -
             | 
| 50 | 
            +
             | 
| 51 51 | 
             
                # Minimum similarity for consolidation
         | 
| 52 52 | 
             
                CONSOLIDATION_THRESHOLD = 0.70
         | 
| 53 | 
            -
             | 
| 53 | 
            +
             | 
| 54 54 | 
             
                # Priority keywords for sorting (higher priority items kept/moved up)
         | 
| 55 55 | 
             
                PRIORITY_KEYWORDS = {
         | 
| 56 | 
            -
                     | 
| 57 | 
            -
             | 
| 58 | 
            -
             | 
| 56 | 
            +
                    "high": [
         | 
| 57 | 
            +
                        "critical",
         | 
| 58 | 
            +
                        "important",
         | 
| 59 | 
            +
                        "essential",
         | 
| 60 | 
            +
                        "required",
         | 
| 61 | 
            +
                        "must",
         | 
| 62 | 
            +
                        "always",
         | 
| 63 | 
            +
                        "never",
         | 
| 64 | 
            +
                    ],
         | 
| 65 | 
            +
                    "medium": ["should", "recommended", "prefer", "avoid", "consider"],
         | 
| 66 | 
            +
                    "low": ["note", "tip", "hint", "example", "reference"],
         | 
| 59 67 | 
             
                }
         | 
| 60 | 
            -
             | 
| 61 | 
            -
                def __init__( | 
| 68 | 
            +
             | 
| 69 | 
            +
                def __init__(
         | 
| 70 | 
            +
                    self, config: Optional[Config] = None, working_directory: Optional[Path] = None
         | 
| 71 | 
            +
                ):
         | 
| 62 72 | 
             
                    """Initialize the memory optimizer.
         | 
| 63 | 
            -
             | 
| 73 | 
            +
             | 
| 64 74 | 
             
                    Args:
         | 
| 65 75 | 
             
                        config: Optional Config object
         | 
| 66 76 | 
             
                        working_directory: Optional working directory. If not provided, uses current working directory.
         | 
| 67 77 | 
             
                    """
         | 
| 68 78 | 
             
                    super().__init__()
         | 
| 69 79 | 
             
                    self.config = config or Config()
         | 
| 70 | 
            -
                    self.project_root =  | 
| 80 | 
            +
                    self.project_root = get_path_manager().get_project_root()
         | 
| 71 81 | 
             
                    # Use current working directory by default, not project root
         | 
| 72 82 | 
             
                    self.working_directory = working_directory or Path(os.getcwd())
         | 
| 73 83 | 
             
                    self.memories_dir = self.working_directory / ".claude-mpm" / "memories"
         | 
| 74 | 
            -
             | 
| 84 | 
            +
             | 
| 75 85 | 
             
                def optimize_agent_memory(self, agent_id: str) -> Dict[str, Any]:
         | 
| 76 86 | 
             
                    """Optimize memory for a specific agent.
         | 
| 77 | 
            -
             | 
| 87 | 
            +
             | 
| 78 88 | 
             
                    WHY: Individual agent memories can be optimized independently, allowing
         | 
| 79 89 | 
             
                    for targeted cleanup of specific agents without affecting others.
         | 
| 80 | 
            -
             | 
| 90 | 
            +
             | 
| 81 91 | 
             
                    Args:
         | 
| 82 92 | 
             
                        agent_id: The agent identifier
         | 
| 83 | 
            -
             | 
| 93 | 
            +
             | 
| 84 94 | 
             
                    Returns:
         | 
| 85 95 | 
             
                        Dict containing optimization results and statistics
         | 
| 86 96 | 
             
                    """
         | 
| 87 97 | 
             
                    try:
         | 
| 88 98 | 
             
                        memory_file = self.memories_dir / f"{agent_id}_agent.md"
         | 
| 89 | 
            -
             | 
| 99 | 
            +
             | 
| 90 100 | 
             
                        if not memory_file.exists():
         | 
| 91 101 | 
             
                            return {
         | 
| 92 102 | 
             
                                "success": False,
         | 
| 93 103 | 
             
                                "agent_id": agent_id,
         | 
| 94 | 
            -
                                "error": "Memory file not found"
         | 
| 104 | 
            +
                                "error": "Memory file not found",
         | 
| 95 105 | 
             
                            }
         | 
| 96 | 
            -
             | 
| 106 | 
            +
             | 
| 97 107 | 
             
                        # Load original content
         | 
| 98 | 
            -
                        original_content = memory_file.read_text(encoding= | 
| 108 | 
            +
                        original_content = memory_file.read_text(encoding="utf-8")
         | 
| 99 109 | 
             
                        original_size = len(original_content)
         | 
| 100 | 
            -
             | 
| 110 | 
            +
             | 
| 101 111 | 
             
                        # Parse memory structure
         | 
| 102 112 | 
             
                        sections = self._parse_memory_sections(original_content)
         | 
| 103 | 
            -
             | 
| 113 | 
            +
             | 
| 104 114 | 
             
                        # Optimize each section
         | 
| 105 115 | 
             
                        optimized_sections = {}
         | 
| 106 116 | 
             
                        optimization_stats = {
         | 
| 107 117 | 
             
                            "duplicates_removed": 0,
         | 
| 108 118 | 
             
                            "items_consolidated": 0,
         | 
| 109 119 | 
             
                            "items_reordered": 0,
         | 
| 110 | 
            -
                            "sections_optimized": 0
         | 
| 120 | 
            +
                            "sections_optimized": 0,
         | 
| 111 121 | 
             
                        }
         | 
| 112 | 
            -
             | 
| 122 | 
            +
             | 
| 113 123 | 
             
                        for section_name, items in sections.items():
         | 
| 114 | 
            -
                            if section_name.lower() in [ | 
| 124 | 
            +
                            if section_name.lower() in ["header", "metadata"]:
         | 
| 115 125 | 
             
                                # Preserve header sections as-is
         | 
| 116 126 | 
             
                                optimized_sections[section_name] = items
         | 
| 117 127 | 
             
                                continue
         | 
| 118 | 
            -
             | 
| 128 | 
            +
             | 
| 119 129 | 
             
                            optimized_items, section_stats = self._optimize_section(items, agent_id)
         | 
| 120 130 | 
             
                            optimized_sections[section_name] = optimized_items
         | 
| 121 | 
            -
             | 
| 131 | 
            +
             | 
| 122 132 | 
             
                            # Aggregate stats
         | 
| 123 133 | 
             
                            for key in optimization_stats:
         | 
| 124 134 | 
             
                                if key in section_stats:
         | 
| 125 135 | 
             
                                    optimization_stats[key] += section_stats[key]
         | 
| 126 | 
            -
             | 
| 127 | 
            -
                            if  | 
| 136 | 
            +
             | 
| 137 | 
            +
                            if (
         | 
| 138 | 
            +
                                section_stats.get("duplicates_removed", 0) > 0
         | 
| 139 | 
            +
                                or section_stats.get("items_consolidated", 0) > 0
         | 
| 140 | 
            +
                            ):
         | 
| 128 141 | 
             
                                optimization_stats["sections_optimized"] += 1
         | 
| 129 | 
            -
             | 
| 142 | 
            +
             | 
| 130 143 | 
             
                        # Rebuild memory content
         | 
| 131 | 
            -
                        optimized_content = self._rebuild_memory_content( | 
| 144 | 
            +
                        optimized_content = self._rebuild_memory_content(
         | 
| 145 | 
            +
                            optimized_sections, agent_id
         | 
| 146 | 
            +
                        )
         | 
| 132 147 | 
             
                        optimized_size = len(optimized_content)
         | 
| 133 | 
            -
             | 
| 148 | 
            +
             | 
| 134 149 | 
             
                        # Create backup before saving
         | 
| 135 150 | 
             
                        backup_path = self._create_backup(memory_file)
         | 
| 136 | 
            -
             | 
| 151 | 
            +
             | 
| 137 152 | 
             
                        # Save optimized content
         | 
| 138 | 
            -
                        memory_file.write_text(optimized_content, encoding= | 
| 139 | 
            -
             | 
| 153 | 
            +
                        memory_file.write_text(optimized_content, encoding="utf-8")
         | 
| 154 | 
            +
             | 
| 140 155 | 
             
                        result = {
         | 
| 141 156 | 
             
                            "success": True,
         | 
| 142 157 | 
             
                            "agent_id": agent_id,
         | 
| 143 158 | 
             
                            "original_size": original_size,
         | 
| 144 159 | 
             
                            "optimized_size": optimized_size,
         | 
| 145 160 | 
             
                            "size_reduction": original_size - optimized_size,
         | 
| 146 | 
            -
                            "size_reduction_percent":  | 
| 161 | 
            +
                            "size_reduction_percent": (
         | 
| 162 | 
            +
                                round(((original_size - optimized_size) / original_size) * 100, 1)
         | 
| 163 | 
            +
                                if original_size > 0
         | 
| 164 | 
            +
                                else 0
         | 
| 165 | 
            +
                            ),
         | 
| 147 166 | 
             
                            "backup_created": str(backup_path),
         | 
| 148 167 | 
             
                            "timestamp": datetime.now().isoformat(),
         | 
| 149 | 
            -
                            **optimization_stats
         | 
| 168 | 
            +
                            **optimization_stats,
         | 
| 150 169 | 
             
                        }
         | 
| 151 | 
            -
             | 
| 170 | 
            +
             | 
| 152 171 | 
             
                        self.logger.info(f"Optimized memory for {agent_id}: {optimization_stats}")
         | 
| 153 172 | 
             
                        return result
         | 
| 154 | 
            -
             | 
| 173 | 
            +
             | 
| 155 174 | 
             
                    except Exception as e:
         | 
| 156 175 | 
             
                        self.logger.error(f"Error optimizing memory for {agent_id}: {e}")
         | 
| 157 | 
            -
                        return {
         | 
| 158 | 
            -
             | 
| 159 | 
            -
                            "agent_id": agent_id,
         | 
| 160 | 
            -
                            "error": str(e)
         | 
| 161 | 
            -
                        }
         | 
| 162 | 
            -
                
         | 
| 176 | 
            +
                        return {"success": False, "agent_id": agent_id, "error": str(e)}
         | 
| 177 | 
            +
             | 
| 163 178 | 
             
                def optimize_all_memories(self) -> Dict[str, Any]:
         | 
| 164 179 | 
             
                    """Optimize all agent memory files.
         | 
| 165 | 
            -
             | 
| 180 | 
            +
             | 
| 166 181 | 
             
                    WHY: Bulk optimization allows maintenance of the entire memory system
         | 
| 167 182 | 
             
                    in one operation, providing comprehensive cleanup and consistency.
         | 
| 168 | 
            -
             | 
| 183 | 
            +
             | 
| 169 184 | 
             
                    Returns:
         | 
| 170 185 | 
             
                        Dict containing results for all agents
         | 
| 171 186 | 
             
                    """
         | 
| 172 187 | 
             
                    try:
         | 
| 173 188 | 
             
                        if not self.memories_dir.exists():
         | 
| 174 | 
            -
                            return {
         | 
| 175 | 
            -
             | 
| 176 | 
            -
                                "error": "Memory directory not found"
         | 
| 177 | 
            -
                            }
         | 
| 178 | 
            -
                        
         | 
| 189 | 
            +
                            return {"success": False, "error": "Memory directory not found"}
         | 
| 190 | 
            +
             | 
| 179 191 | 
             
                        memory_files = list(self.memories_dir.glob("*_agent.md"))
         | 
| 180 192 | 
             
                        results = {}
         | 
| 181 | 
            -
             | 
| 193 | 
            +
             | 
| 182 194 | 
             
                        total_stats = {
         | 
| 183 195 | 
             
                            "agents_processed": 0,
         | 
| 184 196 | 
             
                            "agents_optimized": 0,
         | 
| 185 197 | 
             
                            "total_size_before": 0,
         | 
| 186 198 | 
             
                            "total_size_after": 0,
         | 
| 187 199 | 
             
                            "total_duplicates_removed": 0,
         | 
| 188 | 
            -
                            "total_items_consolidated": 0
         | 
| 200 | 
            +
                            "total_items_consolidated": 0,
         | 
| 189 201 | 
             
                        }
         | 
| 190 | 
            -
             | 
| 202 | 
            +
             | 
| 191 203 | 
             
                        for memory_file in memory_files:
         | 
| 192 | 
            -
                            agent_id = memory_file.stem.replace( | 
| 204 | 
            +
                            agent_id = memory_file.stem.replace("_agent", "")
         | 
| 193 205 | 
             
                            result = self.optimize_agent_memory(agent_id)
         | 
| 194 206 | 
             
                            results[agent_id] = result
         | 
| 195 | 
            -
             | 
| 207 | 
            +
             | 
| 196 208 | 
             
                            total_stats["agents_processed"] += 1
         | 
| 197 | 
            -
             | 
| 209 | 
            +
             | 
| 198 210 | 
             
                            if result.get("success"):
         | 
| 199 211 | 
             
                                total_stats["agents_optimized"] += 1
         | 
| 200 212 | 
             
                                total_stats["total_size_before"] += result.get("original_size", 0)
         | 
| 201 213 | 
             
                                total_stats["total_size_after"] += result.get("optimized_size", 0)
         | 
| 202 | 
            -
                                total_stats["total_duplicates_removed"] += result.get( | 
| 203 | 
            -
             | 
| 204 | 
            -
             | 
| 214 | 
            +
                                total_stats["total_duplicates_removed"] += result.get(
         | 
| 215 | 
            +
                                    "duplicates_removed", 0
         | 
| 216 | 
            +
                                )
         | 
| 217 | 
            +
                                total_stats["total_items_consolidated"] += result.get(
         | 
| 218 | 
            +
                                    "items_consolidated", 0
         | 
| 219 | 
            +
                                )
         | 
| 220 | 
            +
             | 
| 205 221 | 
             
                        # Calculate overall statistics
         | 
| 206 | 
            -
                        total_reduction =  | 
| 207 | 
            -
             | 
| 208 | 
            -
             | 
| 209 | 
            -
                         | 
| 210 | 
            -
             | 
| 222 | 
            +
                        total_reduction = (
         | 
| 223 | 
            +
                            total_stats["total_size_before"] - total_stats["total_size_after"]
         | 
| 224 | 
            +
                        )
         | 
| 225 | 
            +
                        total_reduction_percent = (
         | 
| 226 | 
            +
                            round((total_reduction / total_stats["total_size_before"]) * 100, 1)
         | 
| 227 | 
            +
                            if total_stats["total_size_before"] > 0
         | 
| 228 | 
            +
                            else 0
         | 
| 229 | 
            +
                        )
         | 
| 230 | 
            +
             | 
| 211 231 | 
             
                        return {
         | 
| 212 232 | 
             
                            "success": True,
         | 
| 213 233 | 
             
                            "timestamp": datetime.now().isoformat(),
         | 
| @@ -215,26 +235,25 @@ class MemoryOptimizer(LoggerMixin): | |
| 215 235 | 
             
                            "summary": {
         | 
| 216 236 | 
             
                                **total_stats,
         | 
| 217 237 | 
             
                                "total_size_reduction": total_reduction,
         | 
| 218 | 
            -
                                "total_size_reduction_percent": total_reduction_percent
         | 
| 219 | 
            -
                            }
         | 
| 238 | 
            +
                                "total_size_reduction_percent": total_reduction_percent,
         | 
| 239 | 
            +
                            },
         | 
| 220 240 | 
             
                        }
         | 
| 221 | 
            -
             | 
| 241 | 
            +
             | 
| 222 242 | 
             
                    except Exception as e:
         | 
| 223 243 | 
             
                        self.logger.error(f"Error optimizing all memories: {e}")
         | 
| 224 | 
            -
                        return {
         | 
| 225 | 
            -
             | 
| 226 | 
            -
             | 
| 227 | 
            -
             | 
| 228 | 
            -
                
         | 
| 229 | 
            -
                def analyze_optimization_opportunities(self, agent_id: Optional[str] = None) -> Dict[str, Any]:
         | 
| 244 | 
            +
                        return {"success": False, "error": str(e)}
         | 
| 245 | 
            +
             | 
| 246 | 
            +
                def analyze_optimization_opportunities(
         | 
| 247 | 
            +
                    self, agent_id: Optional[str] = None
         | 
| 248 | 
            +
                ) -> Dict[str, Any]:
         | 
| 230 249 | 
             
                    """Analyze potential optimization opportunities without making changes.
         | 
| 231 | 
            -
             | 
| 250 | 
            +
             | 
| 232 251 | 
             
                    WHY: Users may want to understand what optimizations would be performed
         | 
| 233 252 | 
             
                    before actually running them, allowing for informed decisions.
         | 
| 234 | 
            -
             | 
| 253 | 
            +
             | 
| 235 254 | 
             
                    Args:
         | 
| 236 255 | 
             
                        agent_id: Optional specific agent to analyze
         | 
| 237 | 
            -
             | 
| 256 | 
            +
             | 
| 238 257 | 
             
                    Returns:
         | 
| 239 258 | 
             
                        Dict containing analysis results
         | 
| 240 259 | 
             
                    """
         | 
| @@ -243,159 +262,163 @@ class MemoryOptimizer(LoggerMixin): | |
| 243 262 | 
             
                            return self._analyze_single_agent(agent_id)
         | 
| 244 263 | 
             
                        else:
         | 
| 245 264 | 
             
                            return self._analyze_all_agents()
         | 
| 246 | 
            -
             | 
| 265 | 
            +
             | 
| 247 266 | 
             
                    except Exception as e:
         | 
| 248 267 | 
             
                        self.logger.error(f"Error analyzing optimization opportunities: {e}")
         | 
| 249 268 | 
             
                        return {"success": False, "error": str(e)}
         | 
| 250 | 
            -
             | 
| 269 | 
            +
             | 
| 251 270 | 
             
                def _parse_memory_sections(self, content: str) -> Dict[str, List[str]]:
         | 
| 252 271 | 
             
                    """Parse memory content into sections and items.
         | 
| 253 | 
            -
             | 
| 272 | 
            +
             | 
| 254 273 | 
             
                    Args:
         | 
| 255 274 | 
             
                        content: Memory file content
         | 
| 256 | 
            -
             | 
| 275 | 
            +
             | 
| 257 276 | 
             
                    Returns:
         | 
| 258 277 | 
             
                        Dict mapping section names to lists of items
         | 
| 259 278 | 
             
                    """
         | 
| 260 | 
            -
                    lines = content.split( | 
| 279 | 
            +
                    lines = content.split("\n")
         | 
| 261 280 | 
             
                    sections = {}
         | 
| 262 | 
            -
                    current_section =  | 
| 281 | 
            +
                    current_section = "header"
         | 
| 263 282 | 
             
                    current_items = []
         | 
| 264 | 
            -
             | 
| 283 | 
            +
             | 
| 265 284 | 
             
                    for line in lines:
         | 
| 266 | 
            -
                        if line.startswith( | 
| 285 | 
            +
                        if line.startswith("## "):
         | 
| 267 286 | 
             
                            # Save previous section
         | 
| 268 287 | 
             
                            if current_section:
         | 
| 269 288 | 
             
                                sections[current_section] = current_items
         | 
| 270 | 
            -
             | 
| 289 | 
            +
             | 
| 271 290 | 
             
                            # Start new section
         | 
| 272 | 
            -
                            section_name = line[3:].split( | 
| 291 | 
            +
                            section_name = line[3:].split("(")[0].strip()
         | 
| 273 292 | 
             
                            current_section = section_name
         | 
| 274 293 | 
             
                            current_items = [line]  # Include the header
         | 
| 275 | 
            -
             | 
| 294 | 
            +
             | 
| 276 295 | 
             
                        else:
         | 
| 277 296 | 
             
                            current_items.append(line)
         | 
| 278 | 
            -
             | 
| 297 | 
            +
             | 
| 279 298 | 
             
                    # Save last section
         | 
| 280 299 | 
             
                    if current_section:
         | 
| 281 300 | 
             
                        sections[current_section] = current_items
         | 
| 282 | 
            -
             | 
| 301 | 
            +
             | 
| 283 302 | 
             
                    return sections
         | 
| 284     | -
| 285     | -    def _optimize_section(
|     303 | +
|     304 | +    def _optimize_section(
|     305 | +        self, items: List[str], agent_id: str
|     306 | +    ) -> Tuple[List[str], Dict[str, int]]:
| 286 307 |         """Optimize a single section by removing duplicates and consolidating.
| 287     | -
|     308 | +
| 288 309 |         Args:
| 289 310 |             items: List of section content lines
| 290 311 |             agent_id: Agent identifier for context
| 291     | -
|     312 | +
| 292 313 |         Returns:
| 293 314 |             Tuple of (optimized_items, stats)
| 294 315 |         """
| 295     | -        stats = {
| 296     | -
| 297     | -            "items_consolidated": 0,
| 298     | -            "items_reordered": 0
| 299     | -        }
| 300     | -
|     316 | +        stats = {"duplicates_removed": 0, "items_consolidated": 0, "items_reordered": 0}
|     317 | +
| 301 318 |         # Separate header and bullet points
| 302 319 |         header_lines = []
| 303 320 |         bullet_points = []
| 304 321 |         other_lines = []
| 305     | -
|     322 | +
| 306 323 |         for line in items:
| 307 324 |             stripped = line.strip()
| 308     | -            if stripped.startswith(
|     325 | +            if stripped.startswith("- "):
| 309 326 |                 bullet_points.append(line)
| 310     | -            elif stripped.startswith(
|     327 | +            elif stripped.startswith("## ") or stripped.startswith("<!--"):
| 311 328 |                 header_lines.append(line)
| 312 329 |             else:
| 313 330 |                 other_lines.append(line)
| 314     | -
|     331 | +
| 315 332 |         if not bullet_points:
| 316 333 |             return items, stats
| 317     | -
|     334 | +
| 318 335 |         # Remove duplicates
| 319 336 |         deduplicated_points, duplicates_removed = self._remove_duplicates(bullet_points)
| 320 337 |         stats["duplicates_removed"] = duplicates_removed
| 321     | -
|     338 | +
| 322 339 |         # Consolidate similar items
| 323     | -        consolidated_points, items_consolidated = self._consolidate_similar_items(
|     340 | +        consolidated_points, items_consolidated = self._consolidate_similar_items(
|     341 | +            deduplicated_points
|     342 | +        )
| 324 343 |         stats["items_consolidated"] = items_consolidated
| 325     | -
|     344 | +
| 326 345 |         # Reorder by priority
| 327 346 |         reordered_points = self._reorder_by_priority(consolidated_points)
| 328 347 |         if reordered_points != consolidated_points:
| 329 348 |             stats["items_reordered"] = 1
| 330     | -
|     349 | +
| 331 350 |         # Rebuild section
| 332 351 |         optimized_items = header_lines + other_lines + reordered_points
| 333     | -
|     352 | +
| 334 353 |         return optimized_items, stats
| 335     | -
|     354 | +
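The reshaped `_optimize_section` first partitions a section into header lines, bullet points, and everything else before touching the bullets. A minimal standalone sketch of that partitioning step (names are illustrative, not package code):

```python
from typing import List, Tuple


def partition_section(items: List[str]) -> Tuple[List[str], List[str], List[str]]:
    """Split section lines into (headers, bullets, other), as in the hunk above."""
    headers, bullets, other = [], [], []
    for line in items:
        stripped = line.strip()
        if stripped.startswith("- "):
            bullets.append(line)
        elif stripped.startswith("## ") or stripped.startswith("<!--"):
            headers.append(line)
        else:
            other.append(line)
    return headers, bullets, other
```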
| 336 355 |     def _remove_duplicates(self, bullet_points: List[str]) -> Tuple[List[str], int]:
| 337 356 |         """Remove duplicate bullet points.
| 338     | -
|     357 | +
| 339 358 |         Args:
| 340 359 |             bullet_points: List of bullet point lines
| 341     | -
|     360 | +
| 342 361 |         Returns:
| 343 362 |             Tuple of (deduplicated_points, count_removed)
| 344 363 |         """
| 345 364 |         seen_content = set()
| 346 365 |         unique_points = []
| 347 366 |         duplicates_removed = 0
| 348     | -
|     367 | +
| 349 368 |         for point in bullet_points:
| 350 369 |             # Normalize content for comparison
| 351     | -            content = point.strip().lower().replace(
| 352     | -            content_normalized = re.sub(r
| 353     | -
|     370 | +            content = point.strip().lower().replace("- ", "")
|     371 | +            content_normalized = re.sub(r"\s+", " ", content).strip()
|     372 | +
| 354 373 |             if content_normalized not in seen_content:
| 355 374 |                 seen_content.add(content_normalized)
| 356 375 |                 unique_points.append(point)
| 357 376 |             else:
| 358 377 |                 duplicates_removed += 1
| 359 378 |                 self.logger.debug(f"Removed duplicate: {point.strip()[:50]}...")
| 360     | -
|     379 | +
| 361 380 |         return unique_points, duplicates_removed
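The dedupe pass treats two bullets as equal after stripping the leading `- `, lower-casing, and collapsing whitespace. A quick standalone check of that normalization (illustrative only):

```python
import re


def normalize(point: str) -> str:
    # Same normalization used for duplicate detection in the hunk above.
    content = point.strip().lower().replace("- ", "")
    return re.sub(r"\s+", " ", content).strip()


# Differ only in case and spacing, so they count as duplicates.
assert normalize("- Use   pathlib for paths") == normalize("-   use pathlib for paths")
```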
| 362     | -
| 363     | -    def _consolidate_similar_items(
|     381 | +
|     382 | +    def _consolidate_similar_items(
|     383 | +        self, bullet_points: List[str]
|     384 | +    ) -> Tuple[List[str], int]:
| 364 385 |         """Consolidate similar bullet points.
| 365     | -
|     386 | +
| 366 387 |         Args:
| 367 388 |             bullet_points: List of bullet point lines
| 368     | -
|     389 | +
| 369 390 |         Returns:
| 370 391 |             Tuple of (consolidated_points, count_consolidated)
| 371 392 |         """
| 372 393 |         if len(bullet_points) < 2:
| 373 394 |             return bullet_points, 0
| 374     | -
|     395 | +
| 375 396 |         consolidated = []
| 376 397 |         items_consolidated = 0
| 377 398 |         used_indices = set()
| 378     | -
|     399 | +
| 379 400 |         for i, point_a in enumerate(bullet_points):
| 380 401 |             if i in used_indices:
| 381 402 |                 continue
| 382     | -
| 383     | -            content_a = point_a.strip().replace(
|     403 | +
|     404 | +            content_a = point_a.strip().replace("- ", "")
| 384 405 |             similar_items = [point_a]
| 385 406 |             similar_indices = {i}
| 386     | -
|     407 | +
| 387 408 |             # Find similar items
| 388     | -            for j, point_b in enumerate(bullet_points[i+1:], i+1):
|     409 | +            for j, point_b in enumerate(bullet_points[i + 1 :], i + 1):
| 389 410 |                 if j in used_indices:
| 390 411 |                     continue
| 391     | -
| 392     | -                content_b = point_b.strip().replace(
| 393     | -                similarity = SequenceMatcher(
| 394     | -
|     412 | +
|     413 | +                content_b = point_b.strip().replace("- ", "")
|     414 | +                similarity = SequenceMatcher(
|     415 | +                    None, content_a.lower(), content_b.lower()
|     416 | +                ).ratio()
|     417 | +
| 395 418 |                 if similarity >= self.CONSOLIDATION_THRESHOLD:
| 396 419 |                     similar_items.append(point_b)
| 397 420 |                     similar_indices.add(j)
| 398     | -
|     421 | +
| 399 422 |             # Consolidate if we found similar items
| 400 423 |             if len(similar_items) > 1:
| 401 424 |                 consolidated_content = self._merge_similar_items(similar_items)

@@ -404,220 +427,230 @@ class MemoryOptimizer(LoggerMixin):

| 404 427 |                 self.logger.debug(f"Consolidated {len(similar_items)} similar items")
| 405 428 |             else:
| 406 429 |                 consolidated.append(point_a)
| 407     | -
|     430 | +
| 408 431 |             used_indices.update(similar_indices)
| 409     | -
|     432 | +
| 410 433 |         return consolidated, items_consolidated
| 411     | -
|     434 | +
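Consolidation compares bullets with `difflib.SequenceMatcher` ratios against `CONSOLIDATION_THRESHOLD`, which is defined elsewhere on `MemoryOptimizer`. A small standalone sketch of that comparison (the threshold value and strings below are placeholders):

```python
from difflib import SequenceMatcher

CONSOLIDATION_THRESHOLD = 0.85  # placeholder; the real value lives on MemoryOptimizer

a = "Use dependency injection for services"
b = "Use dependency injection for all services"

# Ratio of matched characters over total length; ~0.94 for these two bullets.
similarity = SequenceMatcher(None, a.lower(), b.lower()).ratio()
print(similarity >= CONSOLIDATION_THRESHOLD)  # True, so they would be consolidated
```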
| 412 435 |     def _merge_similar_items(self, similar_items: List[str]) -> str:
| 413 436 |         """Merge similar items into a single consolidated item.
| 414     | -
|     437 | +
| 415 438 |         Args:
| 416 439 |             similar_items: List of similar bullet points
| 417     | -
|     440 | +
| 418 441 |         Returns:
| 419 442 |             Consolidated content string
| 420 443 |         """
| 421 444 |         # Take the longest/most detailed item as base
| 422     | -        contents = [item.strip().replace(
|     445 | +        contents = [item.strip().replace("- ", "") for item in similar_items]
| 423 446 |         base_content = max(contents, key=len)
| 424     | -
|     447 | +
| 425 448 |         # Look for additional details in other items
| 426 449 |         all_words = set()
| 427 450 |         for content in contents:
| 428 451 |             all_words.update(content.lower().split())
| 429     | -
|     452 | +
| 430 453 |         base_words = set(base_content.lower().split())
| 431 454 |         additional_words = all_words - base_words
| 432     | -
|     455 | +
| 433 456 |         # If there are meaningful additional words, add them
| 434 457 |         if additional_words and len(additional_words) < 5:  # Don't add too much
| 435 458 |             additional_text = " (" + ", ".join(sorted(additional_words)) + ")"
| 436 459 |             return base_content + additional_text
| 437     | -
|     460 | +
| 438 461 |         return base_content
| 439     | -
|     462 | +
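Merging keeps the longest bullet as the base and appends up to four words that appear only in the other variants. A standalone sketch with invented data:

```python
from typing import List


def merge(similar: List[str]) -> str:
    # Longest item wins; extra words from shorter variants are appended in parentheses.
    contents = [item.strip().replace("- ", "") for item in similar]
    base = max(contents, key=len)
    extra = {w for c in contents for w in c.lower().split()} - set(base.lower().split())
    if extra and len(extra) < 5:
        return base + " (" + ", ".join(sorted(extra)) + ")"
    return base


print(merge(["- Cache API responses", "- Cache API responses aggressively"]))
# Cache API responses aggressively
```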
| 440 463 |     def _reorder_by_priority(self, bullet_points: List[str]) -> List[str]:
| 441 464 |         """Reorder bullet points by priority/importance.
| 442     | -
|     465 | +
| 443 466 |         Args:
| 444 467 |             bullet_points: List of bullet point lines
| 445     | -
|     468 | +
| 446 469 |         Returns:
| 447 470 |             Reordered list of bullet points
| 448 471 |         """
|     472 | +
| 449 473 |         def get_priority_score(point: str) -> int:
| 450 474 |             content = point.lower()
| 451 475 |             score = 0
| 452     | -
|     476 | +
| 453 477 |             # High priority keywords
| 454     | -            for keyword in self.PRIORITY_KEYWORDS[
|     478 | +            for keyword in self.PRIORITY_KEYWORDS["high"]:
| 455 479 |                 if keyword in content:
| 456 480 |                     score += 3
| 457     | -
|     481 | +
| 458 482 |             # Medium priority keywords
| 459     | -            for keyword in self.PRIORITY_KEYWORDS[
|     483 | +            for keyword in self.PRIORITY_KEYWORDS["medium"]:
| 460 484 |                 if keyword in content:
| 461 485 |                     score += 2
| 462     | -
|     486 | +
| 463 487 |             # Low priority keywords
| 464     | -            for keyword in self.PRIORITY_KEYWORDS[
|     488 | +            for keyword in self.PRIORITY_KEYWORDS["low"]:
| 465 489 |                 if keyword in content:
| 466 490 |                     score += 1
| 467     | -
|     491 | +
| 468 492 |             # Length-based priority (more detailed items are often more important)
| 469 493 |             if len(content) > 100:
| 470 494 |                 score += 1
| 471     | -
|     495 | +
| 472 496 |             return score
| 473     | -
|     497 | +
| 474 498 |         # Sort by priority score (descending) then alphabetically
| 475 499 |         return sorted(bullet_points, key=lambda x: (-get_priority_score(x), x.lower()))
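Priority reordering scores each bullet from keyword tiers plus a small bonus for long entries, then sorts by score descending with an alphabetical tie-break. A standalone sketch with made-up keyword lists (the real ones are the `PRIORITY_KEYWORDS` attribute on the class):

```python
from typing import Dict, List

PRIORITY_KEYWORDS: Dict[str, List[str]] = {
    "high": ["never", "always", "critical"],  # illustrative, not the package's lists
    "medium": ["should", "prefer"],
    "low": ["note", "consider"],
}


def score(point: str) -> int:
    content = point.lower()
    total = 0
    # Each matching keyword adds its tier weight, mirroring the loops above.
    for weight, keywords in ((3, PRIORITY_KEYWORDS["high"]),
                             (2, PRIORITY_KEYWORDS["medium"]),
                             (1, PRIORITY_KEYWORDS["low"])):
        total += sum(weight for kw in keywords if kw in content)
    if len(content) > 100:  # longer, more detailed bullets get a small boost
        total += 1
    return total


bullets = ["- Consider adding docs", "- Never commit secrets"]
print(sorted(bullets, key=lambda x: (-score(x), x.lower())))
# ['- Never commit secrets', '- Consider adding docs']
```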
| 476     | -
| 477     | -    def _rebuild_memory_content(
|     500 | +
|     501 | +    def _rebuild_memory_content(
|     502 | +        self, sections: Dict[str, List[str]], agent_id: str
|     503 | +    ) -> str:
| 478 504 |         """Rebuild memory content from optimized sections.
| 479     | -
|     505 | +
| 480 506 |         Args:
| 481 507 |             sections: Dict of section names to content lines
| 482 508 |             agent_id: Agent identifier
| 483     | -
|     509 | +
| 484 510 |         Returns:
| 485 511 |             Rebuilt memory content string
| 486 512 |         """
| 487 513 |         content_lines = []
| 488     | -
|     514 | +
| 489 515 |         # Add header if it exists
| 490     | -        if
| 491     | -            content_lines.extend(sections[
| 492     | -
|     516 | +        if "header" in sections:
|     517 | +            content_lines.extend(sections["header"])
|     518 | +
| 493 519 |         # Add sections in a logical order
| 494 520 |         section_order = [
| 495     | -
| 496     | -
| 497     | -
| 498     | -
| 499     | -
| 500     | -
| 501     | -
| 502     | -
| 503     | -
| 504     | -
|     521 | +            "Project Architecture",
|     522 | +            "Coding Patterns Learned",
|     523 | +            "Implementation Guidelines",
|     524 | +            "Domain-Specific Knowledge",
|     525 | +            "Effective Strategies",
|     526 | +            "Common Mistakes to Avoid",
|     527 | +            "Integration Points",
|     528 | +            "Performance Considerations",
|     529 | +            "Current Technical Context",
|     530 | +            "Recent Learnings",
| 505 531 |         ]
| 506     | -
|     532 | +
| 507 533 |         # Add ordered sections
| 508 534 |         for section_name in section_order:
| 509     | -            if section_name in sections and section_name !=
| 510     | -                if content_lines and not content_lines[-1].strip() ==
| 511     | -                    content_lines.append(
|     535 | +            if section_name in sections and section_name != "header":
|     536 | +                if content_lines and not content_lines[-1].strip() == "":
|     537 | +                    content_lines.append("")  # Add spacing
| 512 538 |                 content_lines.extend(sections[section_name])
| 513     | -
|     539 | +
| 514 540 |         # Add any remaining sections not in the order
| 515 541 |         for section_name, section_content in sections.items():
| 516     | -            if section_name not in section_order and section_name !=
| 517     | -                if content_lines and not content_lines[-1].strip() ==
| 518     | -                    content_lines.append(
|     542 | +            if section_name not in section_order and section_name != "header":
|     543 | +                if content_lines and not content_lines[-1].strip() == "":
|     544 | +                    content_lines.append("")
| 519 545 |                 content_lines.extend(section_content)
| 520     | -
|     546 | +
| 521 547 |         # Update timestamp
| 522     | -        content =
| 523     | -        timestamp = datetime.now().strftime(
|     548 | +        content = "\n".join(content_lines)
|     549 | +        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
| 524 550 |         content = re.sub(
| 525     | -            r
| 526     | -            f
| 527     | -            content
|     551 | +            r"<!-- Last Updated: .+ \| Auto-updated by: .+ -->",
|     552 | +            f"<!-- Last Updated: {timestamp} | Auto-updated by: optimizer -->",
|     553 | +            content,
| 528 554 |         )
| 529     | -
|     555 | +
| 530 556 |         return content
| 531     | -
|     557 | +
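Rebuilding finishes by rewriting the `<!-- Last Updated: ... -->` marker in place. A standalone sketch of that regex update (the sample content is invented):

```python
import re
from datetime import datetime

content = (
    "# Engineer Memory\n"
    "<!-- Last Updated: 2024-01-01 00:00:00 | Auto-updated by: learner -->"
)
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

# Replace the existing marker with a fresh timestamp attributed to the optimizer.
content = re.sub(
    r"<!-- Last Updated: .+ \| Auto-updated by: .+ -->",
    f"<!-- Last Updated: {timestamp} | Auto-updated by: optimizer -->",
    content,
)
print(content)
```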
| 532 558 |     def _create_backup(self, memory_file: Path) -> Path:
| 533 559 |         """Create backup of memory file before optimization.
| 534     | -
|     560 | +
| 535 561 |         Args:
| 536 562 |             memory_file: Path to memory file
| 537     | -
|     563 | +
| 538 564 |         Returns:
| 539 565 |             Path to backup file
| 540 566 |         """
| 541     | -        timestamp = datetime.now().strftime(
|     567 | +        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
| 542 568 |         backup_name = f"{memory_file.stem}_backup_{timestamp}{memory_file.suffix}"
| 543 569 |         backup_path = memory_file.parent / backup_name
| 544     | -
| 545     | -        backup_path.write_text(
|     570 | +
|     571 | +        backup_path.write_text(
|     572 | +            memory_file.read_text(encoding="utf-8"), encoding="utf-8"
|     573 | +        )
| 546 574 |         self.logger.debug(f"Created backup: {backup_path}")
| 547     | -
|     575 | +
| 548 576 |         return backup_path
| 549     | -
|     577 | +
| 550 578 |     def _analyze_single_agent(self, agent_id: str) -> Dict[str, Any]:
| 551 579 |         """Analyze optimization opportunities for a single agent.
| 552     | -
|     580 | +
| 553 581 |         Args:
| 554 582 |             agent_id: Agent identifier
| 555     | -
|     583 | +
| 556 584 |         Returns:
| 557 585 |             Analysis results
| 558 586 |         """
| 559 587 |         memory_file = self.memories_dir / f"{agent_id}_agent.md"
| 560     | -
|     588 | +
| 561 589 |         if not memory_file.exists():
| 562 590 |             return {
| 563 591 |                 "success": False,
| 564 592 |                 "agent_id": agent_id,
| 565     | -                "error": "Memory file not found"
|     593 | +                "error": "Memory file not found",
| 566 594 |             }
| 567     | -
| 568     | -        content = memory_file.read_text(encoding=
|     595 | +
|     596 | +        content = memory_file.read_text(encoding="utf-8")
| 569 597 |         sections = self._parse_memory_sections(content)
| 570     | -
|     598 | +
| 571 599 |         analysis = {
| 572 600 |             "success": True,
| 573 601 |             "agent_id": agent_id,
| 574 602 |             "file_size": len(content),
| 575     | -            "sections": len(
| 576     | -
|     603 | +            "sections": len(
|     604 | +                [s for s in sections if not s.lower() in ["header", "metadata"]]
|     605 | +            ),
|     606 | +            "opportunities": [],
| 577 607 |         }
| 578     | -
|     608 | +
| 579 609 |         # Analyze each section for opportunities
| 580 610 |         for section_name, items in sections.items():
| 581     | -            if section_name.lower() in [
|     611 | +            if section_name.lower() in ["header", "metadata"]:
| 582 612 |                 continue
| 583     | -
| 584     | -            bullet_points = [line for line in items if line.strip().startswith(
| 585     | -
|     613 | +
|     614 | +            bullet_points = [line for line in items if line.strip().startswith("- ")]
|     615 | +
| 586 616 |             if len(bullet_points) > 1:
| 587 617 |                 # Check for duplicates
| 588 618 |                 unique_points, duplicates = self._remove_duplicates(bullet_points)
| 589 619 |                 if duplicates > 0:
| 590     | -                    analysis["opportunities"].append(
| 591     | -
|     620 | +                    analysis["opportunities"].append(
|     621 | +                        f"{section_name}: {duplicates} duplicate items"
|     622 | +                    )
|     623 | +
| 592 624 |                 # Check for similar items
| 593     | -                consolidated, consolidated_count = self._consolidate_similar_items(
|     625 | +                consolidated, consolidated_count = self._consolidate_similar_items(
|     626 | +                    unique_points
|     627 | +                )
| 594 628 |                 if consolidated_count > 0:
| 595     | -                    analysis["opportunities"].append(
| 596     | -
|     629 | +                    analysis["opportunities"].append(
|     630 | +                        f"{section_name}: {consolidated_count} items can be consolidated"
|     631 | +                    )
|     632 | +
| 597 633 |         return analysis
| 598     | -
|     634 | +
| 599 635 |     def _analyze_all_agents(self) -> Dict[str, Any]:
| 600 636 |         """Analyze optimization opportunities for all agents.
| 601     | -
|     637 | +
| 602 638 |         Returns:
| 603 639 |             Analysis results for all agents
| 604 640 |         """
| 605 641 |         if not self.memories_dir.exists():
| 606     | -            return {
| 607     | -
| 608     | -                "error": "Memory directory not found"
| 609     | -            }
| 610     | -
|     642 | +            return {"success": False, "error": "Memory directory not found"}
|     643 | +
| 611 644 |         memory_files = list(self.memories_dir.glob("*_agent.md"))
| 612 645 |         agents_analysis = {}
| 613     | -
|     646 | +
| 614 647 |         for memory_file in memory_files:
| 615     | -            agent_id = memory_file.stem.replace(
|     648 | +            agent_id = memory_file.stem.replace("_agent", "")
| 616 649 |             agents_analysis[agent_id] = self._analyze_single_agent(agent_id)
| 617     | -
|     650 | +
| 618 651 |         return {
| 619 652 |             "success": True,
| 620 653 |             "timestamp": datetime.now().isoformat(),
| 621 654 |             "agents_analyzed": len(agents_analysis),
| 622     | -            "agents": agents_analysis
| 623     | -        }
|     655 | +            "agents": agents_analysis,
|     656 | +        }
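The analysis path never modifies memory files; it only reports opportunities per agent. A hedged sketch of the result shape a caller can expect from `_analyze_all_agents` (every value below is invented for illustration):

```python
# Illustrative shape of the returned dict; agent names, sizes, and counts are made up.
example = {
    "success": True,
    "timestamp": "2025-01-01T12:00:00",
    "agents_analyzed": 2,
    "agents": {
        "engineer": {
            "success": True,
            "agent_id": "engineer",
            "file_size": 2048,
            "sections": 4,
            "opportunities": ["Coding Patterns Learned: 2 duplicate items"],
        },
        "qa": {"success": False, "agent_id": "qa", "error": "Memory file not found"},
    },
}
```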