attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
"""Haystack Adapter
|
|
2
|
+
|
|
3
|
+
Creates RAG pipelines and document processing agents using deepset Haystack.
|
|
4
|
+
Best for document QA, search, and NLP workflows.
|
|
5
|
+
|
|
6
|
+
Requires: pip install haystack-ai
|
|
7
|
+
|
|
8
|
+
Copyright 2025 Smart-AI-Memory
|
|
9
|
+
Licensed under Fair Source License 0.9
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import os
|
|
13
|
+
from collections.abc import Callable
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from attune_llm.agent_factory.base import (
|
|
17
|
+
AgentCapability,
|
|
18
|
+
AgentConfig,
|
|
19
|
+
AgentRole,
|
|
20
|
+
BaseAdapter,
|
|
21
|
+
BaseAgent,
|
|
22
|
+
BaseWorkflow,
|
|
23
|
+
WorkflowConfig,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
# Lazy availability flag: None until the first probe, then True/False.
_haystack_available = None


def _check_haystack():
    """Return True if the ``haystack`` package can be imported.

    The outcome is memoized in the module-level ``_haystack_available``
    flag so the (potentially slow) import is attempted at most once per
    process.
    """
    global _haystack_available
    if _haystack_available is not None:
        return _haystack_available
    try:
        import haystack  # noqa: F401
    except ImportError:
        _haystack_available = False
    else:
        _haystack_available = True
    return _haystack_available
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class HaystackAgent(BaseAgent):
    """Agent wrapping a Haystack ``Pipeline`` or a bare generator component.

    Exactly one of ``pipeline``/``generator`` is normally supplied; if
    neither is, :meth:`invoke` returns a placeholder message instead of
    raising.
    """

    def __init__(self, config: AgentConfig, pipeline=None, generator=None):
        """Store the Haystack objects this agent delegates to.

        Args:
            config: Shared agent configuration.
            pipeline: Optional Haystack ``Pipeline`` run end-to-end.
            generator: Optional generator component used when no pipeline
                is given.
        """
        super().__init__(config)
        self._pipeline = pipeline
        self._generator = generator

    async def invoke(self, input_data: str | dict, context: dict | None = None) -> dict:
        """Invoke the Haystack pipeline/generator.

        Args:
            input_data: Either the query string itself or a dict holding it
                under ``"query"`` (falling back to ``"input"``, then the
                dict's string form).
            context: Extra key/value pairs merged into the pipeline inputs.

        Returns:
            Dict with ``"output"`` (the answer text) and ``"metadata"``.
            Errors are reported in-band as ``{"output": "Error: ...", ...}``
            rather than raised.
        """
        # Normalize the payload to a plain query string.
        if isinstance(input_data, str):
            query = input_data
        else:
            query = input_data.get("query", input_data.get("input", str(input_data)))

        try:
            if self._pipeline:
                # Run the full pipeline; context entries become extra inputs.
                result = self._pipeline.run({"query": query, **(context or {})})

                # Extract a readable answer from the common result shapes.
                if "answers" in result:
                    answers = result["answers"]
                    output = answers[0].data if answers else "No answer found"
                elif "replies" in result:
                    output = result["replies"][0] if result["replies"] else "No reply"
                else:
                    output = str(result)

            elif self._generator:
                # Use generator directly.
                result = self._generator.run(prompt=query)
                # BUGFIX: the previous `result.get("replies", [query])[0]`
                # raised IndexError when the generator returned an *empty*
                # replies list, which the broad except below then
                # misreported as an error. Treat empty the same as missing.
                replies = result.get("replies") or [query]
                output = replies[0]

            else:
                output = f"[{self.name}] No pipeline configured"
                result = {}

            # Record the exchange in the conversation log.
            # NOTE(review): for chat generators `output` may be a message
            # object rather than a str — confirm downstream consumers cope.
            self._conversation_history.append({"role": "user", "content": query})
            self._conversation_history.append({"role": "assistant", "content": output})

            return {
                "output": output,
                "metadata": {
                    "framework": "haystack",
                    "model": self.model,
                    "raw_result": result if isinstance(result, dict) else {},
                },
            }

        except Exception as e:
            # Fail soft: surface the error in-band so orchestration continues.
            return {"output": f"Error: {e}", "metadata": {"error": str(e)}}

    async def stream(self, input_data: str | dict, context: dict | None = None):
        """Haystack 2.0 supports streaming for some generators.

        Most Haystack components don't stream, so the full result is
        yielded as a single chunk.
        """
        result = await self.invoke(input_data, context)
        yield result
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
class HaystackWorkflow(BaseWorkflow):
    """Workflow that executes a Haystack ``Pipeline``.

    When a pipeline is supplied it is run directly; otherwise the
    workflow falls back to invoking its agents one after another,
    feeding each agent's output into the next.
    """

    def __init__(self, config: WorkflowConfig, agents: list[BaseAgent], pipeline=None):
        super().__init__(config, agents)
        self._pipeline = pipeline

    async def run(self, input_data: str | dict, initial_state: dict | None = None) -> dict:
        """Run the Haystack pipeline workflow."""
        # Normalize the incoming payload to a plain query string.
        query = (
            input_data
            if isinstance(input_data, str)
            else input_data.get("query", input_data.get("input", str(input_data)))
        )

        self._state = initial_state or {}

        try:
            if not self._pipeline:
                # No pipeline configured: fall back to sequential agents.
                return await self._run_sequential(input_data)

            result = self._pipeline.run({"query": query, **self._state})

            # Pull a human-readable answer out of the common result shapes.
            if "answers" in result:
                answers = result["answers"]
                output = answers[0].data if answers else ""
            elif "replies" in result:
                replies = result["replies"]
                output = replies[0] if replies else ""
            else:
                output = str(result)

            # Fold the raw pipeline result back into workflow state.
            self._state.update(result)

            return {
                "output": output,
                "state": self._state,
                "metadata": {"framework": "haystack"},
            }

        except Exception as e:
            # Errors are reported in-band, never raised to the caller.
            return {"output": f"Error: {e}", "error": str(e)}

    async def _run_sequential(self, input_data: str | dict) -> dict:
        """Fallback: invoke each agent in turn, chaining outputs."""
        payload = input_data
        collected = []
        for member in self.agents.values():
            step = await member.invoke(payload)
            collected.append(step)
            # Next agent receives this agent's output (or the unchanged
            # payload when the step produced none).
            payload = step.get("output", payload)
        return {"output": payload, "results": collected}

    async def stream(self, input_data: str | dict, initial_state: dict | None = None):
        """Stream workflow execution as a single final chunk."""
        yield await self.run(input_data, initial_state)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class HaystackAdapter(BaseAdapter):
    """Adapter for deepset Haystack framework.

    Translates the factory's provider-agnostic configs into Haystack
    generators, RAG pipelines, workflows, and document stores. Supports
    the "anthropic" and "openai" providers only.
    """

    def __init__(self, provider: str = "anthropic", api_key: str | None = None):
        # Resolve the API key from the provider-appropriate env var when
        # not passed explicitly. May end up None if neither is set; the
        # failure then surfaces later, at generator construction/use time.
        self.provider = provider
        self.api_key = api_key or os.getenv(
            "ANTHROPIC_API_KEY" if provider == "anthropic" else "OPENAI_API_KEY",
        )

    @property
    def framework_name(self) -> str:
        # Stable identifier used by the factory/registry.
        return "haystack"

    def is_available(self) -> bool:
        # Delegates to the module-level memoized import probe.
        return bool(_check_haystack())

    def _get_generator(self, config: AgentConfig):
        """Build a provider-specific Haystack generator for *config*.

        Raises:
            ImportError: if haystack-ai is not installed.
            ValueError: if ``self.provider`` is not anthropic/openai.
        """
        if not self.is_available():
            raise ImportError("Haystack not installed. Run: pip install haystack-ai")

        # Explicit model override wins; otherwise map the tier to a model
        # via the BaseAdapter helper.
        model_id = config.model_override or self.get_model_for_tier(
            config.model_tier,
            self.provider,
        )

        if self.provider == "anthropic":
            from haystack_integrations.components.generators.anthropic import AnthropicChatGenerator

            # NOTE(review): recent Haystack generators expect a
            # ``Secret`` for api_key, not a raw str — confirm against the
            # pinned haystack-ai version.
            return AnthropicChatGenerator(
                model=model_id,
                api_key=self.api_key,
                generation_kwargs={
                    "temperature": config.temperature,
                    "max_tokens": config.max_tokens,
                },
            )
        if self.provider == "openai":
            from haystack.components.generators import OpenAIGenerator

            return OpenAIGenerator(
                model=model_id,
                api_key=self.api_key,
                generation_kwargs={
                    "temperature": config.temperature,
                    "max_tokens": config.max_tokens,
                },
            )
        raise ValueError(f"Unsupported provider for Haystack: {self.provider}")

    def create_agent(self, config: AgentConfig) -> HaystackAgent:
        """Create a Haystack agent.

        RAG-oriented roles that also declare the RETRIEVAL capability get
        a full pipeline; every other config gets a bare generator.
        """
        if not self.is_available():
            raise ImportError("Haystack not installed")

        # For RAG roles, create a full pipeline
        if config.role in [AgentRole.RETRIEVER, AgentRole.ANSWERER, AgentRole.SUMMARIZER]:
            if AgentCapability.RETRIEVAL in config.capabilities:
                pipeline = self._create_rag_pipeline(config)
                return HaystackAgent(config, pipeline=pipeline)

        # For other roles, just use a generator
        generator = self._get_generator(config)
        return HaystackAgent(config, generator=generator)

    def _create_rag_pipeline(self, config: AgentConfig):
        """Create a RAG pipeline.

        NOTE(review): despite the name, this wires only
        prompt_builder -> generator; no retriever component is added, so
        the ``{{context}}`` placeholder must be supplied by the caller.
        """
        from haystack import Pipeline
        from haystack.components.builders import PromptBuilder

        pipeline = Pipeline()

        # Add prompt builder; the agent's system prompt doubles as the
        # Jinja template when present.
        template = (
            config.system_prompt
            or """
Answer the question based on the context.

Context: {{context}}
Question: {{query}}
Answer:
"""
        )
        prompt_builder = PromptBuilder(template=template)
        pipeline.add_component("prompt_builder", prompt_builder)

        # Add generator
        generator = self._get_generator(config)
        pipeline.add_component("generator", generator)

        # Connect components
        pipeline.connect("prompt_builder", "generator")

        return pipeline

    def create_workflow(self, config: WorkflowConfig, agents: list[BaseAgent]) -> HaystackWorkflow:
        """Create a Haystack Pipeline workflow.

        Components are added but deliberately left unconnected; the
        HaystackWorkflow falls back to its own sequential execution.
        """
        if not self.is_available():
            raise ImportError("Haystack not installed")

        from haystack import Pipeline

        # Build a pipeline from agents
        pipeline = Pipeline()

        # For now, create a simple sequential pipeline
        # More complex routing would require custom components

        for _i, agent in enumerate(agents):
            # Only agents carrying a bare generator can be mounted.
            if hasattr(agent, "_generator") and agent._generator:
                pipeline.add_component(f"agent_{agent.name}", agent._generator)

        # Connect sequentially (simplified)
        # Real implementation would need proper input/output mapping

        return HaystackWorkflow(config, agents, pipeline=pipeline)

    def create_tool(
        self,
        name: str,
        description: str,
        func: Callable,
        args_schema: dict | None = None,
    ) -> dict:
        """Create a tool for Haystack (custom component would be needed).

        Returns a plain descriptor dict; Haystack has no native tool
        abstraction this adapter maps onto.
        """
        return {"name": name, "description": description, "func": func, "args_schema": args_schema}

    def create_document_store(self, store_type: str = "in_memory") -> Any:
        """Create a Haystack document store.

        Raises:
            ImportError: if haystack-ai is not installed.
            ValueError: for any ``store_type`` other than "in_memory".
        """
        if not self.is_available():
            raise ImportError("Haystack not installed")

        if store_type == "in_memory":
            from haystack.document_stores.in_memory import InMemoryDocumentStore

            return InMemoryDocumentStore()
        raise ValueError(f"Unsupported store type: {store_type}")
|
|
@@ -0,0 +1,362 @@
|
|
|
1
|
+
"""LangChain Adapter
|
|
2
|
+
|
|
3
|
+
Creates agents using LangChain's primitives while integrating
|
|
4
|
+
with Empathy's cost optimization and pattern learning.
|
|
5
|
+
|
|
6
|
+
Requires: pip install langchain langchain-anthropic langchain-openai
|
|
7
|
+
|
|
8
|
+
Copyright 2025 Smart-AI-Memory
|
|
9
|
+
Licensed under Fair Source License 0.9
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import os
|
|
13
|
+
from collections.abc import Callable
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from pydantic import SecretStr
|
|
17
|
+
|
|
18
|
+
from attune_llm.agent_factory.base import (
|
|
19
|
+
AgentCapability,
|
|
20
|
+
AgentConfig,
|
|
21
|
+
BaseAdapter,
|
|
22
|
+
BaseAgent,
|
|
23
|
+
BaseWorkflow,
|
|
24
|
+
WorkflowConfig,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
# Lazy imports for LangChain
_langchain_available = None


def _check_langchain():
    """Return True if LangChain can be imported, memoizing the answer.

    The first call performs the import probe and stores the outcome in the
    module-level ``_langchain_available`` flag; subsequent calls are free.
    """
    global _langchain_available
    if _langchain_available is not None:
        return _langchain_available
    try:
        import langchain  # noqa: F401
    except ImportError:
        _langchain_available = False
    else:
        _langchain_available = True
    return _langchain_available
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class LangChainAgent(BaseAgent):
    """Agent wrapping a LangChain chain or agent executor.

    When both are supplied, the agent executor takes precedence over the
    bare chain. The wrapped object is any LangChain "runnable" (supports
    ``invoke``/``ainvoke`` and optionally ``astream``).
    """

    def __init__(self, config: AgentConfig, chain=None, agent_executor=None):
        super().__init__(config)
        self._chain = chain
        self._agent_executor = agent_executor
        # Prefer the full agent executor over a plain chain.
        self._runnable = agent_executor or chain

    def _build_invoke_input(
        self, input_data: str | dict, context: dict | None
    ) -> dict[str, Any]:
        """Normalize caller input into the dict shape LangChain expects.

        Copies dict inputs so the caller's object is never mutated, then
        injects the optional context and (when memory is enabled) the last
        10 conversation messages as ``chat_history``.
        """
        if isinstance(input_data, str):
            invoke_input: dict[str, Any] = {"input": input_data}
        else:
            invoke_input = dict(input_data)

        if context:
            invoke_input["context"] = context

        # Bound prompt growth: only the most recent 10 messages are replayed.
        if self.config.memory_enabled and self._conversation_history:
            invoke_input["chat_history"] = self._conversation_history[-10:]

        return invoke_input

    async def invoke(self, input_data: str | dict, context: dict | None = None) -> dict:
        """Invoke the LangChain agent/chain.

        Returns a dict with ``output`` (text) and ``metadata``; errors are
        captured and reported in the result rather than raised.
        """
        if not self._runnable:
            return {"output": "No LangChain runnable configured", "metadata": {}}

        invoke_input = self._build_invoke_input(input_data, context)

        try:
            # Use ainvoke for async when available; fall back to sync invoke.
            if hasattr(self._runnable, "ainvoke"):
                result = await self._runnable.ainvoke(invoke_input)
            else:
                result = self._runnable.invoke(invoke_input)

            # Extract a plain-text output from the common result shapes.
            if isinstance(result, dict):
                output = result.get("output", result.get("answer", str(result)))
            else:
                output = str(result)

            # Track conversation so future calls can replay chat_history.
            user_msg = invoke_input.get("input", str(input_data))
            self._conversation_history.append({"role": "user", "content": user_msg})
            self._conversation_history.append({"role": "assistant", "content": output})

            return {
                "output": output,
                "metadata": {
                    "model": self.model,
                    "framework": "langchain",
                    "raw_result": result if isinstance(result, dict) else None,
                },
            }

        except Exception as e:
            # Surface the failure to the caller instead of propagating.
            return {
                "output": f"Error: {e!s}",
                "metadata": {"error": str(e), "framework": "langchain"},
            }

    async def stream(self, input_data: str | dict, context: dict | None = None):
        """Stream LangChain response chunks as dicts.

        Falls back to a single non-streaming result when the runnable does
        not implement ``astream``.
        """
        if not self._runnable:
            yield {"output": "No LangChain runnable configured", "metadata": {}}
            return

        # BUG FIX: previously ``context`` was silently ignored here and dict
        # inputs were aliased (mutating the caller's dict). Format the input
        # exactly as invoke() does, context and chat history included.
        invoke_input = self._build_invoke_input(input_data, context)

        try:
            if hasattr(self._runnable, "astream"):
                async for chunk in self._runnable.astream(invoke_input):
                    if isinstance(chunk, dict):
                        yield chunk
                    else:
                        yield {"chunk": str(chunk)}
            else:
                # Fallback to non-streaming
                result = await self.invoke(input_data, context)
                yield result
        except Exception as e:
            yield {"error": str(e)}
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
class LangChainWorkflow(BaseWorkflow):
    """Workflow using LangChain's SequentialChain or custom routing."""

    def __init__(self, config: WorkflowConfig, agents: list[BaseAgent], chain=None):
        super().__init__(config, agents)
        self._chain = chain

    async def run(self, input_data: str | dict, initial_state: dict | None = None) -> dict:
        """Run the LangChain workflow.

        Uses the composed chain when one is configured; otherwise executes
        the registered agents sequentially, feeding each agent's output into
        the next.
        """
        self._state = initial_state or {}

        if not self._chain:
            # Sequential fallback: chain agent outputs hand-over-hand.
            step_results: list[dict] = []
            payload: str | dict = input_data

            for member in self.agents.values():
                step = await member.invoke(payload, {"state": self._state})
                step["agent"] = member.name
                step_results.append(step)
                payload = step["output"]

            final_output = step_results[-1]["output"] if step_results else ""
            return {
                "output": final_output,
                "results": step_results,
                "state": self._state,
            }

        # Composed-chain path: prefer async invocation when supported.
        try:
            if hasattr(self._chain, "ainvoke"):
                chain_result = await self._chain.ainvoke(input_data)
            else:
                chain_result = self._chain.invoke(input_data)

            if isinstance(chain_result, dict):
                output = chain_result.get("output", str(chain_result))
            else:
                output = str(chain_result)
            return {"output": output, "results": [chain_result], "state": self._state}
        except Exception as e:
            # NOTE(review): the error result intentionally carries no
            # "results"/"state" keys, matching the original contract.
            return {"output": f"Error: {e}", "error": str(e)}

    async def stream(self, input_data: str | dict, initial_state: dict | None = None):
        """Stream workflow execution as start/chunk/end events per agent.

        Every agent receives the original ``input_data``; outputs are not
        chained between agents here, and ``initial_state`` is unused.
        """
        for member in self.agents.values():
            yield {"event": "agent_start", "agent": member.name}
            async for piece in member.stream(input_data):  # type: ignore[attr-defined]
                yield {"event": "chunk", "agent": member.name, "data": piece}
            yield {"event": "agent_end", "agent": member.name}
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
class LangChainAdapter(BaseAdapter):
    """Adapter for LangChain framework.

    Translates Empathy agent/workflow configs into LangChain chains,
    tool-calling agents, and tools. All LangChain imports are deferred so
    the adapter can be constructed even when LangChain is not installed.
    """

    def __init__(self, provider: str = "anthropic", api_key: str | None = None):
        """Initialize LangChain adapter.

        Args:
            provider: LLM provider (anthropic, openai)
            api_key: API key (uses env var if not provided)

        """
        self.provider = provider
        # Fall back to the provider-specific environment variable.
        self.api_key = api_key or os.getenv(
            "ANTHROPIC_API_KEY" if provider == "anthropic" else "OPENAI_API_KEY",
        )

    @property
    def framework_name(self) -> str:
        return "langchain"

    def is_available(self) -> bool:
        """Check if LangChain is installed."""
        return bool(_check_langchain())

    def _get_llm(self, config: AgentConfig):
        """Build the LangChain chat model for the configured provider.

        Raises:
            ImportError: If LangChain is not installed.
            ValueError: If the provider is unsupported, or the Anthropic
                API key is missing.
        """
        if not self.is_available():
            raise ImportError(
                "LangChain not installed. Run: pip install langchain langchain-anthropic",
            )

        # An explicit override wins; otherwise resolve tier -> model id.
        model_id = config.model_override or self.get_model_for_tier(
            config.model_tier,
            self.provider,
        )

        if self.provider == "anthropic":
            from langchain_anthropic import ChatAnthropic

            # LangChain API varies between versions - use type: ignore for flexibility
            # ChatAnthropic requires api_key as SecretStr (not None)
            if not self.api_key:
                raise ValueError("API key required for Anthropic provider")
            return ChatAnthropic(  # type: ignore[call-arg]
                model=model_id,
                api_key=SecretStr(self.api_key),
                temperature=config.temperature,
                max_tokens_to_sample=config.max_tokens,
            )
        if self.provider == "openai":
            from langchain_openai import ChatOpenAI

            return ChatOpenAI(
                model=model_id,
                api_key=self.api_key,
                temperature=config.temperature,
                max_tokens=config.max_tokens,
            )
        raise ValueError(f"Unsupported provider for LangChain: {self.provider}")

    def create_agent(self, config: AgentConfig) -> LangChainAgent:
        """Create a LangChain-based agent.

        Builds a tool-calling agent when tools are configured and the
        installed LangChain/LangGraph version provides the needed factory;
        otherwise falls back to a simple ``prompt | llm`` chain.
        """
        if not self.is_available():
            raise ImportError("LangChain not installed")

        # Import from langchain or langgraph depending on version.
        # In langchain 1.x these moved; newer setups may only have langgraph.
        try:
            from langchain.agents import create_tool_calling_agent  # type: ignore[attr-defined]
            from langchain.agents.agent import AgentExecutor
        except (ImportError, AttributeError):
            AgentExecutor: Any = None  # type: ignore[no-redef]
            try:
                from langgraph.prebuilt import create_react_agent

                create_tool_calling_agent: Any = create_react_agent  # type: ignore[no-redef]
            except ImportError:
                create_tool_calling_agent = None
        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

        llm = self._get_llm(config)

        # Build system prompt
        system_prompt = config.system_prompt or self._default_system_prompt(config)

        # Create prompt template
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system_prompt),
                MessagesPlaceholder(variable_name="chat_history", optional=True),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad", optional=True),
            ],
        )

        # Convert configured tools to LangChain format, dropping failures.
        lc_tools = [t for t in (self._convert_tool(tool) for tool in config.tools) if t]

        if lc_tools and AgentCapability.TOOL_USE in config.capabilities:
            # BUG FIX: the original assumed both factory and executor were
            # importable; with only langgraph installed AgentExecutor was None
            # and calling it raised TypeError (same when neither import
            # succeeded and create_tool_calling_agent itself was None).
            if create_tool_calling_agent is not None and AgentExecutor is not None:
                # Classic langchain path: agent + AgentExecutor wrapper.
                agent = create_tool_calling_agent(llm, lc_tools, prompt)
                executor = AgentExecutor(agent=agent, tools=lc_tools, verbose=False)
                return LangChainAgent(config, agent_executor=executor)
            if create_tool_calling_agent is not None:
                # langgraph's create_react_agent returns a runnable graph
                # directly; there is no AgentExecutor wrapper to apply.
                graph = create_tool_calling_agent(llm, lc_tools)
                return LangChainAgent(config, agent_executor=graph)
            # No agent factory available at all: fall through to plain chain.

        # Create simple chain (prompt | llm)
        chain = prompt | llm
        return LangChainAgent(config, chain=chain)

    def create_workflow(self, config: WorkflowConfig, agents: list[BaseAgent]) -> LangChainWorkflow:
        """Create a LangChain workflow.

        For sequential mode chains could be composed; for now the workflow
        simply wraps the agents and runs them in order.
        """
        return LangChainWorkflow(config, agents)

    def create_tool(
        self,
        name: str,
        description: str,
        func: Callable,
        args_schema: dict | None = None,
    ) -> Any:
        """Create a LangChain tool (StructuredTool) from a plain callable.

        NOTE: ``args_schema`` is currently not forwarded; StructuredTool
        infers the schema from ``func``'s signature.
        """
        if not self.is_available():
            # Fall back to the framework-agnostic dict representation.
            return super().create_tool(name, description, func, args_schema)

        from langchain_core.tools import StructuredTool

        return StructuredTool.from_function(func=func, name=name, description=description)

    def _convert_tool(self, tool: Any) -> Any:
        """Convert a tool to LangChain format; return None if unconvertible."""
        if not self.is_available():
            return None

        # If already a LangChain tool, return as-is
        try:
            from langchain_core.tools import BaseTool

            if isinstance(tool, BaseTool):
                return tool
        except ImportError:
            pass

        # If dict, convert to StructuredTool
        if isinstance(tool, dict):
            return self.create_tool(
                name=tool.get("name", "tool"),
                description=tool.get("description", ""),
                func=tool.get("func", lambda x: x),
                args_schema=tool.get("args_schema"),
            )

        return None

    def _default_system_prompt(self, config: AgentConfig) -> str:
        """Generate default system prompt based on role."""
        role_prompts = {
            "researcher": "You are a thorough researcher. Gather information and cite sources.",
            "writer": "You are a skilled writer. Create clear, engaging content.",
            "reviewer": "You are a critical reviewer. Provide constructive feedback.",
            "editor": "You are an experienced editor. Refine and improve content.",
            "debugger": "You are an expert debugger. Analyze code issues systematically.",
            "security": "You are a security analyst. Identify vulnerabilities and risks.",
            "coordinator": "You coordinate a team of agents. Delegate and synthesize results.",
        }

        base = role_prompts.get(config.role.value, f"You are a helpful {config.role.value} agent.")

        if config.description:
            base = f"{base}\n\n{config.description}"

        return base
|