attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
"""LangGraph Adapter
|
|
2
|
+
|
|
3
|
+
Creates stateful multi-agent workflows using LangGraph's graph primitives.
|
|
4
|
+
Best for complex workflows with cycles, conditional routing, and state management.
|
|
5
|
+
|
|
6
|
+
Requires: pip install langgraph langchain-anthropic
|
|
7
|
+
|
|
8
|
+
Copyright 2025 Smart-AI-Memory
|
|
9
|
+
Licensed under Fair Source License 0.9
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import asyncio
|
|
13
|
+
import os
|
|
14
|
+
from collections.abc import Callable
|
|
15
|
+
from typing import Any
|
|
16
|
+
|
|
17
|
+
from pydantic import SecretStr
|
|
18
|
+
|
|
19
|
+
from attune_llm.agent_factory.base import (
|
|
20
|
+
AgentConfig,
|
|
21
|
+
BaseAdapter,
|
|
22
|
+
BaseAgent,
|
|
23
|
+
BaseWorkflow,
|
|
24
|
+
WorkflowConfig,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
# Lazy imports
|
|
28
|
+
_langgraph_available = None
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _check_langgraph():
|
|
32
|
+
"""Check if LangGraph is available."""
|
|
33
|
+
global _langgraph_available
|
|
34
|
+
if _langgraph_available is None:
|
|
35
|
+
try:
|
|
36
|
+
import langgraph # noqa: F401
|
|
37
|
+
|
|
38
|
+
_langgraph_available = True
|
|
39
|
+
except ImportError:
|
|
40
|
+
_langgraph_available = False
|
|
41
|
+
return _langgraph_available
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class LangGraphAgent(BaseAgent):
    """Agent wrapping a LangGraph node/runnable.

    Exactly one of ``runnable`` / ``node_func`` is normally supplied.
    When neither is configured, ``invoke`` returns a placeholder output
    instead of raising.
    """

    def __init__(self, config: AgentConfig, runnable=None, node_func=None):
        super().__init__(config)
        self._runnable = runnable
        self._node_func = node_func

    @staticmethod
    def _message_content(message) -> str:
        """Extract text from a message that may be a dict or a message object.

        LangChain runnables can return message objects exposing ``.content``
        rather than plain dicts; the previous code called ``.get()`` on them
        and the resulting AttributeError was swallowed as an "Error:" output.
        """
        if isinstance(message, dict):
            return message.get("content", str(message))
        return getattr(message, "content", str(message))

    @staticmethod
    def _build_state(input_data: str | dict, context: dict | None) -> dict:
        """Normalize input into a LangGraph-style state dict, merging context."""
        if isinstance(input_data, str):
            state = {"messages": [{"role": "user", "content": input_data}]}
        elif isinstance(input_data, dict):
            # Copy so we never mutate the caller's dict.
            state = input_data.copy()
        else:
            state = {"input": input_data}
        if context:
            state.update(context)
        return state

    async def invoke(self, input_data: str | dict, context: dict | None = None) -> dict:
        """Invoke the agent.

        Args:
            input_data: User message string, or a pre-built state dict.
            context: Extra key/values merged into the state before execution.

        Returns:
            Dict with ``output`` (str), ``state`` (resulting state dict, or
            ``{}`` for non-dict results), and ``metadata``. Errors are caught
            and reported in ``metadata["error"]`` rather than raised.
        """
        state = self._build_state(input_data, context)

        try:
            if self._runnable:
                if hasattr(self._runnable, "ainvoke"):
                    result = await self._runnable.ainvoke(state)
                else:
                    result = self._runnable.invoke(state)
            elif self._node_func:
                result = (
                    await self._node_func(state)
                    if asyncio.iscoroutinefunction(self._node_func)
                    else self._node_func(state)
                )
            else:
                result = {"output": f"[{self.name}] No runnable configured"}

            # Extract output from messages or output key.
            if isinstance(result, dict):
                if result.get("messages"):
                    output = self._message_content(result["messages"][-1])
                else:
                    output = result.get("output", str(result))
            else:
                # Bare chains (e.g. prompt | llm) may return a message object.
                output = self._message_content(result)

            self._conversation_history.append({"role": "user", "content": str(input_data)})
            self._conversation_history.append({"role": "assistant", "content": output})

            return {
                "output": output,
                "state": result if isinstance(result, dict) else {},
                "metadata": {"framework": "langgraph", "model": self.model},
            }

        except Exception as e:
            return {"output": f"Error: {e}", "metadata": {"error": str(e)}}

    async def stream(self, input_data: str | dict, context: dict | None = None):
        """Stream agent response chunks.

        Falls back to a single ``invoke`` result when the runnable does not
        support ``astream``.
        """
        if self._runnable and hasattr(self._runnable, "astream"):
            # Bug fix: ``context`` was previously ignored on the streaming
            # path, inconsistent with ``invoke``.
            state = self._build_state(input_data, context)
            async for chunk in self._runnable.astream(state):
                yield chunk
        else:
            result = await self.invoke(input_data, context)
            yield result
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class LangGraphWorkflow(BaseWorkflow):
    """Workflow using LangGraph's StateGraph."""

    def __init__(self, config: WorkflowConfig, agents: list[BaseAgent], graph=None):
        super().__init__(config, agents)
        self._graph = graph
        self._compiled = None  # compiled-graph cache, filled lazily

    def _compile_graph(self):
        """Compile the graph once and cache the result (None when no graph)."""
        if self._compiled is None and self._graph:
            self._compiled = self._graph.compile()
        return self._compiled

    @staticmethod
    def _prepare_state(input_data: str | dict, initial_state: dict | None) -> dict:
        """Build the initial state dict from input plus optional overrides."""
        if isinstance(input_data, str):
            state = {"messages": [{"role": "user", "content": input_data}]}
        else:
            state = input_data.copy()
        if initial_state:
            state.update(initial_state)
        return state

    async def run(self, input_data: str | dict, initial_state: dict | None = None) -> dict:
        """Run the LangGraph workflow.

        Returns a dict with ``output``, ``state``, and ``metadata``; errors
        are caught and reported under the ``error`` key rather than raised.
        """
        compiled = self._compile_graph()
        state = self._prepare_state(input_data, initial_state)
        self._state = state

        try:
            if compiled:
                if hasattr(compiled, "ainvoke"):
                    result = await compiled.ainvoke(state)
                else:
                    result = compiled.invoke(state)
            else:
                # Fallback: no graph configured, chain agents sequentially.
                result = await self._run_sequential(input_data)

            # Extract output from the final state.
            if isinstance(result, dict):
                if result.get("messages"):
                    last = result["messages"][-1]
                    # Messages may be dicts or LangChain message objects.
                    output = (
                        last.get("content", "")
                        if isinstance(last, dict)
                        else getattr(last, "content", str(last))
                    )
                else:
                    output = result.get("output", str(result))
                self._state = result
            else:
                output = str(result)

            return {"output": output, "state": self._state, "metadata": {"framework": "langgraph"}}

        except Exception as e:
            return {"output": f"Error: {e}", "error": str(e)}

    async def _run_sequential(self, input_data: str | dict) -> dict:
        """Fallback sequential execution: pipe each agent's output to the next."""
        current = input_data
        for agent in self.agents.values():
            result = await agent.invoke(current)
            current = result.get("output", result)
        return {"output": current, "messages": []}

    async def stream(self, input_data: str | dict, initial_state: dict | None = None):
        """Stream workflow execution events.

        Falls back to yielding a single ``run`` result when the compiled
        graph does not support ``astream``.
        """
        compiled = self._compile_graph()
        # Bug fix: ``initial_state`` was previously ignored on the streaming
        # path, inconsistent with ``run``.
        state = self._prepare_state(input_data, initial_state)

        if compiled and hasattr(compiled, "astream"):
            async for event in compiled.astream(state):
                yield event
        else:
            result = await self.run(input_data, initial_state)
            yield result
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
class LangGraphAdapter(BaseAdapter):
    """Adapter for LangGraph framework.

    Builds LangGraph-backed agents and StateGraph workflows on top of
    LangChain chat models (Anthropic or OpenAI).
    """

    def __init__(self, provider: str = "anthropic", api_key: str | None = None):
        self.provider = provider
        # Pick the matching env var when no explicit key is supplied.
        self.api_key = api_key or os.getenv(
            "ANTHROPIC_API_KEY" if provider == "anthropic" else "OPENAI_API_KEY",
        )

    @property
    def framework_name(self) -> str:
        return "langgraph"

    def is_available(self) -> bool:
        """Whether the langgraph package is importable."""
        return bool(_check_langgraph())

    def _get_llm(self, config: AgentConfig) -> Any:
        """Get LangChain LLM for use in LangGraph.

        Raises:
            ValueError: if the provider is unsupported, or Anthropic is
                selected without an API key.
        """
        model_id = config.model_override or self.get_model_for_tier(
            config.model_tier,
            self.provider,
        )

        if self.provider == "anthropic":
            from langchain_anthropic import ChatAnthropic

            # LangChain API varies between versions - use type: ignore for flexibility
            # ChatAnthropic requires api_key as SecretStr (not None)
            if not self.api_key:
                raise ValueError("API key required for Anthropic provider")
            return ChatAnthropic(  # type: ignore[call-arg]
                model=model_id,
                api_key=SecretStr(self.api_key),
                temperature=config.temperature,
                max_tokens_to_sample=config.max_tokens,  # Anthropic uses max_tokens_to_sample
            )
        if self.provider == "openai":
            from langchain_openai import ChatOpenAI

            return ChatOpenAI(
                model=model_id,
                api_key=self.api_key,
                temperature=config.temperature,
                max_tokens=config.max_tokens,
            )
        raise ValueError(f"Unsupported provider: {self.provider}")

    def create_agent(self, config: AgentConfig) -> LangGraphAgent:
        """Create a LangGraph-compatible agent (a prompt | llm chain)."""
        if not self.is_available():
            raise ImportError("LangGraph not installed. Run: pip install langgraph")

        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

        llm = self._get_llm(config)

        # Build prompt: system message plus the running conversation.
        system = config.system_prompt or f"You are a {config.role.value} agent."
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system),
                MessagesPlaceholder(variable_name="messages"),
            ],
        )

        # Create runnable
        chain = prompt | llm

        return LangGraphAgent(config, runnable=chain)

    def create_workflow(self, config: WorkflowConfig, agents: list[BaseAgent]) -> LangGraphWorkflow:
        """Create a LangGraph StateGraph workflow.

        Raises:
            ImportError: if langgraph is not installed.
            ValueError: if ``agents`` is empty (previously an IndexError).
        """
        if not self.is_available():
            raise ImportError("LangGraph not installed")
        if not agents:
            raise ValueError("At least one agent is required to build a workflow")

        from langgraph.graph import END, START, StateGraph
        from typing_extensions import TypedDict

        # Define state schema
        class WorkflowState(TypedDict):
            messages: list
            current_agent: str
            iteration: int

        # Build graph
        graph = StateGraph(WorkflowState)

        # Add agent nodes
        for agent in agents:

            async def agent_node(state, a=agent):  # default arg binds the agent early
                result = await a.invoke(state)
                messages = state.get("messages", [])
                messages.append({"role": "assistant", "content": result["output"], "agent": a.name})
                return {"messages": messages, "current_agent": a.name}

            graph.add_node(agent.name, agent_node)

        # Build edges based on mode
        agent_names = [a.name for a in agents]

        if config.mode == "parallel":
            # Bug fix: the previous code set only agent 0 as entry point and
            # added agent->END edges, so agents after the first had no
            # incoming edges and never executed. Fan out from START instead.
            for name in agent_names:
                graph.add_edge(START, name)
                graph.add_edge(name, END)
        else:
            # "sequential" and any unrecognized mode: linear chain
            # agent1 -> agent2 -> ... -> END
            graph.set_entry_point(agent_names[0])
            for i in range(len(agent_names) - 1):
                graph.add_edge(agent_names[i], agent_names[i + 1])
            graph.add_edge(agent_names[-1], END)

        return LangGraphWorkflow(config, agents, graph=graph)

    def create_tool(
        self,
        name: str,
        description: str,
        func: Callable,
        args_schema: dict | None = None,
    ) -> Any:
        """Create a tool (same as LangChain).

        Returns a plain dict descriptor when langgraph is unavailable so
        callers can still inspect the tool definition.
        """
        if not self.is_available():
            return {"name": name, "description": description, "func": func}

        from langchain_core.tools import StructuredTool

        return StructuredTool.from_function(func=func, name=name, description=description)
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
"""Native Empathy Adapter
|
|
2
|
+
|
|
3
|
+
Creates agents using Empathy's built-in EmpathyLLM system.
|
|
4
|
+
No external dependencies required.
|
|
5
|
+
|
|
6
|
+
Features:
|
|
7
|
+
- Full EmpathyLLM integration (levels 1-5)
|
|
8
|
+
- ModelRouter for cost optimization
|
|
9
|
+
- Pattern learning and memory
|
|
10
|
+
- Cost tracking
|
|
11
|
+
|
|
12
|
+
Copyright 2025 Smart-AI-Memory
|
|
13
|
+
Licensed under Fair Source License 0.9
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import os
|
|
18
|
+
from collections.abc import Callable
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from attune_llm.agent_factory.base import (
|
|
22
|
+
AgentConfig,
|
|
23
|
+
BaseAdapter,
|
|
24
|
+
BaseAgent,
|
|
25
|
+
BaseWorkflow,
|
|
26
|
+
WorkflowConfig,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class NativeAgent(BaseAgent):
    """Agent backed by Empathy's native EmpathyLLM.

    Falls back to a canned echo response when no LLM instance is supplied,
    which keeps the adapter usable in offline/test environments.
    """

    def __init__(self, config: AgentConfig, llm=None):
        """Initialize the agent.

        Args:
            config: Agent configuration (name, role, tools, empathy level).
            llm: Optional EmpathyLLM instance; when None, invoke() returns a
                placeholder response instead of calling a model.
        """
        super().__init__(config)
        self._llm = llm
        # Index dict-style tools by name for quick lookup; non-dict tool
        # objects are intentionally skipped here.
        self._tools = {tool["name"]: tool for tool in config.tools if isinstance(tool, dict)}

    async def invoke(self, input_data: str | dict, context: dict | None = None) -> dict:
        """Invoke the agent once and return its response.

        Args:
            input_data: Raw user text, or a dict carrying the text under an
                "input" or "query" key.
            context: Optional extra context; the caller's dict is copied, not
                mutated.

        Returns:
            Dict with "output" (response text) and "metadata" (level, model,
            plus "patterns_used" when an LLM responded or "skipped" for empty
            input).
        """
        # Normalize input to a string. Non-string "input"/"query" values are
        # coerced so the .strip() check below cannot raise AttributeError.
        if isinstance(input_data, str):
            user_input = input_data
        else:
            user_input = str(input_data.get("input", input_data.get("query", input_data)))

        # Short-circuit empty/blank input instead of spending an LLM call.
        if not user_input or not user_input.strip():
            return {
                "output": "",
                "metadata": {
                    "level": self.config.empathy_level,
                    "model": self.model,
                    "skipped": True,
                },
            }

        # Copy the caller's context so our additions don't leak back out.
        ctx = dict(context) if context else {}
        ctx["agent_name"] = self.name
        ctx["agent_role"] = self.role.value

        # Provide a bounded history window to keep prompts small.
        if self._conversation_history:
            ctx["conversation_history"] = self._conversation_history[-10:]

        # Use EmpathyLLM if available
        if self._llm:
            response = await self._llm.interact(
                user_id=f"agent_{self.name}",
                user_input=user_input,
                context=ctx,
            )
            output = response.get("response", "")
            metadata = {
                "level": response.get("level", self.config.empathy_level),
                "model": response.get("model", self.model),
                "patterns_used": response.get("patterns_used", []),
            }
        else:
            # Fallback to simple response
            output = f"[{self.name}] I would process: {user_input}"
            metadata = {"level": self.config.empathy_level, "model": self.model}

        # Track conversation for future context windows.
        self._conversation_history.append({"role": "user", "content": user_input})
        self._conversation_history.append({"role": "assistant", "content": output})

        return {"output": output, "metadata": metadata}

    async def stream(self, input_data: str | dict, context: dict | None = None):
        """Stream the agent response.

        The native adapter doesn't support token streaming yet, so this
        yields the full invoke() result as a single chunk.
        """
        result = await self.invoke(input_data, context)
        yield result
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class NativeWorkflow(BaseWorkflow):
    """Workflow executing agents sequentially or in parallel.

    Any mode other than "parallel" (including unknown modes) falls back to
    sequential execution.
    """

    async def run(self, input_data: str | dict, initial_state: dict | None = None) -> dict:
        """Run the workflow to completion.

        Args:
            input_data: Input handed to the first agent (sequential) or to
                every agent (parallel).
            initial_state: Optional starting state dict.

        Returns:
            Dict with "output" (last agent's output, "" when no agents ran),
            "results" (per-agent result dicts), "state", and "agents_invoked"
            (agent names in execution order).
        """
        self._state = initial_state or {}
        self._state["input"] = input_data

        if self.config.mode == "parallel":
            results = await self._run_parallel(input_data)
        else:
            # "sequential" and any unrecognized mode both run sequentially.
            results = await self._run_sequential(input_data)

        self._state["results"] = results
        self._state["final_output"] = results[-1]["output"] if results else ""

        return {
            "output": self._state["final_output"],
            "results": results,
            "state": self._state,
            "agents_invoked": [r.get("agent") for r in results],
        }

    async def _run_sequential(self, input_data: str | dict) -> list[dict]:
        """Run agents one after another, feeding each agent's output to the next."""
        results: list[dict] = []
        current_input = input_data

        for agent in self.agents.values():
            context = {"previous_results": results, "state": self._state}
            result = await agent.invoke(current_input, context)
            result["agent"] = agent.name
            results.append(result)
            # Chain: the next agent receives this agent's output as input.
            current_input = result["output"]

        return results

    async def _run_parallel(self, input_data: str | dict) -> list:
        """Run all agents concurrently on the same input."""
        tasks = [
            agent.invoke(input_data, {"state": self._state})
            for agent in self.agents.values()
        ]
        results = await asyncio.gather(*tasks)

        # Tag each result with the agent that produced it (dict order matches
        # task order, so zip pairs them correctly).
        for agent_name, result in zip(self.agents.keys(), results, strict=False):
            result["agent"] = agent_name

        return list(results)

    async def stream(self, input_data: str | dict, initial_state: dict | None = None):
        """Stream workflow execution as start/output/end events per agent.

        NOTE(review): unlike run(), every agent receives the original
        input_data rather than the previous agent's output — confirm this is
        intended for streaming mode.
        """
        self._state = initial_state or {}
        self._state["input"] = input_data

        for agent in self.agents.values():
            context = {"state": self._state}
            yield {"event": "agent_start", "agent": agent.name}

            # agent.stream returns an async generator
            stream_gen = agent.stream(input_data, context)
            async for chunk in stream_gen:  # type: ignore[attr-defined]
                yield {"event": "agent_output", "agent": agent.name, "data": chunk}

            yield {"event": "agent_end", "agent": agent.name}
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
class NativeAdapter(BaseAdapter):
    """Adapter for Empathy's native agent system."""

    def __init__(self, provider: str = "anthropic", api_key: str | None = None):
        """Initialize native adapter.

        Args:
            provider: LLM provider (anthropic, openai, local)
            api_key: API key (uses env var if not provided)

        """
        self.provider = provider
        if not api_key:
            # Fall back to the provider's conventional environment variable.
            env_var = "ANTHROPIC_API_KEY" if provider == "anthropic" else "OPENAI_API_KEY"
            api_key = os.getenv(env_var)
        self.api_key = api_key
        self._llm: Any = None  # lazily created EmpathyLLM instance, or None

    @property
    def framework_name(self) -> str:
        """Identifier of this framework backend."""
        return "native"

    def is_available(self) -> bool:
        """Native is always available."""
        return True

    def _get_llm(self, config: AgentConfig) -> Any:
        """Lazily build and cache the shared EmpathyLLM instance.

        Returns None when no API key is configured or the core package is
        missing. NOTE(review): the instance is cached on first use, so the
        empathy level of the first agent's config wins for all later agents —
        confirm this is intended.
        """
        if self._llm is not None or not self.api_key:
            return self._llm
        try:
            from attune_llm.core import EmpathyLLM

            self._llm = EmpathyLLM(
                provider=self.provider,
                api_key=self.api_key,
                target_level=config.empathy_level,
            )
        except ImportError:
            # Core package not installed: agents will use their fallback path.
            pass
        return self._llm

    def create_agent(self, config: AgentConfig) -> NativeAgent:
        """Create a native Empathy agent."""
        return NativeAgent(config, llm=self._get_llm(config))

    def create_workflow(self, config: WorkflowConfig, agents: list[BaseAgent]) -> NativeWorkflow:
        """Create a native workflow over the given agents."""
        return NativeWorkflow(config, agents)

    def create_tool(
        self,
        name: str,
        description: str,
        func: Callable,
        args_schema: dict | None = None,
    ) -> dict:
        """Create a tool dict for native agents."""
        tool = {"name": name, "description": description, "func": func}
        tool["args_schema"] = args_schema
        return tool
|