attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune_llm/README.md
ADDED
@@ -0,0 +1,553 @@

# Empathy LLM Toolkit

**Wrap any LLM with the Empathy Framework's 5 levels of AI-human collaboration.**

Transform your LLM from reactive Q&A (Level 1) to an anticipatory partner (Level 4) automatically.

---

## Quick Start

```python
from empathy_llm import EmpathyLLM

# Initialize with any provider
llm = EmpathyLLM(
    provider="anthropic",  # or "openai", "local"
    target_level=4,        # Target: Anticipatory
    api_key="your-api-key"
)

# Interact - LLM automatically progresses through levels
response = await llm.interact(
    user_id="developer_123",
    user_input="Help me optimize this code",
    context={"code_snippet": "..."}
)

print(response["content"])
print(f"Level used: {response['level_used']}")  # Progresses: 1 → 2 → 3 → 4
```

---

## The 5 Levels

| Level | Behavior | When Activated |
|-------|----------|----------------|
| **1: Reactive** | Simple Q&A | Always (default) |
| **2: Guided** | Asks clarifying questions | Immediate |
| **3: Proactive** | Acts on patterns | After patterns detected + trust > 0.6 |
| **4: Anticipatory** | Predicts bottlenecks | After history + trust > 0.7 |
| **5: Systems** | Cross-domain learning | After trust > 0.8 |

**Key**: System automatically progresses based on collaboration state.
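
As a rough illustration, the sketch below uses only the `interact` and `update_trust` calls and the `level_used` response field shown in this README; the exact turn at which each level activates depends on the detected patterns and the trust thresholds in the table above.

```python
# Minimal sketch: observe level progression as trust accumulates.
# Assumes `llm` was created as in Quick Start; the loop and print are illustrative only.
async def watch_progression(llm, user_id="user_1"):
    for turn in range(10):
        response = await llm.interact(
            user_id=user_id,
            user_input=f"Continuing work on task {turn}",
        )
        print(f"turn {turn}: level {response['level_used']}")
        llm.update_trust(user_id, "success")  # mark the turn as helpful to build trust
```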

---

## Installation

```bash
pip install empathy-llm-toolkit

# Provider dependencies
pip install anthropic  # For Anthropic/Claude
pip install openai     # For OpenAI/GPT
# Local models work with no extra deps
```

---

## Providers Supported

### Anthropic (Claude)

```python
llm = EmpathyLLM(
    provider="anthropic",
    model="claude-3-5-sonnet-20241022",  # or opus, haiku
    api_key="sk-ant-..."
)
```

### OpenAI (GPT)

```python
llm = EmpathyLLM(
    provider="openai",
    model="gpt-4-turbo-preview",  # or gpt-3.5-turbo
    api_key="sk-..."
)
```

### Local Models (Ollama, LM Studio)

```python
llm = EmpathyLLM(
    provider="local",
    model="llama2",
    endpoint="http://localhost:11434"  # Ollama default
)
```

---

## Level Progression Examples

### Level 1: Reactive (First Interaction)

```python
# First interaction - always starts at Level 1
response = await llm.interact(
    user_id="user_1",
    user_input="What is Python?"
)

# Level 1: Direct answer
# "Python is a high-level programming language..."
```

### Level 2: Guided (Immediate)

```python
# Second interaction - progresses to Level 2
response = await llm.interact(
    user_id="user_1",
    user_input="Help me build an API"
)

# Level 2: Asks clarifying questions
# "I can help! A few questions:
#  1. What framework? (Flask, FastAPI, Django?)
#  2. What does the API do?
#  3. Any auth requirements?"
```

### Level 3: Proactive (After Patterns Detected)

```python
from datetime import datetime

from empathy_llm import UserPattern, PatternType

# Pattern detected: User always asks for tests after code
llm.add_pattern(
    user_id="user_1",
    pattern=UserPattern(
        pattern_type=PatternType.SEQUENTIAL,
        trigger="wrote code",
        action="requests tests",
        confidence=0.85,
        occurrences=5,
        last_seen=datetime.now()
    )
)

# Mark previous interactions as successful (builds trust)
llm.update_trust("user_1", "success")
llm.update_trust("user_1", "success")
llm.update_trust("user_1", "success")

# Now at Level 3!
response = await llm.interact(
    user_id="user_1",
    user_input="I just wrote the login function"
)

# Level 3: Proactively generates tests
# "I've detected you typically request tests after writing code.
#  I've proactively generated pytest tests for your login function:
#  [test code]
#  Was this helpful?"
```

### Level 4: Anticipatory (After History + High Trust)

```python
# After 10+ interactions and trust > 0.7
response = await llm.interact(
    user_id="user_1",
    user_input="I'm adding a 15th API endpoint"
)

# Level 4: Predicts future bottleneck
# "Your API now has 15 endpoints. Based on trajectory analysis:
#
#  ALERT: In our experience, API testing becomes a bottleneck around
#  20+ endpoints without automation.
#
#  Prevention steps:
#  1. Implement integration test framework
#  2. Add API contract testing
#  3. Set up automated test generation
#
#  Would you like me to design this now while you have time?"
```

---

## Managing Trust

Trust determines how proactive the system becomes:

```python
# After successful interaction
llm.update_trust("user_1", "success")  # +0.05

# After failed/unhelpful interaction
llm.update_trust("user_1", "failure")  # -0.10 (erodes faster)

# Check trust level
stats = llm.get_statistics("user_1")
print(f"Trust: {stats['trust_level']:.0%}")
```

**Trust Thresholds**:
- 0.0-0.6: Stay at Level 2 (ask before acting)
- 0.6-0.7: Progress to Level 3 (proactive)
- 0.7+: Progress to Level 4 (anticipatory)
- 0.8+: Progress to Level 5 (systems)
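
For a concrete reading of these thresholds, here is a small illustrative helper (not part of the library; the cutoffs simply restate the list above) that maps a trust score to the highest level it allows. Actual progression also requires the pattern and history conditions from the 5 Levels table.

```python
# Illustrative only: translate a trust score into the highest allowed level.
def max_level_for_trust(trust: float) -> int:
    if trust >= 0.8:
        return 5  # systems
    if trust >= 0.7:
        return 4  # anticipatory
    if trust >= 0.6:
        return 3  # proactive
    return 2      # guided: ask before acting

stats = llm.get_statistics("user_1")
print(f"Trust allows up to Level {max_level_for_trust(stats['trust_level'])}")
```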

---

## Pattern Detection

### Automatic Pattern Detection (Coming Soon)

```python
# Will automatically detect patterns from conversation history
await llm.detect_patterns("user_1")
```

### Manual Pattern Addition

```python
from datetime import datetime

from empathy_llm import UserPattern, PatternType

# Add observed pattern
llm.add_pattern(
    user_id="developer_1",
    pattern=UserPattern(
        pattern_type=PatternType.SEQUENTIAL,
        trigger="makes code change",
        action="runs tests",
        confidence=0.90,
        occurrences=12,
        last_seen=datetime.now(),
        context={"framework": "pytest"}
    )
)
```

### Pattern Types

- `SEQUENTIAL`: User always does X then Y
- `TEMPORAL`: User does X at a specific time
- `CONDITIONAL`: When Z happens, user does X
- `PREFERENCE`: User prefers format/style X
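
The same `UserPattern` fields cover all four types. As a sketch (assuming `PatternType.PREFERENCE` is exposed alongside `SEQUENTIAL`, as the list above implies), a preference pattern might be recorded like this:

```python
from datetime import datetime

from empathy_llm import UserPattern, PatternType

# Hypothetical example: the user consistently prefers diff-style output in reviews.
llm.add_pattern(
    user_id="developer_1",
    pattern=UserPattern(
        pattern_type=PatternType.PREFERENCE,
        trigger="asks for code review",
        action="prefers diff-style output",
        confidence=0.80,
        occurrences=7,
        last_seen=datetime.now()
    )
)
```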

---

## Context Management

### Provide Context

```python
response = await llm.interact(
    user_id="user_1",
    user_input="Optimize this function",
    context={
        "code_snippet": "def slow_func()...",
        "performance_metrics": {...},
        "constraints": ["must be backwards compatible"]
    }
)
```

### Access Conversation History

```python
state = llm.states["user_1"]
history = state.get_conversation_history(max_turns=10)
```

---

## Force Specific Level (Testing/Demos)

```python
# Force Level 4 for demo
response = await llm.interact(
    user_id="demo_user",
    user_input="Show anticipatory analysis",
    force_level=4
)
```

---

## Statistics & Monitoring

```python
stats = llm.get_statistics("user_1")

print(f"""
User: {stats['user_id']}
Session Duration: {stats['session_duration']:.0f}s
Total Interactions: {stats['total_interactions']}
Trust Level: {stats['trust_level']:.0%}
Success Rate: {stats['success_rate']:.0%}
Patterns Detected: {stats['patterns_detected']}
Current Level: {stats['current_level']}
Average Level: {stats['average_level']:.1f}
""")
```

---

## Level 5: Cross-Domain Learning

```python
# Initialize with shared pattern library
shared_patterns = {
    "testing_bottleneck": {
        "source_domain": "software",
        "principle": "Manual processes become bottleneck at growth threshold",
        "applicable_to": ["healthcare_docs", "financial_compliance"],
        "threshold": "~20-25 items"
    }
}

llm = EmpathyLLM(
    provider="anthropic",
    target_level=5,
    pattern_library=shared_patterns
)

# LLM can now apply software patterns to other domains
response = await llm.interact(
    user_id="healthcare_user",
    user_input="We have 18 clinical documentation templates",
    force_level=5
)

# Applies software testing pattern to healthcare:
# "Pattern from software development applies here:
#  'Manual process bottleneck threshold'.
#
#  In software, manual testing becomes bottleneck at 20-25 tests.
#  Your 18 clinical templates suggest similar trajectory.
#
#  Alert: Consider template automation before burden compounds."
```

---

## Cost Optimization

### Use Tiered Models

```python
# Detection: Cheap model
detection_llm = EmpathyLLM(
    provider="anthropic",
    model="claude-3-haiku",  # Fast, cheap
    target_level=3
)

# Analysis: Smart model
analysis_llm = EmpathyLLM(
    provider="anthropic",
    model="claude-3-5-sonnet",  # Balanced
    target_level=4
)

# Critical: Best model (rare)
critical_llm = EmpathyLLM(
    provider="anthropic",
    model="claude-3-opus",  # Most capable
    target_level=5
)
```

### Monitor Costs

```python
response = await llm.interact(...)

tokens = response["metadata"]["tokens_used"]
model_info = llm.provider.get_model_info()

cost = (tokens / 1_000_000) * (
    model_info["cost_per_1m_input"] + model_info["cost_per_1m_output"]
) / 2  # Rough average

print(f"Cost: ${cost:.4f}")
```

---

## Healthcare Example

```python
llm = EmpathyLLM(provider="anthropic", target_level=4)

# Level 1: Basic SOAP note
response = await llm.interact(
    user_id="clinician_1",
    user_input="Generate SOAP note"
)

# After patterns detected + trust built...

# Level 3: Proactive pre-population
response = await llm.interact(
    user_id="clinician_1",
    user_input="Seeing patient John Doe"
)
# "I've detected you typically document vitals, allergies, meds.
#  I've pre-populated from EHR:
#  - Vitals: [data]
#  - Allergies: [data]
#  - Current meds: [data]"

# Level 4: Anticipatory compliance
response = await llm.interact(
    user_id="clinician_1",
    user_input="How are my notes looking?"
)
# "Analyzed last 50 notes. Joint Commission audit likely in ~90 days.
#  3 patterns will fail audit:
#  1. 12% missing required elements
#  2. Med reconciliation incomplete in 8 notes
#  I've flagged at-risk notes for review."
```

---

## Best Practices

### 1. Start Simple, Progress Naturally

```python
# Don't force Level 4 immediately
llm = EmpathyLLM(target_level=4)  # Good - progresses automatically

# Let trust build organically
for interaction in user_interactions:
    response = await llm.interact(...)
    if user_satisfied:
        llm.update_trust(user_id, "success")
```

### 2. Provide Rich Context

```python
# Bad: Minimal context
await llm.interact(user_id, "optimize code")

# Good: Rich context
await llm.interact(
    user_id,
    "optimize code",
    context={
        "code": code_snippet,
        "current_performance": metrics,
        "constraints": ["must work in Python 3.8+"],
        "goal": "reduce latency by ~30%"
    }
)
```

### 3. Monitor Trust and Adjust

```python
stats = llm.get_statistics(user_id)

if stats['success_rate'] < 0.7:
    # Too many failures - system being too aggressive
    llm.states[user_id].trust_level = 0.5  # Reset to cautious
```

### 4. Use Appropriate Provider for Task

```python
# Simple Q&A: GPT-3.5 (cheap)
simple_llm = EmpathyLLM(provider="openai", model="gpt-3.5-turbo")

# Complex analysis: Claude Sonnet
complex_llm = EmpathyLLM(provider="anthropic", model="claude-3-5-sonnet-20241022")

# Critical decisions: GPT-4 or Claude Opus
critical_llm = EmpathyLLM(provider="anthropic", model="claude-3-opus-20240229")
```

---

## Debugging

### Enable Logging

```python
import logging
logging.basicConfig(level=logging.INFO)

# See level progression and trust updates
llm = EmpathyLLM(...)
```

### Reset State

```python
# Reset user's collaboration state
llm.reset_state("user_1")
```

### Inspect State

```python
state = llm.states["user_1"]
print(f"Trust: {state.trust_level}")
print(f"Patterns: {len(state.detected_patterns)}")
print(f"Interactions: {len(state.interactions)}")
```

---

## API Reference

### EmpathyLLM

**`__init__(provider, target_level, api_key, model, pattern_library, **kwargs)`**
- Initialize with provider and target empathy level

**`async interact(user_id, user_input, context, force_level)`**
- Main interaction method
- Returns: `{"content": str, "level_used": int, "proactive": bool, "metadata": dict}`

**`update_trust(user_id, outcome, magnitude)`**
- Update trust based on interaction outcome
- `outcome`: "success" or "failure"

**`add_pattern(user_id, pattern)`**
- Manually add detected pattern

**`get_statistics(user_id)`**
- Get collaboration stats

**`reset_state(user_id)`**
- Reset user's state

---

## Related Resources

- **[Empathy Framework Documentation](../docs/CHAPTER_EMPATHY_FRAMEWORK.md)**
- **[Using Empathy with LLMs Guide](../docs/USING_EMPATHY_WITH_LLMS.md)**
- **[AI Development Wizards](../docs/AI_DEVELOPMENT_WIZARDS.md)**

---

## License

Apache License 2.0

---

**Built from experience. Shared with honesty. Extended by community.**
attune_llm/__init__.py
ADDED
@@ -0,0 +1,28 @@

"""Empathy LLM Toolkit

Wraps LLM providers (OpenAI, Anthropic, local models) with Empathy Framework levels.

Enables progression from Level 1 (reactive) to Level 4 (anticipatory) AI collaboration
with any LLM backend.

Copyright 2025 Smart AI Memory, LLC
Licensed under Fair Source 0.9
"""

from .core import EmpathyLLM
from .levels import EmpathyLevel
from .providers import AnthropicProvider, GeminiProvider, LocalProvider, OpenAIProvider
from .state import CollaborationState, UserPattern

__version__ = "1.9.5"

__all__ = [
    "AnthropicProvider",
    "CollaborationState",
    "EmpathyLLM",
    "EmpathyLevel",
    "GeminiProvider",
    "LocalProvider",
    "OpenAIProvider",
    "UserPattern",
]
attune_llm/agent_factory/__init__.py
ADDED

@@ -0,0 +1,53 @@

"""Empathy Framework - Universal Agent Factory

Create agents using your preferred framework while retaining Empathy's
cost optimization, pattern learning, and memory features.

Supported Frameworks:
- LangChain: Chains, tools, and retrieval
- LangGraph: Stateful multi-agent graphs
- AutoGen: Conversational multi-agent systems
- Haystack: RAG and document pipelines
- Native: Empathy's built-in agent system

Usage:
    from attune_llm.agent_factory import AgentFactory, Framework

    # Create factory with preferred framework
    factory = AgentFactory(framework=Framework.LANGGRAPH)

    # Create agents
    researcher = factory.create_agent("researcher", tools=[...])
    writer = factory.create_agent("writer", model_tier="premium")

    # Create workflows
    pipeline = factory.create_workflow([researcher, writer])

    # Create wizards with framework backing
    debug_wizard = factory.create_wizard("debugging")

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from attune_llm.agent_factory.base import (
    AgentCapability,
    AgentConfig,
    AgentRole,
    BaseAdapter,
    BaseAgent,
    WorkflowConfig,
)
from attune_llm.agent_factory.factory import AgentFactory
from attune_llm.agent_factory.framework import Framework

__all__ = [
    "AgentCapability",
    "AgentConfig",
    "AgentFactory",
    "AgentRole",
    "BaseAdapter",
    "BaseAgent",
    "Framework",
    "WorkflowConfig",
]
attune_llm/agent_factory/adapters/__init__.py
ADDED

@@ -0,0 +1,85 @@

"""Framework Adapters for Agent Factory

Each adapter implements the BaseAdapter interface for a specific
agent framework, allowing seamless switching between frameworks.

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from attune_llm.agent_factory.adapters.native import NativeAdapter
from attune_llm.agent_factory.adapters.wizard_adapter import (
    WizardAdapter,
    WizardAgent,
    wrap_wizard,
)

# Lazy imports for optional frameworks
_langchain_adapter = None
_langgraph_adapter = None
_autogen_adapter = None
_haystack_adapter = None
_crewai_adapter = None


def get_langchain_adapter():
    """Get LangChain adapter (lazy import)."""
    global _langchain_adapter
    if _langchain_adapter is None:
        from attune_llm.agent_factory.adapters.langchain_adapter import LangChainAdapter

        _langchain_adapter = LangChainAdapter
    return _langchain_adapter


def get_langgraph_adapter():
    """Get LangGraph adapter (lazy import)."""
    global _langgraph_adapter
    if _langgraph_adapter is None:
        from attune_llm.agent_factory.adapters.langgraph_adapter import LangGraphAdapter

        _langgraph_adapter = LangGraphAdapter
    return _langgraph_adapter


def get_autogen_adapter():
    """Get AutoGen adapter (lazy import)."""
    global _autogen_adapter
    if _autogen_adapter is None:
        from attune_llm.agent_factory.adapters.autogen_adapter import AutoGenAdapter

        _autogen_adapter = AutoGenAdapter
    return _autogen_adapter


def get_haystack_adapter():
    """Get Haystack adapter (lazy import)."""
    global _haystack_adapter
    if _haystack_adapter is None:
        from attune_llm.agent_factory.adapters.haystack_adapter import HaystackAdapter

        _haystack_adapter = HaystackAdapter
    return _haystack_adapter


def get_crewai_adapter():
    """Get CrewAI adapter (lazy import)."""
    global _crewai_adapter
    if _crewai_adapter is None:
        from attune_llm.agent_factory.adapters.crewai_adapter import CrewAIAdapter

        _crewai_adapter = CrewAIAdapter
    return _crewai_adapter


__all__ = [
    "NativeAdapter",
    "WizardAdapter",
    "WizardAgent",
    "get_autogen_adapter",
    "get_crewai_adapter",
    "get_haystack_adapter",
    "get_langchain_adapter",
    "get_langgraph_adapter",
    "wrap_wizard",
]