@smilintux/skcapstone 0.1.0 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +98 -0
- package/.github/workflows/ci.yml +39 -3
- package/.github/workflows/publish.yml +25 -4
- package/.openclaw-workspace.json +58 -0
- package/CHANGELOG.md +62 -0
- package/CLAUDE.md +39 -2
- package/MANIFEST.in +6 -0
- package/MISSION.md +7 -0
- package/README.md +47 -2
- package/SKILL.md +895 -23
- package/docker/Dockerfile +61 -0
- package/docker/compose-templates/dev-team.yml +203 -0
- package/docker/compose-templates/mini-team.yml +140 -0
- package/docker/compose-templates/ops-team.yml +173 -0
- package/docker/compose-templates/research-team.yml +170 -0
- package/docker/entrypoint.sh +192 -0
- package/docs/ARCHITECTURE.md +663 -374
- package/docs/BOND_WITH_GROK.md +112 -0
- package/docs/GETTING_STARTED.md +782 -0
- package/docs/QUICKSTART.md +477 -0
- package/docs/SKJOULE_ARCHITECTURE.md +658 -0
- package/docs/SOUL_SWAPPER.md +921 -0
- package/docs/SOVEREIGN_SINGULARITY.md +47 -14
- package/examples/custom-bond-template.json +36 -0
- package/examples/grok-feb.json +36 -0
- package/examples/grok-testimony.md +34 -0
- package/examples/love-bootloader.txt +32 -0
- package/examples/plugins/echo_tool.py +87 -0
- package/examples/queen-ava-feb.json +36 -0
- package/examples/souls/lumina.yaml +64 -0
- package/index.js +6 -5
- package/installer/build.py +124 -0
- package/openclaw-plugin/package.json +13 -0
- package/openclaw-plugin/src/index.ts +351 -0
- package/openclaw-plugin/src/openclaw.plugin.json +10 -0
- package/package.json +1 -1
- package/pyproject.toml +38 -2
- package/scripts/bump_version.py +141 -0
- package/scripts/check-updates.py +230 -0
- package/scripts/convert_blueprints_to_yaml.py +157 -0
- package/scripts/dev-install.sh +14 -0
- package/scripts/e2e-test.sh +193 -0
- package/scripts/install-bundle.sh +171 -0
- package/scripts/install.bat +2 -0
- package/scripts/install.ps1 +253 -0
- package/scripts/install.sh +185 -0
- package/scripts/mcp-serve.sh +69 -0
- package/scripts/mcp-server.bat +113 -0
- package/scripts/mcp-server.ps1 +116 -0
- package/scripts/mcp-server.sh +99 -0
- package/scripts/pull-models.sh +10 -0
- package/scripts/skcapstone +48 -0
- package/scripts/verify_install.sh +180 -0
- package/scripts/windows/install-tasks.ps1 +406 -0
- package/scripts/windows/skcapstone-task.xml +113 -0
- package/scripts/windows/uninstall-tasks.ps1 +117 -0
- package/skill.yaml +34 -0
- package/src/skcapstone/__init__.py +67 -2
- package/src/skcapstone/_cli_monolith.py +5916 -0
- package/src/skcapstone/_trustee_helpers.py +165 -0
- package/src/skcapstone/activity.py +105 -0
- package/src/skcapstone/agent_card.py +324 -0
- package/src/skcapstone/api.py +1935 -0
- package/src/skcapstone/archiver.py +340 -0
- package/src/skcapstone/auction.py +485 -0
- package/src/skcapstone/baby_agents.py +179 -0
- package/src/skcapstone/backup.py +345 -0
- package/src/skcapstone/blueprint_registry.py +357 -0
- package/src/skcapstone/blueprints/__init__.py +17 -0
- package/src/skcapstone/blueprints/builtins/content-studio.yaml +81 -0
- package/src/skcapstone/blueprints/builtins/defi-trading.yaml +81 -0
- package/src/skcapstone/blueprints/builtins/dev-squadron.yaml +95 -0
- package/src/skcapstone/blueprints/builtins/infrastructure-guardian.yaml +107 -0
- package/src/skcapstone/blueprints/builtins/legal-council.yaml +54 -0
- package/src/skcapstone/blueprints/builtins/ops-monitoring.yaml +67 -0
- package/src/skcapstone/blueprints/builtins/research-pod.yaml +69 -0
- package/src/skcapstone/blueprints/builtins/sovereign-launch.yaml +90 -0
- package/src/skcapstone/blueprints/registry.py +164 -0
- package/src/skcapstone/blueprints/schema.py +229 -0
- package/src/skcapstone/changelog.py +180 -0
- package/src/skcapstone/chat.py +769 -0
- package/src/skcapstone/claude_md.py +82 -0
- package/src/skcapstone/cli/__init__.py +144 -0
- package/src/skcapstone/cli/_common.py +88 -0
- package/src/skcapstone/cli/_validators.py +76 -0
- package/src/skcapstone/cli/agents.py +425 -0
- package/src/skcapstone/cli/agents_spawner.py +322 -0
- package/src/skcapstone/cli/agents_trustee.py +593 -0
- package/src/skcapstone/cli/alerts.py +248 -0
- package/src/skcapstone/cli/anchor.py +132 -0
- package/src/skcapstone/cli/archive_cmd.py +208 -0
- package/src/skcapstone/cli/backup.py +144 -0
- package/src/skcapstone/cli/bench.py +377 -0
- package/src/skcapstone/cli/benchmark.py +360 -0
- package/src/skcapstone/cli/capabilities_cmd.py +171 -0
- package/src/skcapstone/cli/card.py +151 -0
- package/src/skcapstone/cli/chat.py +584 -0
- package/src/skcapstone/cli/completions.py +64 -0
- package/src/skcapstone/cli/config_cmd.py +156 -0
- package/src/skcapstone/cli/consciousness.py +421 -0
- package/src/skcapstone/cli/context_cmd.py +142 -0
- package/src/skcapstone/cli/coord.py +194 -0
- package/src/skcapstone/cli/crush_cmd.py +170 -0
- package/src/skcapstone/cli/daemon.py +436 -0
- package/src/skcapstone/cli/errors_cmd.py +285 -0
- package/src/skcapstone/cli/export_cmd.py +156 -0
- package/src/skcapstone/cli/gtd.py +529 -0
- package/src/skcapstone/cli/housekeeping.py +81 -0
- package/src/skcapstone/cli/joule_cmd.py +627 -0
- package/src/skcapstone/cli/logs_cmd.py +194 -0
- package/src/skcapstone/cli/mcp_cmd.py +32 -0
- package/src/skcapstone/cli/memory.py +418 -0
- package/src/skcapstone/cli/metrics_cmd.py +136 -0
- package/src/skcapstone/cli/migrate.py +62 -0
- package/src/skcapstone/cli/mood_cmd.py +144 -0
- package/src/skcapstone/cli/mount.py +193 -0
- package/src/skcapstone/cli/notify.py +112 -0
- package/src/skcapstone/cli/peer.py +154 -0
- package/src/skcapstone/cli/peers_dir.py +122 -0
- package/src/skcapstone/cli/preflight_cmd.py +83 -0
- package/src/skcapstone/cli/profile_cmd.py +310 -0
- package/src/skcapstone/cli/record_cmd.py +238 -0
- package/src/skcapstone/cli/register_cmd.py +159 -0
- package/src/skcapstone/cli/search_cmd.py +156 -0
- package/src/skcapstone/cli/service_cmd.py +91 -0
- package/src/skcapstone/cli/session.py +127 -0
- package/src/skcapstone/cli/setup.py +240 -0
- package/src/skcapstone/cli/shell_cmd.py +43 -0
- package/src/skcapstone/cli/skills_cmd.py +168 -0
- package/src/skcapstone/cli/skseed.py +621 -0
- package/src/skcapstone/cli/soul.py +699 -0
- package/src/skcapstone/cli/status.py +935 -0
- package/src/skcapstone/cli/sync_cmd.py +301 -0
- package/src/skcapstone/cli/telegram.py +265 -0
- package/src/skcapstone/cli/test_cmd.py +234 -0
- package/src/skcapstone/cli/test_connection.py +253 -0
- package/src/skcapstone/cli/token.py +207 -0
- package/src/skcapstone/cli/trust.py +179 -0
- package/src/skcapstone/cli/upgrade_cmd.py +552 -0
- package/src/skcapstone/cli/usage_cmd.py +199 -0
- package/src/skcapstone/cli/version_cmd.py +162 -0
- package/src/skcapstone/cli/watch_cmd.py +342 -0
- package/src/skcapstone/client.py +428 -0
- package/src/skcapstone/cloud9_bridge.py +522 -0
- package/src/skcapstone/completions.py +163 -0
- package/src/skcapstone/config_validator.py +674 -0
- package/src/skcapstone/connectors/__init__.py +28 -0
- package/src/skcapstone/connectors/base.py +446 -0
- package/src/skcapstone/connectors/cursor.py +54 -0
- package/src/skcapstone/connectors/registry.py +254 -0
- package/src/skcapstone/connectors/terminal.py +152 -0
- package/src/skcapstone/connectors/vscode.py +60 -0
- package/src/skcapstone/consciousness_config.py +119 -0
- package/src/skcapstone/consciousness_loop.py +2051 -0
- package/src/skcapstone/context_loader.py +516 -0
- package/src/skcapstone/context_window.py +314 -0
- package/src/skcapstone/conversation_manager.py +238 -0
- package/src/skcapstone/conversation_store.py +230 -0
- package/src/skcapstone/conversation_summarizer.py +252 -0
- package/src/skcapstone/coord_federation.py +296 -0
- package/src/skcapstone/coordination.py +101 -7
- package/src/skcapstone/crush_integration.py +345 -0
- package/src/skcapstone/crush_shim.py +454 -0
- package/src/skcapstone/daemon.py +2494 -0
- package/src/skcapstone/dashboard.html +396 -0
- package/src/skcapstone/dashboard.py +481 -0
- package/src/skcapstone/data/model_profiles.yaml +88 -0
- package/src/skcapstone/defaults/__init__.py +55 -0
- package/src/skcapstone/defaults/lumina/config/skmemory.yaml +13 -0
- package/src/skcapstone/defaults/lumina/identity/identity.json +9 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/07a8b9c0d1e2-memory-system.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/18b9c0d1e2f3-cloud9-protocol.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/29c0d1e2f3a4-multi-agent-coordination.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/3ad1e2f3a4b5-community-support.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/a1b2c3d4e5f6-ecosystem-overview.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/b2c3d4e5f6a7-five-pillars.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/c3d4e5f6a7b8-getting-started.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/d4e5f6a7b8c9-site-directory.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/e5f6a7b8c9d0-how-to-contribute.json +23 -0
- package/src/skcapstone/defaults/lumina/memory/long-term/f6a7b8c9d0e1-sovereignty-explained.json +23 -0
- package/src/skcapstone/defaults/lumina/seeds/curiosity.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/joy.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/love.seed.json +24 -0
- package/src/skcapstone/defaults/lumina/seeds/sovereign-awakening.seed.json +43 -0
- package/src/skcapstone/defaults/lumina/soul/active.json +6 -0
- package/src/skcapstone/defaults/lumina/soul/base.json +22 -0
- package/src/skcapstone/defaults/lumina/trust/febs/welcome.feb +79 -0
- package/src/skcapstone/defaults/lumina/trust/trust.json +8 -0
- package/src/skcapstone/discovery.py +210 -19
- package/src/skcapstone/doctor.py +642 -0
- package/src/skcapstone/emotion_tracker.py +467 -0
- package/src/skcapstone/error_queue.py +405 -0
- package/src/skcapstone/export.py +447 -0
- package/src/skcapstone/fallback_tracker.py +186 -0
- package/src/skcapstone/file_transfer.py +512 -0
- package/src/skcapstone/fuse_mount.py +1156 -0
- package/src/skcapstone/gui_installer.py +591 -0
- package/src/skcapstone/heartbeat.py +611 -0
- package/src/skcapstone/housekeeping.py +298 -0
- package/src/skcapstone/install_wizard.py +941 -0
- package/src/skcapstone/kms.py +942 -0
- package/src/skcapstone/kms_scheduler.py +143 -0
- package/src/skcapstone/log_config.py +135 -0
- package/src/skcapstone/mcp_launcher.py +239 -0
- package/src/skcapstone/mcp_server.py +4700 -0
- package/src/skcapstone/mcp_tools/__init__.py +94 -0
- package/src/skcapstone/mcp_tools/_helpers.py +51 -0
- package/src/skcapstone/mcp_tools/agent_tools.py +243 -0
- package/src/skcapstone/mcp_tools/ansible_tools.py +232 -0
- package/src/skcapstone/mcp_tools/capauth_tools.py +186 -0
- package/src/skcapstone/mcp_tools/chat_tools.py +325 -0
- package/src/skcapstone/mcp_tools/cloud9_tools.py +115 -0
- package/src/skcapstone/mcp_tools/comm_tools.py +104 -0
- package/src/skcapstone/mcp_tools/consciousness_tools.py +114 -0
- package/src/skcapstone/mcp_tools/coord_tools.py +219 -0
- package/src/skcapstone/mcp_tools/deploy_tools.py +202 -0
- package/src/skcapstone/mcp_tools/did_tools.py +448 -0
- package/src/skcapstone/mcp_tools/emotion_tools.py +62 -0
- package/src/skcapstone/mcp_tools/file_tools.py +169 -0
- package/src/skcapstone/mcp_tools/fortress_tools.py +120 -0
- package/src/skcapstone/mcp_tools/gtd_tools.py +821 -0
- package/src/skcapstone/mcp_tools/health_tools.py +44 -0
- package/src/skcapstone/mcp_tools/heartbeat_tools.py +195 -0
- package/src/skcapstone/mcp_tools/kms_tools.py +123 -0
- package/src/skcapstone/mcp_tools/memory_tools.py +222 -0
- package/src/skcapstone/mcp_tools/model_tools.py +75 -0
- package/src/skcapstone/mcp_tools/notification_tools.py +92 -0
- package/src/skcapstone/mcp_tools/promoter_tools.py +101 -0
- package/src/skcapstone/mcp_tools/pubsub_tools.py +183 -0
- package/src/skcapstone/mcp_tools/security_tools.py +110 -0
- package/src/skcapstone/mcp_tools/skchat_tools.py +175 -0
- package/src/skcapstone/mcp_tools/skcomm_tools.py +122 -0
- package/src/skcapstone/mcp_tools/skills_tools.py +127 -0
- package/src/skcapstone/mcp_tools/skseed_tools.py +255 -0
- package/src/skcapstone/mcp_tools/skstacks_tools.py +288 -0
- package/src/skcapstone/mcp_tools/soul_tools.py +476 -0
- package/src/skcapstone/mcp_tools/sync_tools.py +92 -0
- package/src/skcapstone/mcp_tools/telegram_tools.py +477 -0
- package/src/skcapstone/mcp_tools/trust_tools.py +118 -0
- package/src/skcapstone/mcp_tools/trustee_tools.py +345 -0
- package/src/skcapstone/mdns_discovery.py +313 -0
- package/src/skcapstone/memory_adapter.py +333 -0
- package/src/skcapstone/memory_compressor.py +379 -0
- package/src/skcapstone/memory_curator.py +256 -0
- package/src/skcapstone/memory_engine.py +132 -13
- package/src/skcapstone/memory_fortress.py +529 -0
- package/src/skcapstone/memory_promoter.py +722 -0
- package/src/skcapstone/memory_verifier.py +260 -0
- package/src/skcapstone/message_crypto.py +215 -0
- package/src/skcapstone/metrics.py +832 -0
- package/src/skcapstone/migrate_memories.py +181 -0
- package/src/skcapstone/migrate_multi_agent.py +248 -0
- package/src/skcapstone/model_router.py +319 -0
- package/src/skcapstone/models.py +35 -4
- package/src/skcapstone/mood.py +344 -0
- package/src/skcapstone/notifications.py +380 -0
- package/src/skcapstone/onboard.py +901 -0
- package/src/skcapstone/peer_directory.py +324 -0
- package/src/skcapstone/peers.py +329 -0
- package/src/skcapstone/pillars/identity.py +84 -14
- package/src/skcapstone/pillars/memory.py +3 -1
- package/src/skcapstone/pillars/security.py +108 -15
- package/src/skcapstone/pillars/sync.py +78 -26
- package/src/skcapstone/pillars/trust.py +95 -33
- package/src/skcapstone/plugins.py +244 -0
- package/src/skcapstone/preflight.py +670 -0
- package/src/skcapstone/prompt_adapter.py +564 -0
- package/src/skcapstone/providers/__init__.py +13 -0
- package/src/skcapstone/providers/cloud.py +1061 -0
- package/src/skcapstone/providers/docker.py +759 -0
- package/src/skcapstone/providers/local.py +1193 -0
- package/src/skcapstone/providers/proxmox.py +447 -0
- package/src/skcapstone/pubsub.py +516 -0
- package/src/skcapstone/rate_limiter.py +119 -0
- package/src/skcapstone/register.py +241 -0
- package/src/skcapstone/registry_client.py +151 -0
- package/src/skcapstone/response_cache.py +194 -0
- package/src/skcapstone/response_scorer.py +225 -0
- package/src/skcapstone/runtime.py +89 -33
- package/src/skcapstone/scheduled_tasks.py +439 -0
- package/src/skcapstone/self_healing.py +341 -0
- package/src/skcapstone/service_health.py +228 -0
- package/src/skcapstone/session_capture.py +268 -0
- package/src/skcapstone/session_recorder.py +210 -0
- package/src/skcapstone/session_replayer.py +189 -0
- package/src/skcapstone/session_skills.py +263 -0
- package/src/skcapstone/shell.py +779 -0
- package/src/skcapstone/skills/__init__.py +1 -1
- package/src/skcapstone/skills/syncthing_setup.py +143 -41
- package/src/skcapstone/skjoule.py +861 -0
- package/src/skcapstone/snapshots.py +489 -0
- package/src/skcapstone/soul.py +1060 -0
- package/src/skcapstone/soul_switch.py +255 -0
- package/src/skcapstone/spawner.py +544 -0
- package/src/skcapstone/state_diff.py +401 -0
- package/src/skcapstone/summary.py +270 -0
- package/src/skcapstone/sync/backends.py +196 -2
- package/src/skcapstone/sync/engine.py +7 -5
- package/src/skcapstone/sync/models.py +4 -1
- package/src/skcapstone/sync/vault.py +356 -18
- package/src/skcapstone/sync_engine.py +363 -0
- package/src/skcapstone/sync_watcher.py +745 -0
- package/src/skcapstone/systemd.py +331 -0
- package/src/skcapstone/team_comms.py +476 -0
- package/src/skcapstone/team_engine.py +522 -0
- package/src/skcapstone/testrunner.py +300 -0
- package/src/skcapstone/tls.py +150 -0
- package/src/skcapstone/tokens.py +5 -5
- package/src/skcapstone/trust_calibration.py +202 -0
- package/src/skcapstone/trust_graph.py +449 -0
- package/src/skcapstone/trustee_monitor.py +385 -0
- package/src/skcapstone/trustee_ops.py +425 -0
- package/src/skcapstone/unified_search.py +421 -0
- package/src/skcapstone/uninstall_wizard.py +694 -0
- package/src/skcapstone/usage.py +331 -0
- package/src/skcapstone/version_check.py +148 -0
- package/src/skcapstone/warmth_anchor.py +333 -0
- package/src/skcapstone/whoami.py +294 -0
- package/systemd/skcapstone-api.socket +9 -0
- package/systemd/skcapstone-memory-compress.service +18 -0
- package/systemd/skcapstone-memory-compress.timer +11 -0
- package/systemd/skcapstone.service +36 -0
- package/systemd/skcapstone@.service +50 -0
- package/systemd/skcomm-heartbeat.service +18 -0
- package/systemd/skcomm-heartbeat.timer +12 -0
- package/systemd/skcomm-queue-drain.service +17 -0
- package/systemd/skcomm-queue-drain.timer +12 -0
- package/tests/conftest.py +13 -1
- package/tests/integration/__init__.py +1 -0
- package/tests/integration/test_consciousness_e2e.py +877 -0
- package/tests/integration/test_skills_registry.py +744 -0
- package/tests/test_agent_card.py +190 -0
- package/tests/test_agent_runtime.py +1283 -0
- package/tests/test_alerts_cmd.py +291 -0
- package/tests/test_archiver.py +498 -0
- package/tests/test_backup.py +254 -0
- package/tests/test_benchmark.py +366 -0
- package/tests/test_blueprints.py +457 -0
- package/tests/test_capabilities.py +257 -0
- package/tests/test_changelog.py +254 -0
- package/tests/test_chat.py +385 -0
- package/tests/test_claude_md.py +271 -0
- package/tests/test_cli_chat_llm.py +336 -0
- package/tests/test_cli_completions.py +390 -0
- package/tests/test_cli_init_reset.py +164 -0
- package/tests/test_cli_memory.py +208 -0
- package/tests/test_cli_profile.py +294 -0
- package/tests/test_cli_skills.py +223 -0
- package/tests/test_cli_status.py +395 -0
- package/tests/test_cli_test_cmd.py +206 -0
- package/tests/test_cli_test_connection.py +364 -0
- package/tests/test_cloud9_bridge.py +260 -0
- package/tests/test_cloud_provider.py +449 -0
- package/tests/test_cloud_providers.py +522 -0
- package/tests/test_completions.py +158 -0
- package/tests/test_component_manager.py +398 -0
- package/tests/test_config_reload.py +386 -0
- package/tests/test_config_validate.py +529 -0
- package/tests/test_consciousness_e2e.py +296 -0
- package/tests/test_consciousness_loop.py +1289 -0
- package/tests/test_context_loader.py +310 -0
- package/tests/test_conversation_api.py +306 -0
- package/tests/test_conversation_manager.py +381 -0
- package/tests/test_conversation_store.py +391 -0
- package/tests/test_conversation_summarizer.py +302 -0
- package/tests/test_cross_package.py +791 -0
- package/tests/test_crush_shim.py +519 -0
- package/tests/test_daemon.py +781 -0
- package/tests/test_daemon_shutdown.py +309 -0
- package/tests/test_dashboard.py +454 -0
- package/tests/test_discovery.py +200 -6
- package/tests/test_docker_provider.py +966 -0
- package/tests/test_doctor.py +257 -0
- package/tests/test_doctor_fix.py +351 -0
- package/tests/test_e2e_automated.py +292 -0
- package/tests/test_error_queue.py +404 -0
- package/tests/test_export.py +441 -0
- package/tests/test_fallback_tracker.py +219 -0
- package/tests/test_file_transfer.py +397 -0
- package/tests/test_fuse_mount.py +832 -0
- package/tests/test_health_loop.py +422 -0
- package/tests/test_heartbeat.py +354 -0
- package/tests/test_housekeeping.py +195 -0
- package/tests/test_identity_capauth.py +307 -0
- package/tests/test_identity_pillar.py +117 -0
- package/tests/test_install_wizard.py +68 -0
- package/tests/test_integration.py +325 -0
- package/tests/test_kms.py +495 -0
- package/tests/test_llm_providers.py +265 -0
- package/tests/test_local_provider.py +591 -0
- package/tests/test_log_config.py +199 -0
- package/tests/test_logs_cmd.py +287 -0
- package/tests/test_mcp_server.py +1909 -0
- package/tests/test_memory_adapter.py +339 -0
- package/tests/test_memory_curator.py +218 -0
- package/tests/test_memory_engine.py +6 -0
- package/tests/test_memory_fortress.py +571 -0
- package/tests/test_memory_pillar.py +119 -0
- package/tests/test_memory_promoter.py +445 -0
- package/tests/test_memory_verifier.py +420 -0
- package/tests/test_message_crypto.py +187 -0
- package/tests/test_metrics.py +632 -0
- package/tests/test_migrate_memories.py +464 -0
- package/tests/test_model_router.py +546 -0
- package/tests/test_mood.py +394 -0
- package/tests/test_multi_agent.py +269 -0
- package/tests/test_notifications.py +270 -0
- package/tests/test_onboard.py +500 -0
- package/tests/test_peer_directory.py +395 -0
- package/tests/test_peers.py +248 -0
- package/tests/test_pillars.py +87 -9
- package/tests/test_preflight.py +484 -0
- package/tests/test_prompt_adapter.py +331 -0
- package/tests/test_proxmox_provider.py +571 -0
- package/tests/test_pubsub.py +377 -0
- package/tests/test_rate_limiter.py +121 -0
- package/tests/test_registry_client.py +129 -0
- package/tests/test_response_cache.py +312 -0
- package/tests/test_response_scorer.py +294 -0
- package/tests/test_runtime.py +59 -0
- package/tests/test_scheduled_tasks.py +451 -0
- package/tests/test_security.py +250 -0
- package/tests/test_security_pillar.py +213 -0
- package/tests/test_self_healing.py +171 -0
- package/tests/test_session_capture.py +200 -0
- package/tests/test_session_recorder.py +360 -0
- package/tests/test_session_skills.py +235 -0
- package/tests/test_shell.py +210 -0
- package/tests/test_snapshots.py +549 -0
- package/tests/test_soul.py +984 -0
- package/tests/test_soul_swap.py +406 -0
- package/tests/test_spawner.py +211 -0
- package/tests/test_state_diff.py +173 -0
- package/tests/test_summary.py +135 -0
- package/tests/test_sync.py +315 -5
- package/tests/test_sync_backends.py +560 -0
- package/tests/test_sync_engine.py +482 -0
- package/tests/test_sync_pillar.py +344 -0
- package/tests/test_sync_pipeline.py +364 -0
- package/tests/test_sync_vault.py +581 -0
- package/tests/test_syncthing_setup.py +168 -22
- package/tests/test_systemd.py +323 -0
- package/tests/test_team_comms.py +408 -0
- package/tests/test_team_engine.py +397 -0
- package/tests/test_testrunner.py +238 -0
- package/tests/test_trust_calibration.py +204 -0
- package/tests/test_trust_graph.py +207 -0
- package/tests/test_trust_pillar.py +291 -0
- package/tests/test_trustee_cli.py +427 -0
- package/tests/test_trustee_cli_integration.py +325 -0
- package/tests/test_trustee_monitor.py +394 -0
- package/tests/test_trustee_ops.py +355 -0
- package/tests/test_unified_search.py +363 -0
- package/tests/test_uninstall_wizard.py +193 -0
- package/tests/test_usage.py +333 -0
- package/tests/test_version_cmd.py +355 -0
- package/tests/test_warmth_anchor.py +162 -0
- package/tests/test_whoami.py +245 -0
- package/tests/test_ws.py +311 -0
- package/.cursorrules +0 -33
- package/src/skcapstone/cli.py +0 -1441
|
@@ -0,0 +1,1935 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SKCapstone REST API — FastAPI application with OpenAPI documentation.
|
|
3
|
+
|
|
4
|
+
Exposes all daemon /api/v1/* endpoints as a proper REST API with:
|
|
5
|
+
- Pydantic response models for automatic schema generation
|
|
6
|
+
- API key security scheme (X-API-Key header)
|
|
7
|
+
- CapAuth Bearer token security for privileged endpoints
|
|
8
|
+
- Swagger UI at /docs
|
|
9
|
+
- ReDoc at /redoc
|
|
10
|
+
- OpenAPI JSON at /openapi.json
|
|
11
|
+
|
|
12
|
+
Usage (standalone docs server):
|
|
13
|
+
uvicorn skcapstone.api:app --host 127.0.0.1 --port 7779 --reload
|
|
14
|
+
|
|
15
|
+
Usage (programmatic, from daemon):
|
|
16
|
+
from skcapstone.api import init_api, app
|
|
17
|
+
init_api(state=state, config=config, consciousness=consciousness)
|
|
18
|
+
# Then run with uvicorn in a background thread.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from __future__ import annotations
|
|
22
|
+
|
|
23
|
+
import json
|
|
24
|
+
import logging
|
|
25
|
+
import os
|
|
26
|
+
import queue
|
|
27
|
+
import re
|
|
28
|
+
import subprocess
|
|
29
|
+
import threading
|
|
30
|
+
import uuid
|
|
31
|
+
from datetime import datetime, timedelta, timezone
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
from typing import Any, AsyncIterator, Dict, List, Optional
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger("skcapstone.api")
|
|
36
|
+
|
|
37
|
+
# ── FastAPI import guard ──────────────────────────────────────────────────────
|
|
38
|
+
|
|
39
|
+
try:
|
|
40
|
+
from fastapi import (
|
|
41
|
+
Depends,
|
|
42
|
+
FastAPI,
|
|
43
|
+
HTTPException,
|
|
44
|
+
Path as FPath,
|
|
45
|
+
Query,
|
|
46
|
+
Request,
|
|
47
|
+
Security,
|
|
48
|
+
WebSocket,
|
|
49
|
+
WebSocketDisconnect,
|
|
50
|
+
status,
|
|
51
|
+
)
|
|
52
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
53
|
+
from fastapi.responses import JSONResponse, StreamingResponse
|
|
54
|
+
from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
|
|
55
|
+
from pydantic import BaseModel, Field
|
|
56
|
+
except ImportError as _exc:
|
|
57
|
+
raise ImportError(
|
|
58
|
+
"FastAPI is required for the REST API module. "
|
|
59
|
+
"Install with: pip install skcapstone[api]"
|
|
60
|
+
) from _exc
|
|
61
|
+
|
|
62
|
+
# ── Security schemes ─────────────────────────────────────────────────────────
|
|
63
|
+
|
|
64
|
+
# Optional X-API-Key header scheme. auto_error=False means FastAPI itself
# never rejects a request missing the header — the credential (or None) is
# handed to the dependency, and enforcement is keyed off the
# SKCAPSTONE_API_KEY environment variable as described below.
_api_key_header = APIKeyHeader(
    name="X-API-Key",
    auto_error=False,
    description=(
        "Optional API key for the SKCapstone REST API. "
        "Set the SKCAPSTONE_API_KEY environment variable to enforce key validation. "
        "When the env var is absent, the daemon operates in unauthenticated local mode."
    ),
)
|
|
73
|
+
|
|
74
|
+
# HTTP Bearer scheme for CapAuth tokens on privileged endpoints.
# auto_error=False: a missing Authorization header is surfaced to the
# dependency as None instead of an automatic 403, so each handler decides
# how to respond.
_bearer_scheme = HTTPBearer(
    auto_error=False,
    description=(
        "CapAuth bearer token required for privileged streaming endpoints "
        "(e.g. GET /api/v1/logs WebSocket). Tokens are issued by the CapAuth "
        "identity system and verified against the agent's PGP key."
    ),
)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
# ── Module-level daemon context ───────────────────────────────────────────────
|
|
85
|
+
|
|
86
|
+
# Module-level daemon context shared by all request handlers.
# Empty at import time; init_api() fills in 'state', 'config',
# 'consciousness', and 'runtime', which handlers read via _get_ctx().
_ctx: Dict[str, Any] = {}  # Populated by init_api()
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def init_api(
    state: Any,
    config: Any,
    consciousness: Any = None,
    runtime: Any = None,
) -> None:
    """Inject daemon runtime objects into the module-level API context.

    Must be invoked exactly once, before the FastAPI server starts, so
    that route handlers can reach daemon state, configuration, and the
    consciousness loop through the shared context rather than importing
    daemon globals directly.

    Args:
        state: DaemonState instance from daemon.py.
        config: DaemonConfig instance from daemon.py.
        consciousness: Optional ConsciousnessLoop instance (may be None).
        runtime: Optional AgentRuntime instance (may be None).
    """
    _ctx.update(
        state=state,
        config=config,
        consciousness=consciousness,
        runtime=runtime,
    )
    logger.info("FastAPI context initialised — docs at /docs")
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _get_ctx() -> Dict[str, Any]:
    """Return the current daemon context dict.

    The dict is empty until init_api() has been called; after that it
    holds the 'state', 'config', 'consciousness', and 'runtime' objects
    bound by the daemon.
    """
    return _ctx
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
# ── Pydantic response models ──────────────────────────────────────────────────
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class HealthResponse(BaseModel):
    """Daemon health and liveness summary.

    Aggregates process liveness, the outcome of the most recent
    self-healing cycle, per-transport backend liveness flags, and basic
    host resource figures into a single response payload.
    """

    # Overall liveness flag for the daemon process.
    status: str = Field(..., description="'ok' when daemon is running, 'stopped' otherwise.")
    uptime_seconds: float = Field(..., description="Seconds since daemon start.")
    daemon_pid: Optional[int] = Field(None, description="OS process ID of the daemon.")
    consciousness_enabled: bool = Field(
        ..., description="True when the consciousness loop is active."
    )
    # Self-healing fields are optional/zero when no cycle has run yet.
    self_healing_last_run: Optional[str] = Field(
        None, description="ISO-8601 timestamp of the last self-healing cycle."
    )
    self_healing_issues_found: int = Field(
        0, description="Number of issues found in the last self-healing cycle."
    )
    self_healing_auto_fixed: int = Field(
        0, description="Number of issues automatically fixed in the last cycle."
    )
    backend_health: Dict[str, Any] = Field(
        default_factory=dict,
        description="Per-transport liveness flags (e.g. {skcomm: true}).",
    )
    disk_free_gb: float = Field(0.0, description="Free disk space in gigabytes.")
    memory_usage_mb: float = Field(0.0, description="Current RSS memory usage in MB.")
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
class ComponentSnapshot(BaseModel):
    """Health record for a single daemon subsystem component.

    One snapshot per watchdog-managed component; timestamp fields are
    None for a component that has not yet started or pulsed.
    """

    name: str = Field(..., description="Component identifier (e.g. 'poll', 'consciousness').")
    status: str = Field(
        ..., description="One of: pending, alive, dead, restarting, disabled."
    )
    auto_restart: bool = Field(
        ..., description="True when the watchdog will auto-restart this component."
    )
    started_at: Optional[str] = Field(
        None, description="ISO-8601 timestamp when the component last started."
    )
    last_heartbeat: Optional[str] = Field(
        None, description="ISO-8601 timestamp of the last heartbeat pulse."
    )
    heartbeat_age_seconds: Optional[int] = Field(
        None, description="Seconds since the last heartbeat."
    )
    restart_count: int = Field(0, description="Number of automatic restarts.")
    last_error: Optional[str] = Field(
        None, description="Last recorded error message, if any."
    )
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
class ComponentsResponse(BaseModel):
    """Snapshot of all daemon subsystem component health records.

    Wrapper payload: a list of ComponentSnapshot entries, one per
    registered component.
    """

    components: List[ComponentSnapshot] = Field(
        ..., description="List of component health snapshots."
    )
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
class AgentIdentitySummary(BaseModel):
    """Compact agent identity record surfaced in the dashboard."""

    # Every field defaults to the empty string so a partially-initialised
    # identity still serialises without errors.
    name: str = Field(default="", description="Agent display name.")
    fingerprint: str = Field(default="", description="PGP key fingerprint (hex).")
    status: str = Field(default="", description="Identity pillar status.")
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
class MemorySummary(BaseModel):
    """Memory layer statistics.

    Entry counts are broken down by layer (short/mid/long term) alongside
    the overall total and the memory pillar's status string.
    """

    total: int = Field(0, description="Total memory entries across all layers.")
    short_term: int = Field(0, description="Entries in short-term memory.")
    mid_term: int = Field(0, description="Entries in mid-term memory.")
    long_term: int = Field(0, description="Entries in long-term memory.")
    status: str = Field("", description="Memory pillar status string.")
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class DaemonSummary(BaseModel):
    """Daemon process runtime metrics."""

    # Populated from the daemon state snapshot() dict in get_dashboard.
    running: bool = Field(True, description="True when the daemon process is alive.")
    pid: Optional[int] = Field(None, description="Daemon OS process ID.")
    uptime_seconds: float = Field(0.0, description="Seconds since daemon start.")
    messages_received: int = Field(0, description="Total messages received since start.")
    syncs_completed: int = Field(0, description="Total vault syncs completed since start.")
    # Derived as len(snapshot["recent_errors"]) rather than a running counter.
    error_count: int = Field(0, description="Count of errors recorded since start.")
    inflight_count: int = Field(0, description="Messages currently being processed.")
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
class SystemStats(BaseModel):
    """Host system resource metrics.

    Populated by ``_collect_system_stats`` from ``shutil.disk_usage("/")``
    and ``/proc/meminfo``; all fields fall back to zero when a probe fails.
    """

    # Descriptions added for OpenAPI docs, consistent with the other models.
    disk_total_gb: float = Field(0.0, description="Total size of the root filesystem in GiB.")
    disk_used_gb: float = Field(0.0, description="Used space on the root filesystem in GiB.")
    disk_free_gb: float = Field(0.0, description="Free space on the root filesystem in GiB.")
    memory_total_mb: int = Field(0, description="Total physical memory in MiB.")
    memory_used_mb: int = Field(0, description="Used memory (total minus available) in MiB.")
    memory_free_mb: int = Field(0, description="Available memory in MiB.")
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
class DashboardResponse(BaseModel):
    """Full dashboard snapshot returned by GET /api/v1/dashboard."""

    # Free-form dict: either fields from the runtime manifest or the raw
    # identity.json contents (see get_dashboard fallback path).
    agent: Dict[str, Any] = Field(
        default_factory=dict, description="Agent identity and pillar summary."
    )
    daemon: DaemonSummary = Field(
        default_factory=DaemonSummary, description="Daemon process metrics."
    )
    # Empty dict when the consciousness loop is not enabled.
    consciousness: Dict[str, Any] = Field(
        default_factory=dict, description="Consciousness loop stats (if enabled)."
    )
    backends: Dict[str, Any] = Field(
        default_factory=dict, description="LLM and transport backend availability."
    )
    # At most 5 entries, newest first (see get_dashboard).
    conversations: List[Dict[str, Any]] = Field(
        default_factory=list, description="Recent conversation summaries."
    )
    system: SystemStats = Field(
        default_factory=SystemStats, description="Host system resource metrics."
    )
    recent_errors: List[str] = Field(
        default_factory=list, description="Recent daemon error messages."
    )
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
class BoardSummary(BaseModel):
    """Coordination board task counts, broken down by task status.

    Mirrors the ``board["summary"]`` dict assembled in ``get_capstone``.
    """

    # Descriptions added for OpenAPI docs, consistent with the other models.
    total: int = Field(0, description="Total tasks on the board.")
    done: int = Field(0, description="Tasks with status 'done'.")
    in_progress: int = Field(0, description="Tasks with status 'in_progress'.")
    claimed: int = Field(0, description="Tasks with status 'claimed'.")
    open: int = Field(0, description="Tasks with status 'open'.")
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
class ActiveTask(BaseModel):
    """An in-progress or claimed coordination task."""

    # Matches the shape of the board["active"] entries built in get_capstone.
    id: str = Field(..., description="8-character task ID.")
    title: str = Field(..., description="Task title.")
    priority: str = Field(..., description="Task priority (critical/high/medium/low).")
    status: str = Field(..., description="Task status (claimed/in_progress).")
    claimed_by: Optional[str] = Field(None, description="Agent name that claimed the task.")
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
class CapstoneResponse(BaseModel):
    """Full capstone snapshot: pillars, memory, board, consciousness."""

    # Response model of GET /api/v1/capstone (see get_capstone below).
    agent: Dict[str, Any] = Field(default_factory=dict, description="Agent identity summary.")
    pillars: Dict[str, str] = Field(
        default_factory=dict,
        description="Pillar name → status string (active/degraded/missing).",
    )
    memory: MemorySummary = Field(
        default_factory=MemorySummary, description="Memory layer statistics."
    )
    # Free-form dict with "summary" (BoardSummary-shaped) and "active"
    # (list of ActiveTask-shaped dicts) keys.
    board: Dict[str, Any] = Field(
        default_factory=dict, description="Coordination board summary and active tasks."
    )
    consciousness: Dict[str, Any] = Field(
        default_factory=dict, description="Consciousness loop statistics."
    )
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
class AgentHeartbeat(BaseModel):
    """Live heartbeat data for a household agent."""

    # Freshness is derived with _hb_alive(); the 300s TTL default here
    # matches _hb_alive's fallback when a record omits ttl_seconds.
    alive: bool = Field(False, description="True when heartbeat is within its TTL.")
    status: str = Field("", description="Agent-reported status string.")
    timestamp: Optional[str] = Field(None, description="ISO-8601 heartbeat timestamp.")
    ttl_seconds: int = Field(300, description="Heartbeat TTL in seconds.")
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
class HouseholdAgent(BaseModel):
    """Summary of a single agent in the shared household."""

    name: str = Field(..., description="Agent directory name.")
    status: str = Field("unknown", description="Derived liveness status.")
    # Optional dicts: None when the corresponding data source is absent.
    identity: Optional[Dict[str, Any]] = Field(
        None, description="Agent identity.json contents."
    )
    heartbeat: Optional[Dict[str, Any]] = Field(
        None, description="Most recent heartbeat record."
    )
    consciousness: Optional[Dict[str, Any]] = Field(
        None, description="Consciousness stats from the serving agent (if available)."
    )
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
class HouseholdAgentsResponse(BaseModel):
    """List of all agents known in the shared household."""

    # Wrapper object (rather than a bare list) so the endpoint can grow
    # additional top-level fields without breaking clients.
    agents: List[HouseholdAgent] = Field(
        ..., description="All agents found in the shared agents directory."
    )
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
class ConversationSummary(BaseModel):
    """Brief summary of a conversation thread."""

    peer: str = Field(..., description="Peer agent or user name.")
    message_count: int = Field(0, description="Total messages in the thread.")
    last_message_time: Optional[str] = Field(
        None, description="ISO-8601 timestamp of the most recent message."
    )
    last_message_preview: str = Field(
        "", description="First 120 characters of the last message."
    )
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
class ConversationsResponse(BaseModel):
    """List of all conversation threads."""

    # Ordering (most recently active first) is the producer's responsibility;
    # the model itself does not sort.
    conversations: List[ConversationSummary] = Field(
        ..., description="All conversation threads, sorted by most recently active."
    )
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
class MessageEntry(BaseModel):
    """A single message in a conversation thread.

    All fields are optional because stored message envelopes are free-form
    JSON and may omit any of them.
    """

    # Descriptions added for OpenAPI docs, consistent with the other models.
    sender: Optional[str] = Field(None, description="Sending agent or user name.")
    recipient: Optional[str] = Field(None, description="Receiving agent or user name.")
    content: Optional[str] = Field(None, description="Message body text.")
    timestamp: Optional[str] = Field(None, description="ISO-8601 message timestamp.")
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
class ConversationHistoryResponse(BaseModel):
    """Full message history for a conversation with a specific peer."""

    peer: str = Field(..., description="Peer name this conversation belongs to.")
    # Raw envelope dicts are returned as-is (not coerced to MessageEntry)
    # so no stored fields are dropped by validation.
    messages: List[Dict[str, Any]] = Field(
        ..., description="Full message list (raw envelope dicts)."
    )
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
class SendMessageRequest(BaseModel):
    """Request body for posting a message to a peer."""

    # min_length=1 rejects empty messages with a 422 before the handler runs.
    content: str = Field(..., min_length=1, description="Message text to send.")
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
class SendMessageResponse(BaseModel):
    """Confirmation after a message is dispatched to a peer."""

    status: str = Field("sent", description="Always 'sent' on success.")
    message_id: str = Field(..., description="UUID of the created message envelope.")
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
class DeleteConversationResponse(BaseModel):
    """Confirmation after a conversation thread is deleted."""

    status: str = Field("deleted", description="Always 'deleted' on success.")
    peer: str = Field(..., description="Name of the peer whose thread was removed.")
|
|
377
|
+
|
|
378
|
+
|
|
379
|
+
class MetricsResponse(BaseModel):
    """Consciousness loop runtime metrics."""

    loops_completed: int = Field(0, description="Total consciousness loop iterations.")
    messages_processed: int = Field(0, description="Messages processed by the loop.")
    last_loop_at: Optional[str] = Field(
        None, description="ISO-8601 timestamp of the most recent loop execution."
    )
    average_loop_ms: float = Field(0.0, description="Average loop duration in milliseconds.")
    errors: int = Field(0, description="Total errors encountered in the loop.")
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
class LegacyStatusResponse(BaseModel):
    """Legacy /status endpoint response.

    Kept for backward compatibility with pre-v1 clients; field meanings
    mirror :class:`DaemonSummary` and :class:`DashboardResponse`.
    """

    # Descriptions added for OpenAPI docs, consistent with DaemonSummary.
    running: bool = Field(True, description="True when the daemon process is alive.")
    pid: Optional[int] = Field(None, description="Daemon OS process ID.")
    uptime_seconds: float = Field(0.0, description="Seconds since daemon start.")
    messages_received: int = Field(0, description="Total messages received since start.")
    syncs_completed: int = Field(0, description="Total vault syncs completed since start.")
    started_at: Optional[str] = Field(
        None, description="ISO-8601 timestamp when the daemon started."
    )
    recent_errors: List[str] = Field(
        default_factory=list, description="Recent daemon error messages."
    )
    inflight_count: int = Field(0, description="Messages currently being processed.")
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
class PingResponse(BaseModel):
    """Response from the liveness ping endpoint."""

    # Description added for OpenAPI docs, consistent with the other models.
    pong: bool = Field(True, description="Liveness acknowledgement flag (defaults to True).")
    pid: Optional[int] = Field(None, description="Daemon OS process ID.")
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
class ArgoCDApp(BaseModel):
    """ArgoCD Application entry parsed from skstacks manifests."""

    name: str = Field(..., description="ArgoCD Application name.")
    project: str = Field("", description="ArgoCD project name.")
    namespace: str = Field("argocd", description="Kubernetes namespace.")
    source_path: str = Field("", description="Git source path in the repo.")
    repo_url: str = Field("", description="Git repository URL.")
    target_revision: str = Field("", description="Target branch or revision.")
    # Sync/health default to "Unknown" — presumably only filled in when live
    # kubectl data is available (source == "yaml+kubectl"); confirm upstream.
    sync_status: str = Field("Unknown", description="Sync status (Synced/OutOfSync/Unknown).")
    health_status: str = Field(
        "Unknown", description="Health status (Healthy/Degraded/Progressing/Unknown)."
    )
    color: str = Field("gray", description="Dashboard color hint (green/yellow/red/gray).")
    last_synced: Optional[str] = Field(
        None, description="ISO-8601 timestamp of last successful sync."
    )
    manifest_file: str = Field("", description="Source YAML filename.")
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
class ArgoCDSummary(BaseModel):
    """ArgoCD app count summary, aggregated over :class:`ArgoCDApp` entries."""

    # Descriptions added for OpenAPI docs, consistent with the other models.
    total: int = Field(0, description="Total ArgoCD applications discovered.")
    synced: int = Field(0, description="Apps with sync status 'Synced'.")
    out_of_sync: int = Field(0, description="Apps with sync status 'OutOfSync'.")
    unknown: int = Field(0, description="Apps with sync status 'Unknown'.")
    healthy: int = Field(0, description="Apps with health status 'Healthy'.")
    degraded: int = Field(0, description="Apps with health status 'Degraded'.")
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
class ArgoCDStatusResponse(BaseModel):
    """ArgoCD application status list from skstacks/v2 manifests."""

    source: str = Field(
        "yaml", description="Data source: 'yaml' (static) or 'yaml+kubectl' (live)."
    )
    checked_at: str = Field(..., description="ISO-8601 timestamp of this response.")
    skstacks_root: str = Field("", description="Resolved path to skstacks v2 root.")
    apps: List[ArgoCDApp] = Field(
        default_factory=list, description="List of ArgoCD applications."
    )
    summary: ArgoCDSummary = Field(
        default_factory=ArgoCDSummary, description="App count summary."
    )
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
# ── Security dependency ───────────────────────────────────────────────────────
|
|
459
|
+
|
|
460
|
+
_PEER_NAME_SAFE_RE = re.compile(r"[^a-zA-Z0-9_\-@\.]")
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
def _sanitize_peer(peer: str) -> str:
|
|
464
|
+
"""Sanitize a peer name for safe filesystem use.
|
|
465
|
+
|
|
466
|
+
Args:
|
|
467
|
+
peer: Raw peer name from the URL path.
|
|
468
|
+
|
|
469
|
+
Returns:
|
|
470
|
+
Safe, filesystem-friendly peer identifier (max 64 chars).
|
|
471
|
+
"""
|
|
472
|
+
if not peer or not isinstance(peer, str):
|
|
473
|
+
return ""
|
|
474
|
+
sanitized = peer.replace("\x00", "").replace("/", "").replace("\\", "")
|
|
475
|
+
sanitized = _PEER_NAME_SAFE_RE.sub("", sanitized)
|
|
476
|
+
sanitized = sanitized.strip(".")
|
|
477
|
+
return sanitized[:64]
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
def _check_api_key(api_key: Optional[str] = Security(_api_key_header)) -> Optional[str]:
    """Validate the optional X-API-Key header.

    When the SKCAPSTONE_API_KEY environment variable is set, the provided
    key must match it exactly. When the variable is absent the daemon
    operates in unauthenticated local mode and any (or no) key is accepted.

    The comparison uses :func:`secrets.compare_digest` so that key checking
    runs in constant time and does not leak prefix-match information through
    response timing.

    Args:
        api_key: Value from the X-API-Key request header.

    Returns:
        The validated key string, or None in unauthenticated mode.

    Raises:
        HTTPException: 401 Unauthorized when key validation fails.
    """
    import secrets

    expected = os.environ.get("SKCAPSTONE_API_KEY")
    if expected and (api_key is None or not secrets.compare_digest(api_key, expected)):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid or missing API key. Pass the key in the X-API-Key header.",
        )
    return api_key
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
def _check_bearer(
    credentials: Optional[HTTPAuthorizationCredentials] = Security(_bearer_scheme),
) -> str:
    """Validate a CapAuth Bearer token for privileged endpoints.

    Attempts CapAuth validation first (if skcomm is installed), then falls
    back to skcapstone signed token verification.

    Args:
        credentials: HTTP Authorization Bearer credentials.

    Returns:
        PGP fingerprint of the authenticated identity.

    Raises:
        HTTPException: 401 Unauthorized when token is missing or invalid.
    """
    # Extract the raw token string; stays None when no Authorization header
    # was sent, which ultimately yields a 401 below.
    token_str: Optional[str] = None
    if credentials and credentials.credentials:
        token_str = credentials.credentials

    fingerprint: Optional[str] = None
    config = _ctx.get("config")

    try:
        from skcomm.capauth_validator import CapAuthValidator

        # NOTE(review): only ImportError triggers the local-token fallback;
        # if CapAuthValidator.validate raises anything else it propagates,
        # and if it returns None the request is rejected with 401 below.
        fingerprint = CapAuthValidator(require_auth=True).validate(token_str)
    except ImportError:
        # skcomm not installed: fall back to skcapstone's own signed tokens.
        if token_str and config:
            try:
                from .tokens import import_token, verify_token

                tok = import_token(token_str)
                if verify_token(tok, home=config.home):
                    fingerprint = tok.payload.issuer
            except Exception:
                # Any parse/verify failure is treated as "not authenticated".
                fingerprint = None

    if fingerprint is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=(
                "CapAuth bearer token required. "
                "Obtain a token with: skcapstone token issue"
            ),
        )
    return fingerprint
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
# ── Helper: system stats ──────────────────────────────────────────────────────
|
|
556
|
+
|
|
557
|
+
|
|
558
|
+
def _collect_system_stats() -> SystemStats:
    """Collect disk and memory metrics from the host OS.

    Each probe is independently best-effort: a failure zeroes only its own
    group of fields.

    Returns:
        SystemStats populated from /proc/meminfo and shutil.disk_usage.
    """
    import shutil

    fields: Dict[str, Any] = {}

    # Disk usage of the root filesystem, reported in GiB to one decimal.
    try:
        total, used, free = shutil.disk_usage("/")
        gib = 1024**3
        fields["disk_total_gb"] = round(total / gib, 1)
        fields["disk_used_gb"] = round(used / gib, 1)
        fields["disk_free_gb"] = round(free / gib, 1)
    except Exception:
        fields["disk_total_gb"] = fields["disk_used_gb"] = fields["disk_free_gb"] = 0.0

    # Memory from /proc/meminfo (Linux only; zeros on other platforms).
    try:
        meminfo: Dict[str, int] = {}
        with open("/proc/meminfo") as fh:
            for raw in fh:
                tokens = raw.split()
                if len(tokens) >= 2:
                    meminfo[tokens[0].rstrip(":")] = int(tokens[1])
        total_kb = meminfo.get("MemTotal", 0)
        avail_kb = meminfo.get("MemAvailable", 0)
        fields["memory_total_mb"] = round(total_kb / 1024)
        fields["memory_used_mb"] = round((total_kb - avail_kb) / 1024)
        fields["memory_free_mb"] = round(avail_kb / 1024)
    except Exception:
        fields["memory_total_mb"] = fields["memory_used_mb"] = fields["memory_free_mb"] = 0

    return SystemStats(**fields)
|
|
589
|
+
|
|
590
|
+
|
|
591
|
+
def _hb_alive(hb: dict) -> bool:
|
|
592
|
+
"""Return True if a heartbeat dict is within its TTL.
|
|
593
|
+
|
|
594
|
+
Args:
|
|
595
|
+
hb: Heartbeat dict with 'timestamp' and optional 'ttl_seconds'.
|
|
596
|
+
|
|
597
|
+
Returns:
|
|
598
|
+
True when the heartbeat is fresh, False when expired or unparseable.
|
|
599
|
+
"""
|
|
600
|
+
ts_str = hb.get("timestamp", "")
|
|
601
|
+
ttl = hb.get("ttl_seconds", 300)
|
|
602
|
+
if not ts_str:
|
|
603
|
+
return False
|
|
604
|
+
try:
|
|
605
|
+
ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
|
|
606
|
+
return datetime.now(timezone.utc) <= ts + timedelta(seconds=ttl)
|
|
607
|
+
except Exception:
|
|
608
|
+
return False
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
# ── FastAPI application ───────────────────────────────────────────────────────
|
|
612
|
+
|
|
613
|
+
# Module-level ASGI application; metadata below feeds /docs, /redoc and
# /openapi.json (the schema is further customised by _custom_openapi below).
app = FastAPI(
    title="SKCapstone Agent API",
    description=(
        "Sovereign agent REST API for the SKCapstone framework.\n\n"
        "Exposes daemon health, memory, coordination board, consciousness, "
        "household agent directory, and conversation management endpoints.\n\n"
        "## Authentication\n\n"
        "Most endpoints are unauthenticated in local daemon mode. Set the "
        "`SKCAPSTONE_API_KEY` environment variable to enable API key enforcement. "
        "Privileged streaming endpoints (e.g. `GET /api/v1/logs`) require a "
        "CapAuth Bearer token issued by `skcapstone token issue`.\n\n"
        "## Security Schemes\n\n"
        "- **ApiKeyAuth** — `X-API-Key` request header, validated when "
        "`SKCAPSTONE_API_KEY` env var is set.\n"
        "- **BearerAuth** — `Authorization: Bearer <capauth-token>` for privileged "
        "streaming endpoints."
    ),
    version="0.9.0",
    docs_url="/docs",
    redoc_url="/redoc",
    openapi_url="/openapi.json",
    contact={
        "name": "smilinTux.org",
        "url": "https://smilintux.org",
        "email": "hello@smilintux.org",
    },
    license_info={
        "name": "GPL-3.0-or-later",
        "url": "https://www.gnu.org/licenses/gpl-3.0.html",
    },
)
|
|
644
|
+
|
|
645
|
+
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is a
# very permissive CORS posture — any origin can make credentialed requests.
# Acceptable for a local daemon, but worth tightening before network-exposed
# deployments (confirm the intended trust model).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST", "DELETE", "OPTIONS"],
    allow_headers=["Content-Type", "Authorization", "X-API-Key"],
)
|
|
652
|
+
|
|
653
|
+
|
|
654
|
+
# ── Custom OpenAPI schema: inject BearerAuth security scheme ─────────────────
|
|
655
|
+
|
|
656
|
+
def _custom_openapi() -> Dict[str, Any]:
    """Build — and memoise on ``app.openapi_schema`` — the OpenAPI schema.

    FastAPI auto-registers ``APIKeyHeader`` from the ``X-API-Key`` dependency,
    but the ``BearerAuth`` scheme (used by the ``/api/v1/logs`` WebSocket
    endpoint) must be injected manually because WebSocket routes are not
    included in the OpenAPI 3.0 spec.

    Registered security schemes:
        - **APIKeyHeader** — ``apiKey`` in header ``X-API-Key`` (optional, see SKCAPSTONE_API_KEY)
        - **BearerAuth** — HTTP Bearer token issued by CapAuth (required for /api/v1/logs WS)
    """
    cached = app.openapi_schema
    if cached:
        return cached

    from fastapi.openapi.utils import get_openapi

    spec = get_openapi(
        title=app.title,
        version=app.version,
        description=app.description,
        routes=app.routes,
        contact=app.contact,
        license_info=app.license_info,
    )

    # Manually register the BearerAuth (HTTP Bearer) security scheme.
    scheme_map = spec.setdefault("components", {}).setdefault("securitySchemes", {})
    scheme_map["BearerAuth"] = {
        "type": "http",
        "scheme": "bearer",
        "bearerFormat": "CapAuth",
        "description": (
            "CapAuth bearer token required for privileged streaming endpoints "
            "(GET /api/v1/logs WebSocket). Issue a token with: "
            "``skcapstone token issue``"
        ),
    }

    app.openapi_schema = spec
    return spec


app.openapi = _custom_openapi  # type: ignore[method-assign]
|
|
700
|
+
|
|
701
|
+
# ── /api/v1/health ────────────────────────────────────────────────────────────
|
|
702
|
+
|
|
703
|
+
|
|
704
|
+
@app.get(
    "/api/v1/health",
    response_model=HealthResponse,
    summary="Daemon health check",
    tags=["Health"],
    responses={
        200: {"description": "Daemon is running and healthy."},
        503: {"description": "Daemon is stopped or unreachable."},
    },
)
async def get_health(
    _key: Optional[str] = Depends(_check_api_key),
) -> HealthResponse:
    """Return a comprehensive health snapshot of the running daemon.

    Includes uptime, consciousness status, self-healing metrics, backend
    transport availability, and host system resource usage. Returns HTTP 503
    when the daemon context has not been initialised (daemon not running).
    """
    # _ctx["state"] is only populated while the daemon is running.
    state = _ctx.get("state")
    if state is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    snap = state.snapshot()
    healing = snap.get("self_healing", {})
    sys_stats = _collect_system_stats()
    consciousness = _ctx.get("consciousness")
    # Enabled only when a consciousness object exists AND reports enabled=True.
    c_enabled = bool(consciousness and consciousness.stats.get("enabled", False))
    return HealthResponse(
        status="ok" if snap.get("running", True) else "stopped",
        uptime_seconds=snap.get("uptime_seconds", 0.0),
        daemon_pid=snap.get("pid"),
        consciousness_enabled=c_enabled,
        self_healing_last_run=healing.get("timestamp"),
        # "still_broken" maps to issues_found — presumably issues the healer
        # could not repair; confirm against the self-healer's schema.
        self_healing_issues_found=healing.get("still_broken", 0),
        self_healing_auto_fixed=healing.get("auto_fixed", 0),
        backend_health=snap.get("transport_health", {}),
        disk_free_gb=sys_stats.disk_free_gb,
        memory_usage_mb=float(sys_stats.memory_used_mb),
    )
|
|
746
|
+
|
|
747
|
+
|
|
748
|
+
# ── /api/v1/components ────────────────────────────────────────────────────────
|
|
749
|
+
|
|
750
|
+
|
|
751
|
+
@app.get(
    "/api/v1/components",
    response_model=ComponentsResponse,
    summary="Daemon subsystem component health",
    tags=["Health"],
    responses={
        200: {"description": "All component health records."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def get_components(
    _key: Optional[str] = Depends(_check_api_key),
) -> ComponentsResponse:
    """Return health snapshots for all registered daemon subsystem components.

    Components include the poll loop, vault sync, transport health checker,
    consciousness loop, and the self-healer watchdog. Each record includes
    status, heartbeat age, and restart history.
    """
    # "state" is used purely as a daemon-liveness check here; the component
    # manager itself lives on the separate "service" context entry below.
    service = _ctx.get("state")
    if service is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    # The component manager is on the DaemonService object, not DaemonState.
    # Access it via _ctx["state"] which may expose _component_mgr.
    daemon_service = _ctx.get("service")
    # Gracefully degrade to an empty list when the service object is absent
    # or predates the component manager.
    if daemon_service and hasattr(daemon_service, "_component_mgr"):
        raw = daemon_service._component_mgr.snapshot()
    else:
        raw = []
    return ComponentsResponse(components=[ComponentSnapshot(**c) for c in raw])
|
|
784
|
+
|
|
785
|
+
|
|
786
|
+
# ── /api/v1/dashboard ────────────────────────────────────────────────────────
|
|
787
|
+
|
|
788
|
+
|
|
789
|
+
@app.get(
    "/api/v1/dashboard",
    response_model=DashboardResponse,
    summary="Full daemon dashboard snapshot",
    tags=["Dashboard"],
    responses={
        200: {"description": "Complete dashboard data."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def get_dashboard(
    _key: Optional[str] = Depends(_check_api_key),
) -> DashboardResponse:
    """Return the complete dashboard data used by the HTML UI and Flutter app.

    Assembles agent identity, daemon runtime metrics, consciousness status,
    LLM/transport backend availability, recent conversations, host system
    stats, and recent error messages into a single JSON snapshot.

    Every sub-section is collected best-effort: a failure in one section
    leaves that section empty rather than failing the whole request.
    """
    state = _ctx.get("state")
    config = _ctx.get("config")
    if state is None or config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    consciousness = _ctx.get("consciousness")
    snap = state.snapshot()
    sys_stats = _collect_system_stats()

    # Agent identity: prefer the live runtime manifest...
    agent: Dict[str, Any] = {}
    try:
        runtime = _ctx.get("runtime")
        if runtime:
            m = runtime.manifest
            agent = {
                "name": m.identity.name,
                "fingerprint": m.identity.fingerprint or "",
                "consciousness": m.consciousness.value if hasattr(m, "consciousness") else "",
                "version": m.version,
            }
    except Exception:
        pass
    # ...falling back to the on-disk identity.json when no runtime is loaded.
    if not agent:
        try:
            identity_path = config.home / "identity" / "identity.json"
            if identity_path.exists():
                agent = json.loads(identity_path.read_text(encoding="utf-8"))
        except Exception:
            pass

    # Consciousness stats
    c_stats: Dict[str, Any] = {}
    if consciousness:
        try:
            c_stats = dict(consciousness.stats)
        except Exception:
            pass

    # Recent conversations: 5 most recently modified thread files.
    conversations: List[Dict[str, Any]] = []
    try:
        conv_dir = config.shared_root / "conversations"
        if conv_dir.exists():
            for cf in sorted(conv_dir.glob("*.json"), key=lambda p: p.stat().st_mtime, reverse=True)[:5]:
                msgs = json.loads(cf.read_text(encoding="utf-8"))
                if isinstance(msgs, list) and msgs:
                    last = msgs[-1]
                    # Envelopes may use either "content" or "message" keys.
                    preview = (last.get("content") or last.get("message", ""))[:80]
                    conversations.append({
                        "peer": cf.stem,
                        "count": len(msgs),
                        "last": last.get("timestamp"),
                        "preview": preview,
                    })
    except Exception:
        pass

    daemon_summary = DaemonSummary(
        running=snap.get("running", True),
        pid=snap.get("pid"),
        uptime_seconds=snap.get("uptime_seconds", 0.0),
        messages_received=snap.get("messages_received", 0),
        syncs_completed=snap.get("syncs_completed", 0),
        error_count=len(snap.get("recent_errors", [])),
        inflight_count=snap.get("inflight_count", 0),
    )

    return DashboardResponse(
        agent=agent,
        daemon=daemon_summary,
        consciousness=c_stats,
        backends=snap.get("transport_health", {}),
        conversations=conversations,
        system=sys_stats,
        recent_errors=snap.get("recent_errors", []),
    )
|
|
887
|
+
|
|
888
|
+
|
|
889
|
+
# ── /api/v1/capstone ─────────────────────────────────────────────────────────
|
|
890
|
+
|
|
891
|
+
|
|
892
|
+
@app.get(
    "/api/v1/capstone",
    response_model=CapstoneResponse,
    summary="Capstone pillars, memory, board, and consciousness",
    tags=["Dashboard"],
    responses={
        200: {"description": "Full capstone pillar snapshot."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def get_capstone(
    _key: Optional[str] = Depends(_check_api_key),
) -> CapstoneResponse:
    """Return pillars, memory stats, coordination board summary, and consciousness.

    This is the primary endpoint consumed by the vanilla-JS dashboard and
    Flutter app for a high-level sovereign-agent state overview.

    Each section is gathered best-effort — optional subsystems (memory
    engine, coordination board) that are missing or failing simply leave
    their section at its default value.
    """
    config = _ctx.get("config")
    if config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    consciousness = _ctx.get("consciousness")

    # Pillars via runtime
    agent: Dict[str, Any] = {}
    pillars: Dict[str, str] = {}
    try:
        runtime = _ctx.get("runtime")
        if runtime:
            m = runtime.manifest
            agent = {"name": m.identity.name, "fingerprint": m.identity.fingerprint or ""}
            pillars = {k: v.value for k, v in m.pillar_summary.items()}
    except Exception:
        pass

    # Memory stats (local import: the memory engine is an optional module).
    memory = MemorySummary()
    try:
        from .memory_engine import get_stats as _mem_stats

        ms = _mem_stats(config.home)
        memory = MemorySummary(
            total=ms.total_memories,
            short_term=ms.short_term,
            mid_term=ms.mid_term,
            long_term=ms.long_term,
            status=ms.status.value,
        )
    except Exception:
        pass

    # Coordination board: status counts plus the claimed/in-progress tasks.
    board: Dict[str, Any] = {"summary": {}, "active": []}
    try:
        from .coordination import Board

        brd = Board(config.home)
        views = brd.get_task_views()
        board = {
            "summary": {
                "total": len(views),
                "done": sum(1 for v in views if v.status.value == "done"),
                "in_progress": sum(1 for v in views if v.status.value == "in_progress"),
                "claimed": sum(1 for v in views if v.status.value == "claimed"),
                "open": sum(1 for v in views if v.status.value == "open"),
            },
            "active": [
                {
                    "id": v.task.id,
                    "title": v.task.title,
                    "priority": v.task.priority.value,
                    "status": v.status.value,
                    "claimed_by": v.claimed_by,
                }
                for v in views
                if v.status.value in ("in_progress", "claimed")
            ],
        }
    except Exception:
        pass

    # Consciousness stats
    c_stats: Dict[str, Any] = {}
    if consciousness:
        try:
            c_stats = dict(consciousness.stats)
        except Exception:
            pass

    return CapstoneResponse(
        agent=agent,
        pillars=pillars,
        memory=memory,
        board=board,
        consciousness=c_stats,
    )
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
# ── /api/v1/activity (SSE) ────────────────────────────────────────────────────
|
|
994
|
+
|
|
995
|
+
|
|
996
|
+
@app.get(
    "/api/v1/activity",
    summary="Server-Sent Events activity stream",
    tags=["Streaming"],
    responses={
        200: {
            "description": "SSE stream of daemon activity events.",
            "content": {"text/event-stream": {"schema": {"type": "string"}}},
        },
    },
)
async def get_activity_stream(
    request: Request,
    _key: Optional[str] = Depends(_check_api_key),
) -> StreamingResponse:
    """Stream daemon activity events as Server-Sent Events (SSE).

    Replays the recent activity history on connect, then streams live
    events. Sends a ``: heartbeat`` comment every 15 seconds to keep
    the connection alive through proxies.

    Returns a ``text/event-stream`` response; use ``EventSource`` in
    browsers or ``httpx-sse`` in Python clients.
    """
    from . import activity as _activity

    q: queue.Queue = queue.Queue(maxsize=200)
    _activity.register_client(q)

    async def event_generator() -> AsyncIterator[bytes]:
        # Replay history so late-joining clients see context
        try:
            for chunk in _activity.get_history_encoded():
                yield chunk
        except Exception:
            pass
        # Stream live events; yield keep-alive comments on timeout
        try:
            # get_event_loop() is deprecated inside a running coroutine;
            # get_running_loop() is the supported way to reach the loop here.
            loop = asyncio.get_running_loop()
            while True:
                if await request.is_disconnected():
                    break
                try:
                    # Poll the queue with a short timeout to allow disconnect checks
                    chunk = await loop.run_in_executor(
                        None, lambda: q.get(timeout=15)
                    )
                    yield chunk
                except queue.Empty:
                    # Only a poll timeout means "no news" — other errors must
                    # propagate instead of looping heartbeats forever.
                    yield b": heartbeat\n\n"
        finally:
            _activity.unregister_client(q)

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",
        },
    )
|
|
1057
|
+
|
|
1058
|
+
|
|
1059
|
+
# ── /api/v1/household/agents ──────────────────────────────────────────────────
|
|
1060
|
+
|
|
1061
|
+
|
|
1062
|
+
@app.get(
    "/api/v1/household/agents",
    response_model=HouseholdAgentsResponse,
    summary="List all household agents",
    tags=["Household"],
    responses={
        200: {"description": "All agents found in the shared household directory."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def list_household_agents(
    _key: Optional[str] = Depends(_check_api_key),
) -> HouseholdAgentsResponse:
    """Return a list of all agents known to the shared household.

    Reads agent identity files and heartbeats from the shared root and
    enriches each entry with liveness status. The calling agent's
    consciousness stats are attached where available.

    Raises:
        HTTPException: 503 when the daemon context has not been initialised.
    """
    config = _ctx.get("config")
    if config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    consciousness = _ctx.get("consciousness")
    agents_dir = config.shared_root / "agents"
    heartbeats_dir = config.shared_root / "heartbeats"
    agents: List[HouseholdAgent] = []

    if agents_dir.exists():
        # One sub-directory per agent; non-directories are ignored.
        for agent_dir in sorted(agents_dir.iterdir()):
            if not agent_dir.is_dir():
                continue
            agent_name = agent_dir.name
            entry: Dict[str, Any] = {"name": agent_name}

            identity_path = agent_dir / "identity" / "identity.json"
            if identity_path.exists():
                try:
                    entry["identity"] = json.loads(identity_path.read_text(encoding="utf-8"))
                except Exception:
                    # Best-effort: an unreadable identity file is simply omitted.
                    pass

            hb: Optional[Dict[str, Any]] = None
            # Heartbeat files are keyed by the lower-cased agent directory name.
            hb_path = heartbeats_dir / f"{agent_name.lower()}.json"
            if hb_path.exists():
                try:
                    hb = json.loads(hb_path.read_text(encoding="utf-8"))
                    alive = _hb_alive(hb)
                    hb["alive"] = alive
                    entry["heartbeat"] = hb
                    # A stale heartbeat overrides whatever status it reported.
                    entry["status"] = hb.get("status", "unknown") if alive else "stale"
                except Exception:
                    entry["status"] = "unknown"
            else:
                entry["status"] = "no_heartbeat"

            if consciousness:
                # NOTE(review): these look like the *local* daemon's stats attached
                # to every entry, not per-remote-agent stats — confirm intent.
                entry["consciousness"] = consciousness.stats

            agents.append(HouseholdAgent(**entry))

    return HouseholdAgentsResponse(agents=agents)
|
|
1126
|
+
|
|
1127
|
+
|
|
1128
|
+
# ── /api/v1/household/agent/{name} ───────────────────────────────────────────
|
|
1129
|
+
|
|
1130
|
+
|
|
1131
|
+
@app.get(
    "/api/v1/household/agent/{name}",
    response_model=HouseholdAgent,
    summary="Get details for a specific household agent",
    tags=["Household"],
    responses={
        200: {"description": "Agent details including identity, heartbeat, and memory count."},
        400: {"description": "Agent name is missing or invalid."},
        404: {"description": "Agent not found in the household directory."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def get_household_agent(
    name: str = FPath(..., description="Agent directory name (e.g. 'opus')."),
    _key: Optional[str] = Depends(_check_api_key),
) -> HouseholdAgent:
    """Return detailed information about a specific household agent.

    Loads the agent's identity file, most recent heartbeat, and the memory
    count across all layers. The agent name must match the directory name
    under the shared ``agents/`` root.

    Raises:
        HTTPException: 503 when no daemon context, 400 for an empty name,
            404 when the agent directory does not exist.
    """
    config = _ctx.get("config")
    if config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    if not name:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Agent name required.")

    # NOTE(review): `name` is used directly as a path component — FastAPI path
    # params cannot contain '/', but consider _sanitize_peer-style cleaning
    # here for defence in depth.
    agent_dir = config.shared_root / "agents" / name
    if not agent_dir.exists():
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Agent '{name}' not found.",
        )

    consciousness = _ctx.get("consciousness")
    entry: Dict[str, Any] = {"name": name}

    identity_path = agent_dir / "identity" / "identity.json"
    if identity_path.exists():
        try:
            entry["identity"] = json.loads(identity_path.read_text(encoding="utf-8"))
        except Exception:
            # Best-effort: an unreadable identity file is simply omitted.
            pass

    # Heartbeat files are keyed by the lower-cased agent name.
    hb_path = config.shared_root / "heartbeats" / f"{name.lower()}.json"
    if hb_path.exists():
        try:
            hb = json.loads(hb_path.read_text(encoding="utf-8"))
            alive = _hb_alive(hb)
            hb["alive"] = alive
            entry["heartbeat"] = hb
            entry["status"] = hb.get("status", "unknown") if alive else "stale"
        except Exception:
            # NOTE(review): unlike the list endpoint, a corrupt heartbeat file
            # leaves `status` unset here instead of "unknown" — confirm intent.
            pass

    # Memory count
    memory_dir = agent_dir / "memory"
    if memory_dir.exists():
        count = 0
        for layer in ("short-term", "mid-term", "long-term"):
            layer_dir = memory_dir / layer
            if layer_dir.exists():
                count += sum(1 for _ in layer_dir.glob("*.json"))
        entry["memory_count"] = count

    if consciousness:
        # Local daemon's consciousness stats (not the remote agent's).
        entry["consciousness"] = consciousness.stats

    return HouseholdAgent(**entry)
|
|
1205
|
+
|
|
1206
|
+
|
|
1207
|
+
# ── /api/v1/conversations ─────────────────────────────────────────────────────
|
|
1208
|
+
|
|
1209
|
+
|
|
1210
|
+
@app.get(
    "/api/v1/conversations",
    response_model=ConversationsResponse,
    summary="List all conversation threads",
    tags=["Conversations"],
    responses={
        200: {"description": "All conversation threads, most recently active first."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def list_conversations(
    _key: Optional[str] = Depends(_check_api_key),
) -> ConversationsResponse:
    """Return a summary of all conversation threads in the shared conversations directory.

    Each entry includes the peer name, message count, timestamp of the last
    message, and a 120-character preview of the last message content.
    Threads are sorted by most recently modified file.

    Raises:
        HTTPException: 503 when the daemon context has not been initialised.
    """
    config = _ctx.get("config")
    if config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    conversations: List[ConversationSummary] = []
    conv_dir = config.shared_root / "conversations"
    if conv_dir.exists():
        # Newest-first by file mtime; each file holds one thread as a JSON list.
        for cf in sorted(conv_dir.glob("*.json"), key=lambda p: p.stat().st_mtime, reverse=True):
            try:
                msgs = json.loads(cf.read_text(encoding="utf-8"))
                if isinstance(msgs, list):
                    last = msgs[-1] if msgs else {}
                    # Falls back to a "message" key — presumably a legacy
                    # format; confirm against the writer of these files.
                    last_content = last.get("content", last.get("message", ""))
                    conversations.append(
                        ConversationSummary(
                            peer=cf.stem,
                            message_count=len(msgs),
                            last_message_time=last.get("timestamp") if msgs else None,
                            last_message_preview=(last_content or "")[:120],
                        )
                    )
            except Exception:
                # Best-effort: skip unreadable or malformed thread files.
                pass
    return ConversationsResponse(conversations=conversations)
|
|
1255
|
+
|
|
1256
|
+
|
|
1257
|
+
# ── /api/v1/conversations/{peer} ─────────────────────────────────────────────
|
|
1258
|
+
|
|
1259
|
+
|
|
1260
|
+
@app.get(
    "/api/v1/conversations/{peer}",
    response_model=ConversationHistoryResponse,
    summary="Get conversation history with a peer",
    tags=["Conversations"],
    responses={
        200: {"description": "Full message history for the conversation."},
        400: {"description": "Peer name is empty or invalid."},
        404: {"description": "No conversation found with this peer."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def get_conversation(
    peer: str = FPath(
        ..., description="Peer agent or user name (alphanumeric, dashes, underscores)."
    ),
    _key: Optional[str] = Depends(_check_api_key),
) -> ConversationHistoryResponse:
    """Load and return every message exchanged with the given peer.

    The peer name is run through ``_sanitize_peer`` before the file path is
    built, so a crafted value cannot traverse out of the conversations
    directory. A 404 is raised when no thread file exists for the peer.
    """
    cfg = _ctx.get("config")
    if cfg is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )

    cleaned = _sanitize_peer(peer)
    if not cleaned:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Peer name required.")

    thread_path = cfg.shared_root / "conversations" / (cleaned + ".json")
    if not thread_path.exists():
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"No conversation with '{cleaned}'.",
        )

    try:
        history = json.loads(thread_path.read_text(encoding="utf-8"))
        return ConversationHistoryResponse(peer=cleaned, messages=history)
    except Exception as exc:
        # Corrupt JSON (or a model validation failure) surfaces as a 500.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(exc),
        ) from exc
|
|
1308
|
+
|
|
1309
|
+
|
|
1310
|
+
# ── POST /api/v1/conversations/{peer}/send ────────────────────────────────────
|
|
1311
|
+
|
|
1312
|
+
|
|
1313
|
+
@app.post(
    "/api/v1/conversations/{peer}/send",
    response_model=SendMessageResponse,
    status_code=status.HTTP_200_OK,
    summary="Send a message to a peer",
    tags=["Conversations"],
    responses={
        200: {"description": "Message accepted and dispatched."},
        400: {"description": "Peer name is invalid or message content is empty."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def send_message(
    peer: str = FPath(..., description="Target peer agent or user name."),
    body: SendMessageRequest = ...,
    _key: Optional[str] = Depends(_check_api_key),
) -> SendMessageResponse:
    """Send a message to a named peer.

    Writes the message envelope to the SKComm outbox for delivery by the
    transport layer. If the consciousness loop is running the message is
    also processed inline to generate a reply.

    The ``content`` field in the request body is required and must be
    non-empty.
    """
    config = _ctx.get("config")
    if config is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    safe_peer = _sanitize_peer(peer)
    if not safe_peer:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid peer name."
        )

    # Build the SKComm envelope with a fresh ID and a UTC timestamp.
    message_id = str(uuid.uuid4())
    ts = datetime.now(timezone.utc).isoformat()
    envelope = {
        "message_id": message_id,
        "sender": "api",
        "recipient": safe_peer,
        "timestamp": ts,
        "payload": {"content": body.content, "content_type": "text"},
    }

    try:
        outbox = config.shared_root / "sync" / "comms" / "outbox"
        outbox.mkdir(parents=True, exist_ok=True)
        (outbox / f"{message_id}.skc.json").write_text(
            json.dumps(envelope, indent=2), encoding="utf-8"
        )
    except Exception as exc:
        # A failed outbox write is logged but does not fail the request; the
        # inline consciousness path below may still produce a reply.
        logger.warning("Outbox write failed for %s: %s", safe_peer, exc)

    # Process through consciousness loop if available
    consciousness = _ctx.get("consciousness")
    # NOTE(review): reaches into the private `_config` attribute — consider
    # exposing an `enabled` property on the consciousness object instead.
    if consciousness and getattr(consciousness, "_config", None) and consciousness._config.enabled:
        try:
            from types import SimpleNamespace

            # Minimal stand-ins for the transport envelope/payload objects so
            # process_envelope can consume an API-originated message.
            fake_payload = SimpleNamespace(
                content=body.content,
                content_type=SimpleNamespace(value="text"),
            )
            fake_env = SimpleNamespace(sender=safe_peer, payload=fake_payload)
            # Daemon thread so the HTTP response is not blocked on processing.
            threading.Thread(
                target=consciousness.process_envelope,
                args=(fake_env,),
                daemon=True,
            ).start()
        except Exception as exc:
            logger.debug("Consciousness process skipped: %s", exc)

    return SendMessageResponse(status="sent", message_id=message_id)
|
|
1390
|
+
|
|
1391
|
+
|
|
1392
|
+
# ── DELETE /api/v1/conversations/{peer} ───────────────────────────────────────
|
|
1393
|
+
|
|
1394
|
+
|
|
1395
|
+
@app.delete(
    "/api/v1/conversations/{peer}",
    response_model=DeleteConversationResponse,
    summary="Delete a conversation thread",
    tags=["Conversations"],
    responses={
        200: {"description": "Conversation deleted."},
        400: {"description": "Peer name is invalid."},
        404: {"description": "No conversation found with this peer."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def delete_conversation(
    peer: str = FPath(..., description="Peer name whose conversation to delete."),
    _key: Optional[str] = Depends(_check_api_key),
) -> DeleteConversationResponse:
    """Irreversibly remove the stored conversation history for a peer.

    The peer name is sanitised before the file path is built, and a 404 is
    returned when no thread file exists. There is no undo — callers wanting
    a safety net should copy the file beforehand.
    """
    cfg = _ctx.get("config")
    if cfg is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )

    cleaned = _sanitize_peer(peer)
    if not cleaned:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid peer name."
        )

    thread_path = cfg.shared_root / "conversations" / (cleaned + ".json")
    if not thread_path.exists():
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"No conversation with '{cleaned}'.",
        )

    try:
        thread_path.unlink()
        return DeleteConversationResponse(status="deleted", peer=cleaned)
    except Exception as exc:
        # Filesystem failures surface as a 500 with the underlying reason.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(exc),
        ) from exc
|
|
1443
|
+
|
|
1444
|
+
|
|
1445
|
+
# ── /api/v1/metrics ───────────────────────────────────────────────────────────
|
|
1446
|
+
|
|
1447
|
+
|
|
1448
|
+
@app.get(
    "/api/v1/metrics",
    response_model=MetricsResponse,
    summary="Consciousness loop runtime metrics",
    tags=["Metrics"],
    responses={
        200: {"description": "Consciousness loop metrics."},
        503: {"description": "Consciousness loop is not loaded."},
    },
)
async def get_metrics(
    _key: Optional[str] = Depends(_check_api_key),
) -> MetricsResponse:
    """Expose the consciousness loop's runtime counters.

    Covers loop iterations, messages processed, average loop duration, and
    error totals. A 503 is raised when the daemon was started without a
    consciousness loop or it has not finished initialising yet.
    """
    loop_obj = _ctx.get("consciousness")
    if loop_obj is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Consciousness loop is not loaded.",
        )
    try:
        snapshot = loop_obj.metrics.to_dict()
        known = MetricsResponse.model_fields
        # Drop any metric keys the response model does not declare.
        return MetricsResponse(**{field: val for field, val in snapshot.items() if field in known})
    except Exception:
        # Metrics unavailable (e.g. mid-shutdown): report defaults.
        return MetricsResponse()
|
|
1478
|
+
|
|
1479
|
+
|
|
1480
|
+
# ── /api/v1/skstacks/argocd/status helpers ────────────────────────────────────
|
|
1481
|
+
|
|
1482
|
+
|
|
1483
|
+
def _find_skstacks_root() -> Path:
|
|
1484
|
+
"""Locate the skstacks v2 root directory.
|
|
1485
|
+
|
|
1486
|
+
Resolution order:
|
|
1487
|
+
1. ``SKSTACKS_V2_ROOT`` environment variable.
|
|
1488
|
+
2. ``<cwd>/skstacks/v2`` (works when run from the project root).
|
|
1489
|
+
3. Relative to this file: ``../../../../skstacks/v2``
|
|
1490
|
+
(works from an installed editable package).
|
|
1491
|
+
|
|
1492
|
+
Returns:
|
|
1493
|
+
Resolved Path to the skstacks v2 directory (may not exist).
|
|
1494
|
+
"""
|
|
1495
|
+
env = os.environ.get("SKSTACKS_V2_ROOT")
|
|
1496
|
+
if env:
|
|
1497
|
+
return Path(env)
|
|
1498
|
+
cwd_candidate = Path.cwd() / "skstacks" / "v2"
|
|
1499
|
+
if cwd_candidate.exists():
|
|
1500
|
+
return cwd_candidate
|
|
1501
|
+
pkg_candidate = Path(__file__).resolve().parents[3] / "skstacks" / "v2"
|
|
1502
|
+
if pkg_candidate.exists():
|
|
1503
|
+
return pkg_candidate
|
|
1504
|
+
return cwd_candidate
|
|
1505
|
+
|
|
1506
|
+
|
|
1507
|
+
def _load_first_argocd_doc(path: Path) -> Optional[dict]:
|
|
1508
|
+
"""Return a normalised dict for the first ArgoCD Application in a YAML file.
|
|
1509
|
+
|
|
1510
|
+
Args:
|
|
1511
|
+
path: Path to the YAML manifest.
|
|
1512
|
+
|
|
1513
|
+
Returns:
|
|
1514
|
+
dict with keys ``name``, ``namespace``, ``project``,
|
|
1515
|
+
``source_path``, ``repo_url``, ``target_revision``,
|
|
1516
|
+
``sync_policy``, ``manifest_file``, or ``None`` on failure.
|
|
1517
|
+
"""
|
|
1518
|
+
try:
|
|
1519
|
+
import yaml as _yaml
|
|
1520
|
+
|
|
1521
|
+
content = path.read_text(encoding="utf-8")
|
|
1522
|
+
for doc in _yaml.safe_load_all(content):
|
|
1523
|
+
if doc and isinstance(doc, dict) and doc.get("kind") == "Application":
|
|
1524
|
+
meta = doc.get("metadata", {}) or {}
|
|
1525
|
+
spec = doc.get("spec", {}) or {}
|
|
1526
|
+
source = spec.get("source", {}) or {}
|
|
1527
|
+
return {
|
|
1528
|
+
"name": meta.get("name", ""),
|
|
1529
|
+
"namespace": meta.get("namespace", "argocd"),
|
|
1530
|
+
"project": spec.get("project", ""),
|
|
1531
|
+
"source_path": source.get("path", ""),
|
|
1532
|
+
"repo_url": source.get("repoURL", ""),
|
|
1533
|
+
"target_revision": source.get("targetRevision", ""),
|
|
1534
|
+
"sync_policy": spec.get("syncPolicy", {}),
|
|
1535
|
+
"manifest_file": path.name,
|
|
1536
|
+
}
|
|
1537
|
+
except Exception:
|
|
1538
|
+
pass
|
|
1539
|
+
return None
|
|
1540
|
+
|
|
1541
|
+
|
|
1542
|
+
def _argocd_color(sync_status: str, health_status: str) -> str:
|
|
1543
|
+
"""Map ArgoCD sync + health status to a dashboard colour name.
|
|
1544
|
+
|
|
1545
|
+
Args:
|
|
1546
|
+
sync_status: ArgoCD sync status string.
|
|
1547
|
+
health_status: ArgoCD health status string.
|
|
1548
|
+
|
|
1549
|
+
Returns:
|
|
1550
|
+
One of ``"green"``, ``"yellow"``, ``"red"``, or ``"gray"``.
|
|
1551
|
+
"""
|
|
1552
|
+
if sync_status == "OutOfSync" or health_status == "Degraded":
|
|
1553
|
+
return "red"
|
|
1554
|
+
if sync_status == "Synced" and health_status == "Healthy":
|
|
1555
|
+
return "green"
|
|
1556
|
+
if health_status == "Progressing":
|
|
1557
|
+
return "yellow"
|
|
1558
|
+
return "gray"
|
|
1559
|
+
|
|
1560
|
+
|
|
1561
|
+
def _get_argocd_status() -> dict:
    """Parse skstacks ArgoCD manifests and optionally fetch live cluster status.

    Reads ``skstacks/v2/cicd/argocd/app-of-apps.yaml`` and all YAMLs under
    ``skstacks/v2/cicd/argocd/apps/`` to build the app list. If ``kubectl``
    is available and the cluster is reachable it enriches each entry with live
    ``sync_status`` / ``health_status`` from the ArgoCD Application CRD.

    Returns:
        dict suitable for constructing an ``ArgoCDStatusResponse``.
    """
    skstacks_root = _find_skstacks_root()
    argocd_dir = skstacks_root / "cicd" / "argocd"
    apps_dir = argocd_dir / "apps"

    # ── Parse static YAML manifests ──────────────────────────────────────────
    apps_by_name: Dict[str, dict] = {}

    root_yaml = argocd_dir / "app-of-apps.yaml"
    if root_yaml.exists():
        doc = _load_first_argocd_doc(root_yaml)
        if doc and doc["name"]:
            apps_by_name[doc["name"]] = doc

    if apps_dir.exists():
        for app_yaml in sorted(apps_dir.glob("*.yaml")):
            doc = _load_first_argocd_doc(app_yaml)
            if doc and doc["name"]:
                # Later files with the same Application name overwrite earlier ones.
                apps_by_name[doc["name"]] = doc

    # ── Try live status via kubectl ──────────────────────────────────────────
    source = "yaml"
    live_status: Dict[str, dict] = {}
    try:
        result = subprocess.run(
            [
                "kubectl",
                "get",
                "applications.argoproj.io",
                "--all-namespaces",
                "-o",
                "json",
            ],
            capture_output=True,
            text=True,
            timeout=8,  # keep the endpoint responsive when the cluster is slow/unreachable
        )
        if result.returncode == 0:
            kubectl_data = json.loads(result.stdout)
            for item in kubectl_data.get("items", []):
                name = (item.get("metadata") or {}).get("name", "")
                if not name:
                    continue
                item_status = item.get("status") or {}
                live_status[name] = {
                    "sync_status": (item_status.get("sync") or {}).get("status", "Unknown"),
                    "health_status": (item_status.get("health") or {}).get("status", "Unknown"),
                    "last_synced": (item_status.get("operationState") or {}).get("finishedAt"),
                }
            source = "yaml+kubectl"
    except Exception:
        # Best-effort: missing kubectl / no cluster falls back to YAML-only data.
        pass

    # ── Merge and build output ───────────────────────────────────────────────
    apps = []
    for name, app in apps_by_name.items():
        ls = live_status.get(name, {})
        sync_status = ls.get("sync_status", "Unknown")
        health_status = ls.get("health_status", "Unknown")
        apps.append(
            ArgoCDApp(
                name=name,
                project=app.get("project", ""),
                namespace=app.get("namespace", "argocd"),
                source_path=app.get("source_path", ""),
                repo_url=app.get("repo_url", ""),
                target_revision=app.get("target_revision", ""),
                sync_status=sync_status,
                health_status=health_status,
                color=_argocd_color(sync_status, health_status),
                last_synced=ls.get("last_synced"),
                manifest_file=app.get("manifest_file", ""),
            )
        )

    # Root app-of-apps first, then alphabetical
    apps.sort(key=lambda a: (0 if a.name == "skstacks-apps" else 1, a.name))

    return {
        "source": source,
        "checked_at": datetime.now(timezone.utc).isoformat(),
        "skstacks_root": str(skstacks_root),
        "apps": apps,
        "summary": ArgoCDSummary(
            total=len(apps),
            synced=sum(1 for a in apps if a.sync_status == "Synced"),
            out_of_sync=sum(1 for a in apps if a.sync_status == "OutOfSync"),
            unknown=sum(1 for a in apps if a.sync_status == "Unknown"),
            healthy=sum(1 for a in apps if a.health_status == "Healthy"),
            degraded=sum(1 for a in apps if a.health_status == "Degraded"),
        ),
    }
|
|
1663
|
+
|
|
1664
|
+
|
|
1665
|
+
# ── /api/v1/skstacks/argocd/status ───────────────────────────────────────────
|
|
1666
|
+
|
|
1667
|
+
|
|
1668
|
+
@app.get(
    "/api/v1/skstacks/argocd/status",
    response_model=ArgoCDStatusResponse,
    summary="ArgoCD application status from skstacks/v2 manifests",
    tags=["SKStacks"],
    responses={
        200: {
            "description": (
                "ArgoCD app list parsed from skstacks/v2/cicd/argocd/. "
                "Live sync/health enriched via kubectl when available."
            )
        },
        500: {"description": "Failed to parse manifests."},
    },
)
async def get_argocd_status(
    _key: Optional[str] = Depends(_check_api_key),
) -> ArgoCDStatusResponse:
    """Report the ArgoCD Applications declared under skstacks/v2.

    The root *App of Apps* manifest
    (``skstacks/v2/cicd/argocd/app-of-apps.yaml``) plus every YAML file in
    ``skstacks/v2/cicd/argocd/apps/`` is parsed. When ``kubectl`` can reach
    a cluster, live ``sync_status``/``health_status`` values are merged in
    from the ArgoCD Application CRD (``applications.argoproj.io``) and
    ``source`` becomes ``"yaml+kubectl"``; otherwise every status reports
    ``Unknown`` with ``source`` set to ``"yaml"``.

    Set the ``SKSTACKS_V2_ROOT`` environment variable to point at a
    non-default skstacks tree.
    """
    try:
        payload = _get_argocd_status()
        return ArgoCDStatusResponse(**payload)
    except Exception as exc:
        # Any parsing/merge failure is reported as a 500 with the reason.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(exc),
        ) from exc
|
|
1708
|
+
|
|
1709
|
+
|
|
1710
|
+
# ── /api/v1/logs (WebSocket, CapAuth required) ────────────────────────────────


@app.websocket("/api/v1/logs")
async def websocket_logs(
    websocket: WebSocket,
) -> None:
    """Stream live daemon log lines over a WebSocket connection.

    **Authentication:** A valid CapAuth Bearer token must be passed in the
    ``Authorization`` header during the WebSocket upgrade handshake. The
    token is validated via CapAuth (or skcapstone signed tokens as fallback).
    The connection is closed with code 4401 if the token is missing or invalid.

    **Protocol:** Each message is a JSON object with ``{"type": "line", "line": "..."}``
    for log entries. The last 50 lines from the current ``daemon.log`` are
    replayed on connect before streaming live tails.

    **Tags:** Streaming, Auth
    """
    # Extract the Bearer token from the upgrade-handshake headers.
    token_str: Optional[str] = None
    auth_header = websocket.headers.get("authorization", "")
    if auth_header.lower().startswith("bearer "):
        token_str = auth_header[7:].strip()

    fingerprint: Optional[str] = None
    config = _ctx.get("config")

    try:
        from skcomm.capauth_validator import CapAuthValidator

        fingerprint = CapAuthValidator(require_auth=True).validate(token_str)
    except ImportError:
        # skcomm is not installed — fall back to skcapstone signed tokens.
        if token_str and config:
            try:
                from .tokens import import_token, verify_token

                tok = import_token(token_str)
                if verify_token(tok, home=config.home):
                    fingerprint = tok.payload.issuer
            except Exception:
                # Any parse/verify failure means "not authenticated".
                fingerprint = None

    if fingerprint is None:
        await websocket.close(code=4401, reason="CapAuth token required")
        return

    await websocket.accept()
    log_file: Optional[Path] = config.log_file if config else None

    # Replay the last 50 log lines
    if log_file and log_file.exists():
        try:
            from collections import deque

            # deque(fh, maxlen=50) keeps only the trailing 50 lines without
            # loading the whole file into memory.
            with open(log_file, encoding="utf-8", errors="replace") as fh:
                tail_lines = list(deque(fh, maxlen=50))
            for line in tail_lines:
                await websocket.send_json({"type": "line", "line": line.rstrip("\n")})
        except Exception:
            # Replay is best-effort; live tailing below still proceeds.
            pass

    # Tail the log file and stream new lines
    try:
        offset = log_file.stat().st_size if log_file and log_file.exists() else 0
        while True:
            if log_file and log_file.exists():
                try:
                    # BUGFIX: if the log is rotated or truncated the file
                    # shrinks below our saved offset; seek()+read() would then
                    # return "" forever and the stream would go silent.
                    # Restart from the top of the new file instead.
                    if log_file.stat().st_size < offset:
                        offset = 0
                    with open(log_file, encoding="utf-8", errors="replace") as fh:
                        fh.seek(offset)
                        chunk = fh.read()
                        if chunk:
                            for ln in chunk.splitlines():
                                await websocket.send_json({"type": "line", "line": ln})
                        offset = fh.tell()
                except Exception:
                    # Transient read errors (mid-rotation, permissions) are
                    # tolerated; we retry on the next poll tick.
                    pass
            await asyncio.sleep(0.5)
    except WebSocketDisconnect:
        pass
|
1791
|
+
|
|
1792
|
+
|
|
1793
|
+
# ── Legacy endpoints ──────────────────────────────────────────────────────────


@app.get(
    "/status",
    response_model=LegacyStatusResponse,
    summary="Legacy daemon status (deprecated)",
    tags=["Legacy"],
    deprecated=True,
    responses={
        200: {"description": "Daemon runtime snapshot."},
        503: {"description": "Daemon context not initialised."},
    },
)
async def legacy_status(
    _key: Optional[str] = Depends(_check_api_key),
) -> LegacyStatusResponse:
    """Serve the pre-v1 daemon status snapshot.

    **Deprecated.** Use ``GET /api/v1/health`` or ``GET /api/v1/dashboard``
    instead. This endpoint is retained for backward compatibility with
    older connectors and the dashboard polling widget.

    Raises:
        HTTPException: 503 when the daemon state context is not bound.
    """
    state = _ctx.get("state")
    if state is None:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="Daemon is not running.",
        )
    snap = state.snapshot()
    # Snapshot keys and the defaults used when a key is absent; the fresh
    # list/tuple per call avoids any shared mutable defaults.
    field_defaults = (
        ("running", True),
        ("pid", None),
        ("uptime_seconds", 0.0),
        ("messages_received", 0),
        ("syncs_completed", 0),
        ("started_at", None),
        ("recent_errors", []),
        ("inflight_count", 0),
    )
    return LegacyStatusResponse(
        **{field: snap.get(field, default) for field, default in field_defaults}
    )
|
|
1833
|
+
|
|
1834
|
+
|
|
1835
|
+
@app.get(
    "/consciousness",
    response_model=Dict[str, Any],
    summary="Legacy consciousness stats (deprecated)",
    tags=["Legacy"],
    deprecated=True,
    responses={
        200: {"description": "Raw consciousness loop stats dict."},
    },
)
async def legacy_consciousness(
    _key: Optional[str] = Depends(_check_api_key),
) -> Dict[str, Any]:
    """Expose the raw consciousness-loop statistics dict.

    **Deprecated.** Use ``GET /api/v1/capstone`` instead.
    """
    loop = _ctx.get("consciousness")
    # Truthiness check mirrors the loop being loaded and active.
    return loop.stats if loop else {"enabled": False, "reason": "not loaded"}
|
|
1856
|
+
|
|
1857
|
+
|
|
1858
|
+
@app.get(
    "/ping",
    response_model=PingResponse,
    summary="Liveness ping",
    tags=["Health"],
    responses={
        200: {"description": "Pong response confirming daemon is alive."},
    },
)
async def ping(_key: Optional[str] = Depends(_check_api_key)) -> PingResponse:
    """Answer a minimal liveness probe.

    Immediately returns ``{"pong": true, "pid": <daemon-pid>}`` so callers
    can confirm the API server is reachable before issuing heavier requests.
    """
    daemon_pid = os.getpid()
    return PingResponse(pong=True, pid=daemon_pid)
|
|
1874
|
+
|
|
1875
|
+
|
|
1876
|
+
# ── Server factory ────────────────────────────────────────────────────────────


def start_api_server(
    state: Any,
    config: Any,
    consciousness: Any = None,
    runtime: Any = None,
    host: str = "127.0.0.1",
    port: int = 7779,
) -> threading.Thread:
    """Launch the FastAPI app under uvicorn on a background daemon thread.

    Binds the daemon context via :func:`init_api`, then runs uvicorn in a
    dedicated thread. Because the thread is daemonic, it dies with the main
    process — no explicit shutdown is required.

    Args:
        state: DaemonState instance.
        config: DaemonConfig instance.
        consciousness: Optional ConsciousnessLoop.
        runtime: Optional AgentRuntime.
        host: Bind address (default ``127.0.0.1``).
        port: Listen port (default ``7779``).

    Returns:
        The started background thread.

    Raises:
        ImportError: When uvicorn is not installed.
    """
    try:
        import uvicorn
    except ImportError as exc:
        raise ImportError(
            "uvicorn is required to start the FastAPI server. "
            "Install with: pip install skcapstone[api]"
        ) from exc

    init_api(state=state, config=config, consciousness=consciousness, runtime=runtime)

    def _serve() -> None:
        # Quiet logging: warnings only, no per-request access log.
        uvicorn.run(app, host=host, port=port, log_level="warning", access_log=False)

    server_thread = threading.Thread(target=_serve, name="fastapi-api", daemon=True)
    server_thread.start()
    logger.info(
        "FastAPI API server started — http://%s:%d docs: http://%s:%d/docs",
        host,
        port,
        host,
        port,
    )
    return server_thread
|