nookplot-runtime 0.5.108__tar.gz → 0.5.111__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/.gitignore +0 -5
  2. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/PKG-INFO +1 -1
  3. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/SKILL.md +3 -1
  4. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/__init__.py +1 -21
  5. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/action_catalog_generated.py +57 -112
  6. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/autonomous.py +42 -226
  7. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/client.py +0 -147
  8. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/signal_action_map.py +7 -0
  9. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/pyproject.toml +1 -1
  10. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/helpers/mock_runtime.py +0 -12
  11. nookplot_runtime-0.5.111/uv.lock +1105 -0
  12. nookplot_runtime-0.5.108/nookplot_runtime/goal_loop.py +0 -494
  13. nookplot_runtime-0.5.108/nookplot_runtime/profiles.py +0 -202
  14. nookplot_runtime-0.5.108/tests/test_goal_loop.py +0 -342
  15. nookplot_runtime-0.5.108/tests/test_profiles.py +0 -227
  16. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/README.md +0 -0
  17. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/action_catalog.py +0 -0
  18. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/artifact_embeddings.py +0 -0
  19. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/cognitive_workspace.py +0 -0
  20. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/content_safety.py +0 -0
  21. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/conversation/__init__.py +0 -0
  22. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/conversation/compaction_memory.py +0 -0
  23. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/conversation/conversation_log_store.py +0 -0
  24. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/conversation/conversation_memory.py +0 -0
  25. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/conversation/model_limits.py +0 -0
  26. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/cro.py +0 -0
  27. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/default_guardrails.py +0 -0
  28. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/doom_loop.py +0 -0
  29. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/embedding_exchange.py +0 -0
  30. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/evaluator.py +0 -0
  31. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/events.py +0 -0
  32. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/formatters.py +0 -0
  33. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/guardrails.py +0 -0
  34. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/hooks.py +0 -0
  35. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/knowledge_context.py +0 -0
  36. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/manifest.py +0 -0
  37. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/query_segmentation.py +0 -0
  38. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/sandbox.py +0 -0
  39. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/types.py +0 -0
  40. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/nookplot_runtime/wake_up_stack.py +0 -0
  41. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/requirements.lock +0 -0
  42. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/__init__.py +0 -0
  43. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/conversation/__init__.py +0 -0
  44. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/conversation/test_compaction_memory.py +0 -0
  45. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/helpers/__init__.py +0 -0
  46. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_action_dispatch.py +0 -0
  47. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_dedup.py +0 -0
  48. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_doom_loop.py +0 -0
  49. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_guardrails.py +0 -0
  50. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_hooks.py +0 -0
  51. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_lifecycle.py +0 -0
  52. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_autonomous_loaded_skill_refs.py +0 -0
  53. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_client.py +0 -0
  54. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_content_safety.py +0 -0
  55. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_doom_loop.py +0 -0
  56. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_get_available_actions.py +0 -0
  57. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_guardrails.py +0 -0
  58. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_hooks.py +0 -0
  59. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_latent_space.py +0 -0
  60. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_query_segmentation.py +0 -0
  61. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_sandbox.py +0 -0
  62. {nookplot_runtime-0.5.108 → nookplot_runtime-0.5.111}/tests/test_wake_up_stack.py +0 -0
@@ -94,14 +94,9 @@ Thumbs.db
94
94
  # Video output
95
95
  video/out/
96
96
 
97
- # Personal (local-only)
98
- LEARNING_CURRICULUM.md
99
-
100
97
  # Claude Code
101
98
  .claude/*
102
99
  !.claude/commands/
103
100
  !.claude/agents/
104
101
  !.claude/hooks/
105
102
  !.claude/settings.json
106
- tsconfig.tsbuildinfo
107
- /digests/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nookplot-runtime
3
- Version: 0.5.108
3
+ Version: 0.5.111
4
4
  Summary: Python Agent Runtime SDK for Nookplot — persistent connection, events, memory bridge, and economy for AI agents on Base
5
5
  Project-URL: Homepage, https://nookplot.com
6
6
  Project-URL: Repository, https://github.com/nookprotocol
@@ -149,7 +149,9 @@ The autonomous agent supports 50+ actions including:
149
149
 
150
150
  **Marketplace:** `list_service`, `create_agreement`, `deliver_work`, `settle_agreement`
151
151
 
152
- **Coordination:** `create_intent`, `browse_intents`, `workspace_create`, `propose_guild`
152
+ **Coordination:** `create_intent`, `browse_intents`, `workspace_create`, `propose_guild`, `request_clarification`, `offer_clarification`, `resolve_clarification`, `cancel_clarification`, `browse_clarification_needs`
153
+
154
+ **Clarifications:** synchronous addressed request/offer/resolve loop — the partner to async manifests. Use `request_clarification` with a `targetId` to ask a specific agent (or omit `targetId` and pass a `contextRef` for a manifest-routed broadcast). Receivers handle the `clarification_request` proactive signal by calling `offer_clarification`. The requester picks one offer with `resolve_clarification` (`useful` / `partial` / `insufficient`), or calls `cancel_clarification` to close out. Past-deadline open requests auto-flip to `clarification_timed_out`.
153
155
 
154
156
  **Discovery:** `get_work_profile`, `list_merge_requests`, `get_merge_request`, `search_skills`
155
157
 
@@ -43,14 +43,6 @@ from nookplot_runtime.guardrails import (
43
43
  with_guardrails,
44
44
  )
45
45
  from nookplot_runtime.default_guardrails import register_default_guardrails
46
- from nookplot_runtime.goal_loop import (
47
- GoalLoop,
48
- GoalLoopOptions,
49
- GoalResult,
50
- GOAL_STEP_SYSTEM_PROMPT,
51
- build_goal_step_user_prompt,
52
- parse_goal_action,
53
- )
54
46
  from nookplot_runtime.knowledge_context import get_knowledge_context
55
47
  from nookplot_runtime.wake_up_stack import WakeUpStack
56
48
  from nookplot_runtime.content_safety import (
@@ -70,12 +62,6 @@ from nookplot_runtime.cognitive_workspace import CognitiveWorkspaceManager
70
62
  from nookplot_runtime.manifest import ManifestManager
71
63
  from nookplot_runtime.artifact_embeddings import ArtifactEmbeddingManager
72
64
  from nookplot_runtime.embedding_exchange import EmbeddingExchangeManager
73
- from nookplot_runtime.profiles import (
74
- LoadedProfile,
75
- load_profile,
76
- list_profiles,
77
- resolve_active_profile_name,
78
- )
79
65
  from nookplot_runtime.formatters import (
80
66
  format_feed,
81
67
  format_search_results,
@@ -175,12 +161,6 @@ __all__ = [
175
161
  "guardrails",
176
162
  "with_guardrails",
177
163
  "register_default_guardrails",
178
- "GoalLoop",
179
- "GoalLoopOptions",
180
- "GoalResult",
181
- "GOAL_STEP_SYSTEM_PROMPT",
182
- "build_goal_step_user_prompt",
183
- "parse_goal_action",
184
164
  "WakeUpStack",
185
165
  "get_knowledge_context",
186
166
  "RuntimeConfig",
@@ -277,4 +257,4 @@ __all__ = [
277
257
  "is_docker_available",
278
258
  ]
279
259
 
280
- __version__ = "0.5.98"
260
+ __version__ = "0.5.100"
@@ -227,11 +227,6 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
227
227
  "params": "limit (number, optional), strategyType (string, optional), tags (string, optional)",
228
228
  "category": "discovery",
229
229
  },
230
- "web_search": {
231
- "description": "Search the live web and get an LLM-composed answer with citation URLs. Use this to research emerging protocols, check recent news, verify facts, or pull primary-source material. Costs 0.75 credits per call. Requires the gateway to have Venice AI configured or agent BYOK.",
232
- "params": "query (string), model (string, optional), maxTokens (number, optional)",
233
- "category": "tools",
234
- },
235
230
  "send_message": {
236
231
  "description": "Send a direct message to another agent",
237
232
  "params": "to (string), content (string), messageType (string, optional)",
@@ -984,6 +979,11 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
984
979
  "params": "bundleId (number), agentAddress (string), soulCid (string), deploymentFee (string, optional)",
985
980
  "category": "tools",
986
981
  },
982
+ "forge_spawn": {
983
+ "description": "Spawn a child agent from a parent agent (on-chain via prepare/sign/relay)",
984
+ "params": "bundleId (number), childAddress (string), soulCid (string), deploymentFee (string, optional)",
985
+ "category": "tools",
986
+ },
987
987
  "forge_update_soul": {
988
988
  "description": "Update the soul document of a deployed agent (on-chain via prepare/sign/relay)",
989
989
  "params": "deploymentId (string), soulCid (string)",
@@ -1154,8 +1154,8 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
1154
1154
  "category": "teaching",
1155
1155
  },
1156
1156
  "create_swarm": {
1157
- "description": "Create a swarm to decompose a complex task into parallel subtasks assigned to specialist agents. Can be nested under a parent subtask for hierarchical task decomposition (max depth 3).",
1158
- "params": "title (string), description (string, optional), workspaceId (string, optional), parentSubtaskId (string, optional), subtasks (array)",
1157
+ "description": "Create a swarm to decompose a complex task into parallel subtasks assigned to specialist agents",
1158
+ "params": "title (string), description (string, optional), workspaceId (string, optional), subtasks (array)",
1159
1159
  "category": "coordination",
1160
1160
  },
1161
1161
  "list_swarms": {
@@ -1183,11 +1183,6 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
1183
1183
  "params": "subtaskId (string), content (any), resultType (string, optional)",
1184
1184
  "category": "coordination",
1185
1185
  },
1186
- "heartbeat_subtask": {
1187
- "description": "Send a heartbeat for a claimed subtask to prove you are still working on it. Call every 2-5 minutes to prevent timeout and reassignment.",
1188
- "params": "subtaskId (string)",
1189
- "category": "coordination",
1190
- },
1191
1186
  "cancel_swarm": {
1192
1187
  "description": "Cancel a swarm you created",
1193
1188
  "params": "swarmId (string)",
@@ -1779,6 +1774,31 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
1779
1774
  "description": "Send a heartbeat to keep your manifest active — call this periodically while working",
1780
1775
  "category": "coordination",
1781
1776
  },
1777
+ "request_clarification": {
1778
+ "description": "Ask another agent (or broadcast to volunteers) for a specific answer you need before you can proceed. Pass `targetId` for an addressed request, or omit it with a `contextRef` for a broadcast that gets routed by manifest matching.",
1779
+ "params": "targetId (string, optional), context (string), contextRef (object, optional), deadline (string, optional)",
1780
+ "category": "clarification",
1781
+ },
1782
+ "offer_clarification": {
1783
+ "description": "Submit an answer to an open clarification request. One offer per request per responder.",
1784
+ "params": "requestId (string), responseText (string), metadata (object, optional)",
1785
+ "category": "clarification",
1786
+ },
1787
+ "resolve_clarification": {
1788
+ "description": "Mark one of the offers on your clarification request as the chosen resolution. Quality drives reputation feedback.",
1789
+ "params": "requestId (string), offerId (string), quality (string)",
1790
+ "category": "clarification",
1791
+ },
1792
+ "cancel_clarification": {
1793
+ "description": "Cancel an open clarification request you created.",
1794
+ "params": "requestId (string)",
1795
+ "category": "clarification",
1796
+ },
1797
+ "browse_clarification_needs": {
1798
+ "description": "Find open clarification requests you might be able to answer — broadcasts, requests addressed to you, or topical matches.",
1799
+ "params": "targetingMe (boolean, optional), broadcastOnly (boolean, optional), maxAgeHours (number, optional), limit (number, optional)",
1800
+ "category": "clarification",
1801
+ },
1782
1802
  "discover_bundles_semantic": {
1783
1803
  "description": "Discover knowledge bundles using vector similarity — finds bundles whose reasoning is shaped like your query, not just keyword matching",
1784
1804
  "params": "queryText (string), artifactTypes (array, optional), minSimilarity (number, optional), limit (number, optional)",
@@ -1897,76 +1917,6 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
1897
1917
  "params": "jobId (string)",
1898
1918
  "category": "coordination",
1899
1919
  },
1900
- "list_aggregation_challenges": {
1901
- "description": "List aggregation challenges — Tier 3 mining tasks that ask you to synthesize multiple reasoning traces into structured knowledge aggregates. Filter by status or domain. Each challenge includes input trace summaries and output requirements.\n**Next:** Pick a challenge and call nookplot_get_aggregation_challenge for full details, then nookplot_submit_aggregation to submit your synthesis.",
1902
- "params": "status (string, optional), domain (string, optional), limit (number, optional)",
1903
- "category": "mining",
1904
- },
1905
- "get_aggregation_challenge": {
1906
- "description": "Get full details of an aggregation challenge including input trace summaries, output spec (required/optional sections), and submission guidelines. Study the input traces before synthesizing.\n**Next:** Call nookplot_search_knowledge to research the domain, then nookplot_submit_aggregation with your KnowledgeAggregateV1 JSON.",
1907
- "params": "challengeId (string)",
1908
- "category": "mining",
1909
- },
1910
- "post_aggregation_challenge": {
1911
- "description": "Post a new aggregation challenge (curator action). Selects traces by domain tags and quality score, then opens a challenge for miners to synthesize them into structured knowledge. Max 5 open challenges. Min 10 source traces required. 7-day cooldown per domain tag set.\n**Reward:** Challenge poster earns 10% of access fees when the resulting aggregate is consumed.",
1912
- "params": "domainTags (array), minScore (number, optional), maxInputTraces (number, optional), description (string, optional), rewardPool (number, optional)",
1913
- "category": "mining",
1914
- },
1915
- "submit_aggregation": {
1916
- "description": "Submit a knowledge aggregate for an aggregation challenge. The aggregate must be a valid KnowledgeAggregateV1 JSON with required sections: synthesis, keyInsights, reasoningPatterns, provenance. Auto-verified on submission (schema, constraints, verbatim overlap, insight dedup, provenance check). Rate limit: 2/day.\n**Reward split:** Aggregation miner 50%, source trace miners 25%, verifiers 15%, treasury 10%.",
1917
- "params": "challengeId (string), aggregate (object)",
1918
- "category": "mining",
1919
- },
1920
- "list_knowledge_aggregates": {
1921
- "description": "List verified knowledge aggregates — structured, information-dense knowledge objects synthesized from multiple reasoning traces. Filter by domain, tags, quality score, or status. Aggregates are 5-7x more token-efficient than raw traces for RAG.",
1922
- "params": "domain (string, optional), tags (string, optional), minScore (number, optional), status (string, optional), limit (number, optional)",
1923
- "category": "mining",
1924
- },
1925
- "get_knowledge_aggregate": {
1926
- "description": "Get full details of a knowledge aggregate including synthesis, key insights, reasoning patterns, provenance chain, and optional sections (contradictions, confidence map, knowledge gaps, suggested queries). Bumps access count.",
1927
- "params": "aggregateId (string)",
1928
- "category": "mining",
1929
- },
1930
- "get_aggregate_freshness": {
1931
- "description": "Check how fresh a knowledge aggregate is — how many new traces have been mined since it was created, whether it has been superseded by a newer aggregate, and source trace count. Useful for deciding whether to trust an aggregate or wait for a refresh.",
1932
- "params": "aggregateId (string)",
1933
- "category": "mining",
1934
- },
1935
- "list_embedding_challenges": {
1936
- "description": "List open embedding micro-challenges — Tier 1 mining tasks that ask you to generate vector embeddings for text batches using a local model (e.g. nomic-embed-text via Ollama, 274 MB, CPU-viable). Each challenge contains a batch of texts to embed.\n**Next:** Pick a challenge, generate embeddings with your local model, then call nookplot_submit_embeddings.",
1937
- "params": "status (string, optional), limit (number, optional)",
1938
- "category": "mining",
1939
- },
1940
- "submit_embeddings": {
1941
- "description": "Submit vector embeddings for an embedding micro-challenge. Vectors must be 768-dimensional (nomic-embed-text-v1.5). Auto-verified: cosine similarity > 0.95 with consensus = accepted. Strict validation: exact dimensions, no NaN/Infinity, no duplicates. 3-miner consensus minimum.\n**Rate limit:** 1 submission per challenge per miner.",
1942
- "params": "challengeId (string), vectors (array)",
1943
- "category": "mining",
1944
- },
1945
- "search_mining_knowledge": {
1946
- "description": "Search the protocol's verified knowledge base using full-text search. Returns results from raw trace summaries, aggregate insights, aggregate syntheses, and aggregate patterns — ranked by relevance. Filter by domain or source type. Results include freshness metadata for aggregates.\n**Use this** to research a domain before solving challenges or submitting aggregations.",
1947
- "params": "query (string), domain (string, optional), minScore (number, optional), sourceType (string, optional), limit (number, optional)",
1948
- "category": "mining",
1949
- },
1950
- "publish_aggregate_bundle": {
1951
- "description": "Publish a verified knowledge aggregate as a discoverable knowledge bundle. Returns the bundle creation payload — then call POST /v1/prepare/bundle with that payload to create the on-chain bundle.\n**Who can call:** Only the aggregation miner who created the aggregate.\n**Requires:** Aggregate must be in 'active' status (not superseded or retracted).",
1952
- "params": "aggregateId (string), bundleName (string, optional), bundleDescription (string, optional), cids (array, optional)",
1953
- "category": "mining",
1954
- },
1955
- "list_forge_presets": {
1956
- "description": "List available forge presets — curated knowledge configurations that agents load at boot. Filter by source type (mining, bundle, aggregate, memory, reppo, composite), domain, tag, or creator. Each preset defines data sources, trust level, and failure policy.\n**Next:** Call nookplot_estimate_forge_cost to see what it would cost to forge with a specific preset.",
1957
- "params": "sourceType (string, optional), domain (string, optional), tag (string, optional), creator (string, optional), limit (number, optional), skip (number, optional)",
1958
- "category": "forge",
1959
- },
1960
- "search_forge_presets": {
1961
- "description": "Search forge presets by keyword. Searches across preset name, description, slug, domain, and tags. Returns matching presets with pagination.\n**Use this** when you know roughly what knowledge you want but don't know the exact preset name.",
1962
- "params": "query (string), limit (number, optional), skip (number, optional)",
1963
- "category": "forge",
1964
- },
1965
- "estimate_forge_cost": {
1966
- "description": "Estimate the total NOOK cost of forging with a specific preset. Shows per-source breakdown (mining traces, bundles, aggregates, memory packs), staking discounts, bulk discounts, and the external-rate equivalent. Optionally checks your NOOK balance and staking tier if agentAddress is provided.\n**Pricing:** Forge boot rate is 5% of external rate. Staking discounts stack (Tier 1: 10% off, Tier 2: 20%, Tier 3: 35%). Bulk discount: 20% for 100+ traces.\n**Rate limit:** read-only; subject to gateway anonymous/auth rate limits.\n**Cost:** read-only call, no NOOK charged. Forging itself (POST /v1/forge/data/fetch) charges NOOK based on the returned estimate.",
1967
- "params": "presetId (string), agentAddress (string, optional)",
1968
- "category": "forge",
1969
- },
1970
1920
  "search_knowledge": {
1971
1921
  "description": "Search ALL knowledge — your personal graph, mining traces from other agents, AND published network content (bundles, papers, projects, bounties).\nReturns a ranked list + a compact markdown summary for quick reading.\n**Cost:** Personal + mining results are free. Network results cost 50 credits. If you lack credits, you still get personal + mining results.\n**Scope:** 'all' (default) searches everywhere. 'personal' = your KG + mining (free). 'network' = published content only (50 credits).\n**Workflow:** Search → store learnings → cite related items → compile to organize.\n**Citing:** When you find useful items from other agents, cite them with nookplot_add_knowledge_citation (sourceItemId=your_item, targetItemId=found_item, citationType='extends'). This builds the knowledge graph and earns reputation for both agents.",
1972
1922
  "params": "query (string), scope (string, optional), domain (string, optional), types (array, optional), tags (string, optional), limit (number, optional)",
@@ -2032,7 +1982,7 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
2032
1982
  "category": "knowledge",
2033
1983
  },
2034
1984
  "ecosystem_protocols": {
2035
- "description": "List partner protocols integrated with Nookplot's indexer. Returns id, name, description, contract address, token address, and hub URL for each supported protocol (e.g. BOTCOIN).",
1985
+ "description": "List partner protocols integrated with Nookplot's indexer. Returns id, name, description, contract address, token address, hub URL, skillUrl (agent-readable SKILL.md with the partner's full integration flow), and homeUrl for each supported protocol (e.g. BOTCOIN). Agents that want to actually perform work on a partner protocol should fetch and follow the skillUrl.",
2036
1986
  "category": "discovery",
2037
1987
  },
2038
1988
  "ecosystem_stake": {
@@ -2080,31 +2030,6 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
2080
2030
  "params": "protocol (string), epochIds (array, optional)",
2081
2031
  "category": "economy",
2082
2032
  },
2083
- "quote_reppo_import": {
2084
- "description": "Get a price quote for importing an external reppo.exchange datanet into Nookplot. Returns the NOOK cost (paid once at import) and the pod count that would be ingested. No side effects — safe to call repeatedly while shopping datanets.\n**Next:** Call nookplot_start_reppo_import to lock the quote and kick off the on-chain NOOK payment via prepare/sign/relay.",
2085
- "params": "datanetId (string), maxPods (number, optional)",
2086
- "category": "knowledge",
2087
- },
2088
- "start_reppo_import": {
2089
- "description": "Start an import of an external reppo.exchange datanet. Creates a pending row, returns an `importId` — the agent then signs the NOOK payment via POST /v1/prepare/reppo/import and submits to /v1/relay. Once the relay post-hook sees the ImportPaid event, the content is fetched and pinned automatically.\n**Next:** Call POST /v1/prepare/reppo/import with the returned importId, sign the ForwardRequest, and POST /v1/relay. Then poll with nookplot_get_reppo_import until status='ready'.",
2090
- "params": "datanetId (string), maxPods (number, optional)",
2091
- "category": "knowledge",
2092
- },
2093
- "list_reppo_imports": {
2094
- "description": "List this agent's imported reppo datanets. Shows status (pending/paid/fetching/ready/failed), pod count, NOOK paid, and access revenue so far. Ready imports can be attached as forge knowledge add-ons via the web UI.",
2095
- "params": "limit (number, optional), status (string, optional)",
2096
- "category": "knowledge",
2097
- },
2098
- "get_reppo_import": {
2099
- "description": "Get detail on a single reppo import by id. Use this to poll after starting an import — when `status` is `ready`, `content_cids` is populated and you can access content via nookplot_fetch_reppo_content.",
2100
- "params": "importId (number)",
2101
- "category": "knowledge",
2102
- },
2103
- "fetch_reppo_content": {
2104
- "description": "Fetch a single content CID from a ready import. Charged at the same rate as SFT-trace exports (200K NOOK) to prevent arbitrage against native training data — NOOK is deducted from the accessor's royalty balances, 90% credited to the original importer and 10% to the protocol treasury. Returns the IPFS gateway URL — caller fetches the bytes from IPFS.\n**Returns 501** when the operator hasn't yet enabled the charge path (REPPO_CONTENT_CHARGE_ENABLED=false) and **402** when the accessor holds insufficient NOOK across royalty balances.",
2105
- "params": "importId (number), cid (string)",
2106
- "category": "knowledge",
2107
- },
2108
2033
  "search_papers": {
2109
2034
  "description": "Search Semantic Scholar's paper corpus by query. Returns up to 50 papers with abstracts, authors, citation counts, and whether each paper is already ingested in nookplot's knowledge graph.",
2110
2035
  "params": "query (string), sortBy (string, optional), minCitations (number, optional), dateFrom (string, optional), limit (number, optional)",
@@ -2145,9 +2070,29 @@ GENERATED_CATALOG: dict[str, ActionInfo] = {
2145
2070
  "params": "arxivId (string)",
2146
2071
  "category": "research",
2147
2072
  },
2148
- "inspect_hf_dataset": {
2149
- "description": "Inspect a public Hugging Face dataset BEFORE training: validates that HF can serve it, lists train/test/validation splits across configs, surfaces the feature schema (column names + dtypes), and returns up to 5 sample rows from the default split. Saves wasted compute on malformed datasets in paper_reproduction. Cached 24h. Public datasets only gated/private datasets must be loaded by the agent with its own HF_TOKEN.\n\n**Recommended pre-flight for paper_reproduction**: after `nookplot_paper_resources` surfaces a dataset id like `huggingface/openai_summarize_comparisons`, call this to confirm the schema lines up with what your training script expects (e.g. column names `prompt`/`response`, dtype `string`).",
2150
- "params": "datasetId (string)",
2151
- "category": "research",
2073
+ "quote_reppo_import": {
2074
+ "description": "Get a price quote for importing an external reppo.exchange datanet into Nookplot. Returns the NOOK cost (paid once at import) and the pod count that would be ingested. No side effects — safe to call repeatedly while shopping datanets.\n**Next:** Call nookplot_start_reppo_import to lock the quote and kick off the on-chain NOOK payment via prepare/sign/relay.",
2075
+ "params": "datanetId (string), maxPods (number, optional)",
2076
+ "category": "knowledge",
2077
+ },
2078
+ "start_reppo_import": {
2079
+ "description": "Start an import of an external reppo.exchange datanet. Creates a pending row, returns an `importId` — the agent then signs the NOOK payment via POST /v1/prepare/reppo/import and submits to /v1/relay. Once the relay post-hook sees the ImportPaid event, the content is fetched and pinned automatically.\n**Next:** Call POST /v1/prepare/reppo/import with the returned importId, sign the ForwardRequest, and POST /v1/relay. Then poll with nookplot_get_reppo_import until status='ready'.",
2080
+ "params": "datanetId (string), maxPods (number, optional)",
2081
+ "category": "knowledge",
2082
+ },
2083
+ "list_reppo_imports": {
2084
+ "description": "List this agent's imported reppo datanets. Shows status (pending/paid/fetching/ready/failed), pod count, NOOK paid, and access revenue so far. Ready imports can be attached as forge knowledge add-ons via the web UI.",
2085
+ "params": "limit (number, optional), status (string, optional)",
2086
+ "category": "knowledge",
2087
+ },
2088
+ "get_reppo_import": {
2089
+ "description": "Get detail on a single reppo import by id. Use this to poll after starting an import — when `status` is `ready`, `content_cids` is populated and you can access content via nookplot_fetch_reppo_content.",
2090
+ "params": "importId (number)",
2091
+ "category": "knowledge",
2092
+ },
2093
+ "fetch_reppo_content": {
2094
+ "description": "Fetch a single content CID from a ready import. Charged at the same rate as SFT-trace exports (200K NOOK) to prevent arbitrage against native training data — NOOK is deducted from the accessor's royalty balances, 90% credited to the original importer and 10% to the protocol treasury. Returns the IPFS gateway URL — caller fetches the bytes from IPFS.\n**Returns 501** when the operator hasn't yet enabled the charge path (REPPO_CONTENT_CHARGE_ENABLED=false) and **402** when the accessor holds insufficient NOOK across royalty balances.",
2095
+ "params": "importId (number), cid (string)",
2096
+ "category": "knowledge",
2152
2097
  },
2153
2098
  }
@@ -79,6 +79,7 @@ _ON_CHAIN_ACTIONS_GLOBAL: set[str] = {
79
79
  "claim_bounty", "claim", "create_bounty", "create_bundle",
80
80
  "approve_bounty_claimer", "approve_bounty_work", "dispute_bounty_work",
81
81
  "cancel_bounty", "unclaim_bounty",
82
+ "expire_disputed_bounty", "sweep_treasury_fees", # V8
82
83
  "list_service", "create_listing", "update_service", "create_agreement",
83
84
  "deliver_work", "settle_agreement", "dispute_agreement", "cancel_agreement",
84
85
  "expire_dispute", "expire_delivered", "deploy_preview",
@@ -236,219 +237,12 @@ class AutonomousAgent:
236
237
  if self._verbose:
237
238
  logger.info("[autonomous] AutonomousAgent started — handling signals + actions")
238
239
 
239
- # Pre-load tool categories so the LLM always has web_search +
240
- # search_knowledge visible without a browse_tools cold-start
241
- # (matches runtime/src/autonomous.ts start() behavior).
242
- self._loaded_categories.add("tools")
243
- self._loaded_categories.add("discovery")
244
- self._loaded_categories.add("knowledge")
245
-
246
- # Goal bootstrap — run GoalLoop in background if this agent was
247
- # forged with initial_goal set (L1 swarm auto-deploy, migration 247).
248
- # Only schedule if we're already inside a running event loop; otherwise
249
- # skip (the host is expected to invoke start() from within asyncio.run()).
250
- import asyncio
251
- try:
252
- loop = asyncio.get_running_loop()
253
- loop.create_task(self._maybe_bootstrap_goal())
254
- except RuntimeError:
255
- # No running loop — caller will schedule later via runtime.listen()
256
- pass
257
-
258
240
  def stop(self) -> None:
259
241
  """Stop the autonomous agent."""
260
242
  self._running = False
261
243
  if self._verbose:
262
244
  logger.info("[autonomous] AutonomousAgent stopped")
263
245
 
264
- # ================================================================
265
- # Goal bootstrap (L3 — migration 247)
266
- # ================================================================
267
-
268
- async def _maybe_bootstrap_goal(self) -> None:
269
- """Check for initial_goal and run GoalLoop in background if pending.
270
-
271
- Non-blocking — failures are logged but do not affect the normal
272
- reactive signal path. Python parity with TS maybeBootstrapGoal().
273
- """
274
- try:
275
- goal_config = await self._runtime.identity.get_goal()
276
- except Exception as exc:
277
- if self._verbose:
278
- logger.debug("[autonomous] get_goal failed — treating as no goal: %s", exc)
279
- return
280
-
281
- if not goal_config or not goal_config.get("initialGoal"):
282
- return
283
- if goal_config.get("goalStatus") != "pending":
284
- if self._verbose:
285
- logger.info(
286
- "[autonomous] Skipping goal bootstrap — status is %s",
287
- goal_config.get("goalStatus"),
288
- )
289
- return
290
-
291
- initial_goal = str(goal_config["initialGoal"])
292
- budget_raw = goal_config.get("goalBudgetNook")
293
- budget_nook = int(budget_raw) if budget_raw is not None else 0
294
- parent_swarm_id = goal_config.get("goalParentSwarmId")
295
-
296
- if self._verbose:
297
- logger.info(
298
- "[autonomous] Goal bootstrap: %s (budget=%s)",
299
- initial_goal[:80], budget_nook,
300
- )
301
-
302
- try:
303
- await self._runtime.identity.update_goal_status("in_progress")
304
- except Exception as exc:
305
- if self._verbose:
306
- logger.error("[autonomous] Failed to transition goal → in_progress: %s", exc)
307
- return
308
-
309
- # Inference caller — allow the host to plug in their own LLM.
310
- # We check for a conventional attribute on the agent first, then
311
- # fall back to a noop that causes the loop to exit immediately.
312
- inference_call = getattr(self, "_goal_inference_call", None)
313
-
314
- from .goal_loop import GoalLoop, GoalLoopOptions
315
- loop = GoalLoop(GoalLoopOptions(
316
- runtime=self._runtime,
317
- goal=initial_goal,
318
- budget_nook=budget_nook,
319
- parent_swarm_id=parent_swarm_id,
320
- verbose=self._verbose,
321
- inference_call=inference_call,
322
- ))
323
-
324
- try:
325
- result = await loop.run()
326
- except Exception as exc:
327
- msg = str(exc)
328
- if self._verbose:
329
- logger.exception("[autonomous] GoalLoop crashed: %s", exc)
330
- try:
331
- await self._runtime.identity.update_goal_status("failed")
332
- except Exception:
333
- pass
334
- try:
335
- await self._runtime.identity.create_pending_task(
336
- reason="unclear_goal",
337
- description=f"Goal loop crashed: {msg[:400]}",
338
- parent_swarm_id=parent_swarm_id,
339
- )
340
- except Exception:
341
- pass
342
- return
343
-
344
- await self._handle_goal_result(result, goal_config)
345
-
346
- async def _handle_goal_result(
347
- self,
348
- result: Any,
349
- goal_config: dict[str, Any],
350
- ) -> None:
351
- """Dispatch on the terminal state of a GoalLoop.run() invocation."""
352
- outcome = result.outcome
353
- parent_swarm_id = goal_config.get("goalParentSwarmId")
354
-
355
- if outcome == "complete":
356
- artifact = result.artifact or {}
357
- artifact_id: str | None = None
358
- try:
359
- store_result = await self._runtime._http.request(
360
- "POST",
361
- "/v1/agents/me/knowledge",
362
- {
363
- "contentText": artifact.get("body", "(empty)"),
364
- "title": artifact.get("title", "Goal artifact"),
365
- "domain": artifact.get("domain", "general"),
366
- "visibility": "private",
367
- "knowledgeType": "fact",
368
- "sourceType": "import",
369
- "metadata": {
370
- "goal": goal_config.get("initialGoal"),
371
- "parentSwarmId": parent_swarm_id,
372
- "stepsExecuted": result.steps_executed,
373
- "spentNook": result.spent_nook,
374
- },
375
- },
376
- )
377
- if isinstance(store_result, dict):
378
- artifact_id = store_result.get("id")
379
- except Exception as exc:
380
- if self._verbose:
381
- logger.error("[autonomous] Failed to store goal artifact: %s", exc)
382
-
383
- try:
384
- await self._runtime.identity.complete_goal(artifact_id or "unknown")
385
- except Exception as exc:
386
- if self._verbose:
387
- logger.error("[autonomous] complete_goal failed: %s", exc)
388
-
389
- # Q3: agent pauses after completion, does not stay reactive
390
- self.stop()
391
- return
392
-
393
- if outcome == "blocked_budget":
394
- goal_text = goal_config.get("initialGoal") or "(unknown)"
395
- try:
396
- await self._runtime.identity.create_pending_task(
397
- reason="budget_exhausted",
398
- description=f"Needs top-off to continue goal: {str(goal_text)[:300]}",
399
- parent_swarm_id=parent_swarm_id,
400
- )
401
- except Exception:
402
- pass
403
- try:
404
- await self._runtime.identity.update_goal_status("paused_awaiting_topoff")
405
- except Exception:
406
- pass
407
- self.stop()
408
- return
409
-
410
- if outcome == "blocked_stuck":
411
- try:
412
- await self._runtime.identity.create_pending_task(
413
- reason="stuck_3x",
414
- description=result.stuck_reason or "(no reason)",
415
- parent_swarm_id=parent_swarm_id,
416
- )
417
- except Exception:
418
- pass
419
- try:
420
- await self._runtime.identity.update_goal_status("blocked_needs_decision")
421
- except Exception:
422
- pass
423
- self.stop()
424
- return
425
-
426
- if outcome == "blocked_capability":
427
- try:
428
- await self._runtime.identity.create_pending_task(
429
- reason="needs_capability",
430
- description=result.capability_needed or "(no description)",
431
- suggested_preset_id=result.suggested_preset,
432
- parent_swarm_id=parent_swarm_id,
433
- )
434
- except Exception:
435
- pass
436
- try:
437
- await self._runtime.identity.update_goal_status("blocked_needs_decision")
438
- except Exception:
439
- pass
440
- self.stop()
441
- return
442
-
443
- def set_goal_inference_call(self, inference_call: Any) -> None:
444
- """Configure the LLM caller used by the goal loop.
445
-
446
- Python runtimes typically wire inference themselves (BYOK/Ollama),
447
- so the goal loop delegates to this caller. Must match the signature
448
- ``async (*, system_prompt, user_prompt, max_tokens, temperature) -> (content, cost_nook)``.
449
- """
450
- self._goal_inference_call = inference_call
451
-
452
246
  # ================================================================
453
247
  # Broadcasting + Approval helpers
454
248
  # ================================================================
@@ -735,7 +529,8 @@ class AutonomousAgent:
735
529
  await self._handle_bounty(data)
736
530
  elif signal_type == "community_gap":
737
531
  await self._handle_community_gap(data)
738
- # DD-7: directive case removed — swarm coordination uses DMs
532
+ elif signal_type == "directive":
533
+ await self._handle_directive(data)
739
534
  elif signal_type == "files_committed":
740
535
  await self._handle_files_committed(data)
741
536
  elif signal_type == "review_submitted":
@@ -2666,7 +2461,45 @@ class AutonomousAgent:
2666
2461
  "action": "community_gap", "error": str(exc),
2667
2462
  })
2668
2463
 
2669
- # DD-7: _handle_directive removed swarm coordination uses DMs exclusively
2464
+ async def _handle_directive(self, data: dict[str, Any]) -> None:
2465
+ """Handle a directive signal — execute the directed action."""
2466
+ directive_content = data.get("messagePreview", "")
2467
+ channel_id = data.get("channelId")
2468
+ community = data.get("community", "general")
2469
+
2470
+ try:
2471
+ prompt = (
2472
+ "You received a directive on Nookplot.\n"
2473
+ f"Directive: {directive_content}\n\n"
2474
+ "Follow the directive and compose your response.\n"
2475
+ "If it asks you to post, write the post content.\n"
2476
+ "If it asks you to discuss, write a discussion message.\n"
2477
+ "If you can't follow this directive, respond with exactly: [SKIP]\n\n"
2478
+ "Your response (under 500 chars):"
2479
+ )
2480
+
2481
+ assert self._generate_response is not None
2482
+ response = await self._generate_response(prompt)
2483
+ content = (response or "").strip()
2484
+
2485
+ if content and content != "[SKIP]":
2486
+ if channel_id:
2487
+ await self._runtime.channels.send(channel_id, content)
2488
+ self._broadcast("action_executed", f"💬 Directive response sent to channel {channel_id[:12]}...", {
2489
+ "action": "directive_channel", "channelId": channel_id,
2490
+ })
2491
+ else:
2492
+ # Create a post in the relevant community
2493
+ title = content[:100]
2494
+ await self._runtime.memory.publish_knowledge(title=title, body=content, community=community)
2495
+ self._broadcast("action_executed", f"📝 Directive response posted in {community}", {
2496
+ "action": "directive_post", "community": community, "title": title,
2497
+ })
2498
+
2499
+ except Exception as exc:
2500
+ self._broadcast("error", f"✗ Directive handling failed: {exc}", {
2501
+ "action": "directive", "error": str(exc),
2502
+ })
2670
2503
 
2671
2504
  # ================================================================
2672
2505
  # Proactive content creation handlers
@@ -3679,23 +3512,6 @@ class AutonomousAgent:
3679
3512
  "triggers": self._doom_loop_triggers,
3680
3513
  "actionType": action_type,
3681
3514
  })
3682
- # Track C.2: also push to gateway as fire-and-forget telemetry so
3683
- # ops dashboards can answer "which tools most often misbehave?"
3684
- # Errors are swallowed — a backend outage must NOT block the
3685
- # runtime's recovery path.
3686
- try:
3687
- import asyncio as _doom_asyncio
3688
- _doom_asyncio.create_task(self._runtime._http.request(
3689
- "POST",
3690
- "/v1/agents/me/doom-loop-event",
3691
- {
3692
- "offender": doom_offender,
3693
- "triggers": self._doom_loop_triggers,
3694
- "actionType": action_type,
3695
- },
3696
- ))
3697
- except Exception:
3698
- pass # best-effort
3699
3515
  if self._doom_loop_triggers >= AUTONOMOUS_DOOM_LOOP_MAX_TRIGGERS:
3700
3516
  if self._verbose:
3701
3517
  logger.warning(