anima-core 1.0.2 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. checksums.yaml +4 -4
  2. data/.gitattributes +1 -0
  3. data/.reek.yml +47 -0
  4. data/README.md +60 -26
  5. data/anima-core.gemspec +4 -1
  6. data/app/channels/session_channel.rb +29 -10
  7. data/app/decorators/tool_call_decorator.rb +7 -3
  8. data/app/decorators/tool_decorator.rb +57 -0
  9. data/app/decorators/tool_response_decorator.rb +12 -4
  10. data/app/decorators/web_get_tool_decorator.rb +102 -0
  11. data/app/jobs/agent_request_job.rb +90 -23
  12. data/app/jobs/mneme_job.rb +51 -0
  13. data/app/jobs/passive_recall_job.rb +29 -0
  14. data/app/models/concerns/event/broadcasting.rb +18 -0
  15. data/app/models/event.rb +10 -0
  16. data/app/models/goal.rb +27 -0
  17. data/app/models/goal_pinned_event.rb +11 -0
  18. data/app/models/pinned_event.rb +41 -0
  19. data/app/models/session.rb +335 -6
  20. data/app/models/snapshot.rb +76 -0
  21. data/config/initializers/event_subscribers.rb +14 -3
  22. data/config/initializers/fts5_schema_dump.rb +21 -0
  23. data/db/migrate/20260321080000_create_mneme_schema.rb +32 -0
  24. data/db/migrate/20260321120000_create_pinned_events.rb +27 -0
  25. data/db/migrate/20260321140000_create_events_fts_index.rb +77 -0
  26. data/db/migrate/20260321140100_add_recalled_event_ids_to_sessions.rb +10 -0
  27. data/lib/agent_loop.rb +63 -20
  28. data/lib/analytical_brain/runner.rb +158 -65
  29. data/lib/analytical_brain/tools/assign_nickname.rb +76 -0
  30. data/lib/analytical_brain/tools/finish_goal.rb +6 -1
  31. data/lib/anima/cli.rb +2 -1
  32. data/lib/anima/installer.rb +11 -12
  33. data/lib/anima/settings.rb +41 -0
  34. data/lib/anima/version.rb +1 -1
  35. data/lib/events/bounce_back.rb +37 -0
  36. data/lib/events/subscribers/agent_dispatcher.rb +29 -0
  37. data/lib/events/subscribers/persister.rb +17 -0
  38. data/lib/events/subscribers/subagent_message_router.rb +102 -0
  39. data/lib/events/subscribers/transient_broadcaster.rb +36 -0
  40. data/lib/llm/client.rb +16 -8
  41. data/lib/mneme/compressed_viewport.rb +200 -0
  42. data/lib/mneme/l2_runner.rb +138 -0
  43. data/lib/mneme/passive_recall.rb +69 -0
  44. data/lib/mneme/runner.rb +254 -0
  45. data/lib/mneme/search.rb +150 -0
  46. data/lib/mneme/tools/attach_events_to_goals.rb +107 -0
  47. data/lib/mneme/tools/everything_ok.rb +24 -0
  48. data/lib/mneme/tools/save_snapshot.rb +68 -0
  49. data/lib/mneme.rb +29 -0
  50. data/lib/providers/anthropic.rb +57 -13
  51. data/lib/shell_session.rb +188 -59
  52. data/lib/tasks/fts5.rake +6 -0
  53. data/lib/tools/remember.rb +179 -0
  54. data/lib/tools/spawn_specialist.rb +21 -9
  55. data/lib/tools/spawn_subagent.rb +22 -11
  56. data/lib/tools/subagent_prompts.rb +20 -3
  57. data/lib/tools/web_get.rb +15 -6
  58. data/lib/tui/app.rb +222 -125
  59. data/lib/tui/decorators/base_decorator.rb +165 -0
  60. data/lib/tui/decorators/bash_decorator.rb +20 -0
  61. data/lib/tui/decorators/edit_decorator.rb +19 -0
  62. data/lib/tui/decorators/read_decorator.rb +24 -0
  63. data/lib/tui/decorators/think_decorator.rb +36 -0
  64. data/lib/tui/decorators/web_get_decorator.rb +19 -0
  65. data/lib/tui/decorators/write_decorator.rb +19 -0
  66. data/lib/tui/flash.rb +139 -0
  67. data/lib/tui/formatting.rb +28 -0
  68. data/lib/tui/height_map.rb +93 -0
  69. data/lib/tui/message_store.rb +25 -1
  70. data/lib/tui/performance_logger.rb +90 -0
  71. data/lib/tui/screens/chat.rb +358 -133
  72. data/templates/config.toml +40 -0
  73. metadata +83 -4
  74. data/CHANGELOG.md +0 -80
  75. data/Gemfile +0 -17
  76. data/lib/tools/return_result.rb +0 -81
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 54229e4195de8d0b5aefbe45627d6fba58f7ff1820963dfb9cea07c03bdc0eaf
4
- data.tar.gz: 413bd51de98ebbe98e79f161a90af96fef3615befc32917682e8bac7e152c9ee
3
+ metadata.gz: dca02bfff536637c003d5f3bbce8dbe20992b7eeb25e6c51bdb4991a2803b538
4
+ data.tar.gz: ead68cc1bd03306a9eef644db2f15a81b7dbfd74bbe1f059e3f57bcfc0aaf77a
5
5
  SHA512:
6
- metadata.gz: 0a2d14ae33ef2f130e2b429b9f92740833e855c6b7b5bfec43a54110751af1769868d4b37cefd0906d76230e7e01ce166f573abf1a8c547abc8c247143306a42
7
- data.tar.gz: 226a6eb5db2027255b76d077a91fdd4aaeb857195790a4ed0d71e74ddad25bb41b9e0ecfb8e753441d955fb5bd98115ea31e7455f2a1f47554cd3207af898212
6
+ metadata.gz: 45f7f927d4f931b624db684e5f500c436cac44cf9dd9a004400b7219f1167e932b181dddac3793d4cd8f1885cf6401855ba6ca681c99d7cd902af8784e49cee2
7
+ data.tar.gz: 78e532c99e39f09732c9abe9588e467a96fb38cddaf7d1b8ba526971de1c890e73efd157876cb570eabad0cb0eb7c1f93faeef68c7128f7806f972ff98e2351c
data/.gitattributes ADDED
@@ -0,0 +1 @@
1
+ spec/cassettes/**/* -diff linguist-generated=true
data/.reek.yml CHANGED
@@ -17,20 +17,44 @@ detectors:
17
17
  # EnvironmentProbe assembles output from local data structures — not envy.
18
18
  # Brain transcript builds from event collection — the method's entire purpose.
19
19
  # ConfigMigrator text processing methods naturally reference local line arrays.
20
+ # ToolDecorator subclasses operate on the tool result — that's the pattern.
21
+ # Tool rescue blocks naturally reference the error object.
20
22
  FeatureEnvy:
21
23
  exclude:
22
24
  - "AnalyticalBrainJob#perform"
23
25
  - "EnvironmentProbe"
24
26
  - "AnalyticalBrain::Runner#build_messages"
25
27
  - "Anima::ConfigMigrator"
28
+ - "WebGetToolDecorator"
29
+ - "Tools::WebGet#validate_and_fetch"
30
+ # Remember tool renders events from other objects — formatting IS the job.
31
+ - "Tools::Remember"
32
+ # Event subscribers extract payload fields — inherent to the pattern.
33
+ - "Events::Subscribers::SubagentMessageRouter"
34
+ # Spawn tools orchestrate child session creation — references are the job.
35
+ - "Tools::SpawnSubagent#spawn_child"
36
+ - "Tools::SpawnSpecialist#spawn_child"
37
+ - "Tools::SpawnSpecialist#execute"
38
+ # Nickname assignment operates on child session and parent's children — inherent.
39
+ - "Tools::SubagentPrompts#assign_nickname_via_brain"
40
+ # Validation methods naturally reference the validated value more than self.
41
+ - "AnalyticalBrain::Tools::AssignNickname#validate"
26
42
  # Private helpers don't need instance state to be valid.
27
43
  # ActiveJob#perform is always a utility function by design.
28
44
  # No-op tools (Think, EverythingIsReady) don't need instance state — by design.
45
+ # method_missing is a Ruby dispatch method, not a regular public method.
46
+ # Content-Type dispatch targets are stateless by design — they transform input,
47
+ # not instance state.
29
48
  UtilityFunction:
30
49
  public_methods_only: true
31
50
  exclude:
32
51
  - "AnalyticalBrainJob#perform"
52
+ - "PassiveRecallJob#perform"
33
53
  - "Tools::Think#execute"
54
+ - "TUI::Formatting"
55
+ - "WebGetToolDecorator#method_missing"
56
+ - "WebGetToolDecorator#application_json"
57
+ - "WebGetToolDecorator#text_html"
34
58
  # Session model is the core domain object — methods grow naturally.
35
59
  # Mcp CLI accumulates subcommand helpers across add/remove/list/secrets.
36
60
  # EnvironmentProbe probes multiple orthogonal facets (OS, Git, project files).
@@ -40,14 +64,37 @@ detectors:
40
64
  - "Session"
41
65
  - "Anima::CLI::Mcp"
42
66
  - "EnvironmentProbe"
67
+ # Runner composes system prompt from modular sections — methods grow with responsibilities.
68
+ - "AnalyticalBrain::Runner"
43
69
  # Decorators branch on tool type across 4 render modes — inherent to the pattern.
70
+ # Installer methods each guard idempotency with config_path.exist? — by design.
44
71
  RepeatedConditional:
45
72
  exclude:
46
73
  - "ToolCallDecorator"
74
+ - "Anima::Installer"
75
+ # Runner checks session type to compose responsibilities — the core dispatch.
76
+ - "AnalyticalBrain::Runner"
47
77
  # EventDecorator holds shared rendering constants (icons, markers, dispatch maps).
48
78
  TooManyConstants:
49
79
  exclude:
50
80
  - "EventDecorator"
81
+ # Abstract base class methods declare parameters for the subclass contract.
82
+ UnusedParameters:
83
+ exclude:
84
+ - "ToolDecorator#call"
85
+ # Rescue blocks naturally call error.message in multiple catch clauses.
86
+ DuplicateMethodCall:
87
+ exclude:
88
+ - "Tools::WebGet#validate_and_fetch"
89
+ # Remember tool accesses event data for formatting — inherent to rendering.
90
+ - "Tools::Remember"
91
+ # Nickname validation checks parent_session for existence then queries — two calls, one guard.
92
+ - "AnalyticalBrain::Tools::AssignNickname#sibling_nickname_taken?"
93
+ # Method length is enforced by code review, not arbitrary line counts
94
+ # build_sections passes context through to sub-methods — inherent to assembly.
95
+ LongParameterList:
96
+ exclude:
97
+ - "Tools::Remember#build_sections"
51
98
  # Method length is enforced by code review, not arbitrary line counts
52
99
  TooManyStatements:
53
100
  enabled: false
data/README.md CHANGED
@@ -10,7 +10,9 @@ Anima is different. It's built on the premise that if you want an agent — a re
10
10
 
11
11
  **A brain modeled after biology, not chat.** The human brain isn't one process — it's specialized subsystems on a shared signal bus. Anima's [analytical brain](https://blog.promptmaster.pro/posts/llms-have-adhd/) runs as a separate subconscious process, managing context, skills, and goals so the main agent can stay in flow. Not two brains — a microservice architecture where each process does one job well. More subsystems are coming.
12
12
 
13
- **Context that never degrades.** Other agents fill a static array until the model gets dumb. Anima assembles a fresh viewport over an event bus every iteration. No compaction. No summarization. Endless sessions. The [dumb zone](https://www.humanlayer.dev/blog/the-dumb-zone) never arrives — and the analytical brain curates what the agent sees, in real time.
13
+ **Context that never degrades.** Other agents fill a static array until the model gets dumb. Anima assembles a fresh viewport over an event bus every iteration. No compaction. No lossy rewriting. Endless sessions. The [dumb zone](https://github.com/humanlayer/advanced-context-engineering-for-coding-agents/blob/main/ace-fca.md) never arrives — the analytical brain curates what the agent sees in real time.
14
+
15
+ **Memory that works like memory.** Other systems bolt on memory as an afterthought — filing cabinets the agent has to consciously open mid-task. It never does; the truck is already moving. Anima's memory department ([Mneme](#semantic-memory-mneme)) runs as a third brain process on the event bus. It summarizes what's about to leave the viewport. It compresses short-term into long-term, like biological memory consolidating during sleep. It pins critical moments to active goals so exact instructions survive where summaries would lose nuance. And it recalls — automatically, passively — surfacing relevant older memories right after the soul, right before the present. The agent doesn't decide to remember. It just remembers.
14
16
 
15
17
  **Sub-agents that already know everything.** When Anima spawns a sub-agent, it inherits the parent's full event stream — every file read, every decision, every user message. No "let me summarize what I know." Lossless context. Zero wasted tool calls on rediscovery.
16
18
 
@@ -34,9 +36,9 @@ Your agent. Your machine. Your rules. Anima runs locally as a headless Rails 8.1
34
36
  - [Event-Driven Design](#event-driven-design)
35
37
  - [Context as Viewport](#context-as-viewport-not-tape)
36
38
  - [Brain as Microservices](#brain-as-microservices-on-a-shared-event-bus)
37
- - [TUI View Modes](#tui-view-modes)
38
- - [Plugin Architecture](#plugin-architecture)
39
39
  - [Semantic Memory](#semantic-memory-mneme)
40
+ - [TUI HUD & View Modes](#tui-hud--view-modes)
41
+ - [Plugin Architecture](#plugin-architecture-planned)
40
42
  - [The Vision](#the-vision)
41
43
  - [The Problem](#the-problem)
42
44
  - [The Insight](#the-insight)
@@ -62,10 +64,10 @@ Anima (Ruby, Rails 8.1 headless)
62
64
  ├── Workflows — operational recipes for multi-step tasks
63
65
  ├── MCP — external tool integration (Model Context Protocol)
64
66
  ├── Sub-agents — autonomous child sessions with lossless context inheritance
67
+ ├── Mneme — memory department (summarization, compression, pinning, recall)
65
68
 
66
69
  │ Designed:
67
70
  ├── Thymos — hormonal/desire system (stimulus → hormone vector)
68
- ├── Mneme — semantic memory (viewport pinning, associative recall)
69
71
  └── Psyche — soul matrix (coefficient table, evolving individuality)
70
72
  ```
71
73
 
@@ -76,6 +78,7 @@ Brain Server (Rails + Puma) TUI Client (RatatuiRuby)
76
78
  ├── LLM integration (Anthropic) ├── WebSocket client
77
79
  ├── Agent loop + tool execution ├── Terminal rendering
78
80
  ├── Analytical brain (background) └── User input capture
81
+ ├── Mneme memory department (background)
79
82
  ├── Skills registry + activation
80
83
  ├── Workflow registry + activation
81
84
  ├── MCP client (HTTP + stdio)
@@ -163,10 +166,9 @@ The agent has access to these built-in tools:
163
166
  | `read` | Read files with smart truncation and offset/limit paging |
164
167
  | `write` | Create or overwrite files |
165
168
  | `edit` | Surgical text replacement with uniqueness constraint |
166
- | `web_get` | Fetch content from HTTP/HTTPS URLs |
169
+ | `web_get` | Fetch content from HTTP/HTTPS URLs (HTML → Markdown, JSON → TOON) |
167
170
  | `spawn_specialist` | Spawn a named specialist sub-agent from the registry |
168
171
  | `spawn_subagent` | Spawn a generic child session with custom tool grants |
169
- | `return_result` | Sub-agents only — deliver results back to parent |
170
172
 
171
173
  Plus dynamic tools from configured MCP servers, namespaced as `server_name__tool_name`.
172
174
 
@@ -186,9 +188,9 @@ Two types:
186
188
  | `thoughts-analyzer` | Extract decisions from project history |
187
189
  | `web-search-researcher` | Research questions via web search |
188
190
 
189
- **Generic Sub-agents** — child sessions with custom tool grants for ad-hoc tasks.
191
+ **Generic Sub-agents** — child sessions with custom tool grants for ad-hoc tasks. Each generic sub-agent gets a Haiku-generated nickname (e.g. `@loop-sleuth`, `@api-scout`) for @mention addressing.
190
192
 
191
- Sub-agents run as background jobs and appear in the TUI session picker under their parent. Next: [@mention communication](https://github.com/hoblin/anima/issues/124) sub-agent text messages route to the parent automatically, parent replies via `@name`. Workers become colleagues.
193
+ Sub-agents communicate through natural text — their `agent_message` events route to the parent session automatically, and the parent replies via `@name` mentions. No special tools needed; when a sub-agent writes text, the parent sees it. When the parent @mentions a sub-agent, the message arrives in that child's session. Workers become colleagues.
192
194
 
193
195
  ### Skills
194
196
 
@@ -199,7 +201,7 @@ Domain knowledge bundles loaded from Markdown files. Skills provide specialized
199
201
  - **Override:** User skills with the same name replace built-in ones
200
202
  - **Format:** Flat files (`skill-name.md`) or directories (`skill-name/SKILL.md` with `examples/` and `references/`)
201
203
 
202
- Active skills are displayed in the TUI info panel.
204
+ Active skills are displayed in the TUI HUD panel (toggle with `C-a → h`).
203
205
 
204
206
  ### Workflows
205
207
 
@@ -223,7 +225,7 @@ description: "Capture findings or context as a persistent note."
223
225
  You are tasked with capturing content as a persistent note...
224
226
  ```
225
227
 
226
- The active workflow is shown in the TUI info panel with a 🔄 indicator. The full lifecycle — activation, goal creation, execution, deactivation — is managed by the analytical brain using judgment, not hardcoded triggers.
228
+ The active workflow is shown in the TUI HUD panel with a 📜 indicator. The full lifecycle — activation, goal creation, execution, deactivation — is managed by the analytical brain using judgment, not hardcoded triggers.
227
229
 
228
230
  ### MCP Integration
229
231
 
@@ -337,7 +339,7 @@ Most agents treat context as an append-only array — messages go in, they never
337
339
 
338
340
  The viewport is a live query, not a log. It walks events newest-first until the token budget is exhausted. Events that fall out of the viewport aren't deleted — they're still in the database, just not visible to the model right now. The context can shrink, grow, or change composition between any two iterations. If the analytical brain marks a large accidental file read as irrelevant, it's gone from the next viewport — tokens recovered instantly.
339
341
 
340
- This means sessions are endless. No compaction. No summarization. No degradation. The model always operates in fresh, high-quality context. The [dumb zone](https://www.humanlayer.dev/blog/the-dumb-zone) never arrives.
342
+ This means sessions are endless. No compaction. No lossy rewriting. The model always operates in fresh, high-quality context. The [dumb zone](https://github.com/humanlayer/advanced-context-engineering-for-coding-agents/blob/main/ace-fca.md) never arrives. Meanwhile, Mneme runs as a background department — summarizing evicted events into persistent snapshots so past context is preserved, not destroyed.
341
343
 
342
344
  Sub-agent viewports compose from two event scopes — their own events (prioritized) and parent events (filling remaining budget). Same mechanism, no special handling. The bus is the architecture.
343
345
 
@@ -351,22 +353,50 @@ Anima mirrors this with an event-driven architecture. The analytical brain is th
351
353
  Event: "tool_call_failed"
352
354
 
353
355
  ├── Analytical brain: update goals, check if workflow needs changing
356
+ ├── Mneme: summarize evicted context into snapshot
354
357
  ├── Thymos subscriber: frustration += 10 [planned]
355
- ├── Mneme subscriber: log failure context for future recall [planned]
356
358
  └── Psyche subscriber: update coefficient (this agent handles errors calmly) [planned]
357
359
 
358
360
  Event: "user_sent_message"
359
361
 
360
362
  ├── Analytical brain: activate relevant skills, name session
363
+ ├── Mneme: check viewport eviction, fire if boundary left viewport
361
364
  ├── Thymos subscriber: oxytocin += 5 (bonding signal) [planned]
362
- └── Mneme subscriber: associate emotional state with topic [planned]
365
+ └── Psyche subscriber: associate emotional state with topic [planned]
363
366
  ```
364
367
 
365
368
  Each subscriber is a microservice — independent, stateless, reacting to the same event bus. No orchestrator decides what to do. The architecture IS the nervous system.
366
369
 
367
- ### TUI View Modes
370
+ ### Semantic Memory (Mneme)
371
+
372
+ Every AI agent today has the same disability: amnesia. Context fills up, gets compacted, gets destroyed. The agent gets dumber as the conversation gets longer. When the session ends, everything is gone. Some systems bolt on memory as an afterthought — markdown files with procedures for when to save and what format to use. Filing cabinets the agent has to consciously decide to open, mid-task, while in flow. It never does. The truck is already moving.
373
+
374
+ Mneme is not a filing cabinet. It's *remembering* — the way biological memory works. Continuous, automatic, layered. A third brain department running on the same event bus as the analytical brain, specializing in one job: making sure nothing important is ever truly lost.
375
+
376
+ **Eviction-triggered summarization** — Mneme tracks a boundary event on each session. When that event leaves the viewport, Mneme fires: it builds a compressed view of the conversation (full text for messages, `[N tools called]` counters for tool work), sends it to a fast model, and persists a snapshot. The boundary advances after each run — a self-regulating cycle that fires exactly when context is about to be lost, no sooner or later. No timer. No manual trigger. The architecture itself knows when to remember.
377
+
378
+ **Two-level snapshot compression** — once source events evict from the sliding window, their snapshots appear in the viewport as memory context. When enough Level 1 snapshots accumulate, Mneme compresses them into a single Level 2 snapshot — recursive summarization that mirrors how human memory consolidates short-term into long-term. Token budget splits across layers (L2: 5%, L1: 15%, recall: 5%, sliding: 75%), creating natural pressure: more memories means less live context, same principle as video compression keyframes. The viewport layout reads like geological strata — deep past at the top, recent past below, live present at the bottom:
379
+
380
+ ```
381
+ [Soul — who I am]
382
+ [L2 snapshots — weeks ago, compressed]
383
+ [L1 snapshots — hours ago, detailed]
384
+ [Associative recall — relevant older memories]
385
+ [Pinned events — critical moments from active goals]
386
+ [Sliding window — the present]
387
+ ```
388
+
389
+ **Goal-scoped event pinning** — some moments are too important for summaries. Exact user instructions. Key decisions. Critical corrections. Mneme pins these events to active Goals — they float above the sliding window, protected from eviction, surviving intact where compression would lose the nuance that matters. Pins are goal-scoped and many-to-many: one event can attach to multiple Goals, and cleanup is automatic via reference counting. When the last active Goal completes, the pin releases. No manual unpin, no stale pins accumulating forever.
390
+
391
+ **Associative recall** — FTS5 full-text search across the entire event history, across all sessions. Two modes: *passive* recall triggers automatically when goals change — Mneme searches for relevant older context and injects it into the viewport between snapshots and the sliding window. Memories surface on their own, right after the soul, right before the present. The agent doesn't have to decide to remember — the remembering happens around it. *Active* recall via the `remember(event_id:)` tool returns a fractal-resolution window centered on a target event — full detail at the center, compressed snapshots at the edges, like eye focus with sharp fovea and blurry periphery.
368
392
 
369
- Three switchable view modes let you control how much detail the TUI shows. Cycle with `Ctrl+a v`:
393
+ The difference from every other system: memory isn't a tool the agent uses. It's the substrate the agent thinks in. Every LLM call assembles a fresh viewport where identity comes first, then memories, then the present — the agent always knows who it is, always has access to what it learned, and never has to break flow to make that happen.
394
+
395
+ ### TUI HUD & View Modes
396
+
397
+ The right-side HUD panel shows session state at a glance: session name, goals (with status icons), active skills, workflow, and sub-agents. Toggle with `C-a → h`; when hidden, the input border shows `C-a → h HUD` as a reminder.
398
+
399
+ Three switchable view modes let you control how much detail the TUI shows. Cycle with `C-a → v`:
370
400
 
371
401
  | Mode | What you see |
372
402
  |------|-------------|
@@ -374,7 +404,13 @@ Three switchable view modes let you control how much detail the TUI shows. Cycle
374
404
  | **Verbose** | Everything in Basic, plus timestamps `[HH:MM:SS]`, tool call previews (`🔧 bash` / `$ command` / `↩ response`), and system messages |
375
405
  | **Debug** | Full X-ray view — timestamps, token counts per message (`[14 tok]`), full tool call args, full tool responses, tool use IDs |
376
406
 
377
- View modes are implemented via Draper decorators that operate at the transport layer. Each event type has a dedicated decorator (`UserMessageDecorator`, `ToolCallDecorator`, etc.) that returns structured data — the TUI renders it. Mode is stored on the `Session` model server-side, so it persists across reconnections.
407
+ View modes are implemented as a three-layer decorator architecture:
408
+
409
+ - **ToolDecorator** (server-side, pre-event) — transforms raw tool responses for LLM consumption. Content-Type dispatch converts HTML → Markdown, JSON → TOON. Sits between tool execution and the event stream.
410
+ - **EventDecorator** (server-side, Draper) — uniform per event type (`UserMessageDecorator`, `ToolCallDecorator`, etc.). Decides WHAT structured data enters the wire for each view mode.
411
+ - **TUI Decorator** (client-side) — unique per tool name (`BashDecorator`, `ReadDecorator`, `EditDecorator`, etc.). Decides HOW each tool looks on screen — tool-specific icons, colors, and formatting.
412
+
413
+ Mode is stored on the `Session` model server-side, so it persists across reconnections.
378
414
 
379
415
  ### Plugin Architecture [planned]
380
416
 
@@ -388,14 +424,6 @@ anima-memory-* → recall and association (Mneme subscribers)
388
424
 
389
425
  Currently tools are built-in. Plugin extraction into distributable gems comes later.
390
426
 
391
- ### Semantic Memory (Mneme) [planned]
392
-
393
- The viewport solves context degradation but creates a new question: what do we lose when events fall off the conveyor belt? Mneme is the answer — memory systems built on top of the viewport.
394
-
395
- **Viewport pinning** (next) — the analytical brain watches events approaching eviction and pins critical ones (the original user goal, key decisions). Pinned events float above the sliding window, protected from eviction. Same mental model as pinning a message in Discord or Slack. Pins consume budget, so the brain must be judicious — natural pressure toward minimalism.
396
-
397
- **Associative recall** (future) — inspired by [QMD](https://github.com/tobi/qmd). The endocrine system can recall: "Last time this topic came up, curiosity was at 95 and we had a great evening." Hormonal reactions colored by the full history of experiences — like smelling mom's baking and feeling a wave of oxytocin. Not because of the smell, but because of the memory attached to it.
398
-
399
427
  ## The Vision
400
428
 
401
429
  ### The Problem
@@ -553,10 +581,12 @@ This single example demonstrates every core principle:
553
581
  - Event-driven architecture on a shared event bus
554
582
  - Dynamic viewport context assembly (endless sessions, no compaction)
555
583
  - Analytical brain (skills, workflows, goals, session naming)
556
- - 8 built-in tools + MCP integration (HTTP + stdio transports)
584
+ - Mneme memory department (eviction-triggered summarization, persistent snapshots, goal-scoped event pinning, associative recall)
585
+ - 9 built-in tools + MCP integration (HTTP + stdio transports)
557
586
  - 7 built-in skills + 13 built-in workflows (user-extensible)
558
587
  - Sub-agents with lossless context inheritance (5 specialists + generic)
559
588
  - Client-server architecture with WebSocket transport + graceful reconnection
589
+ - Collapsible HUD panel with goals, skills, workflow, and sub-agent tracking
560
590
  - Three TUI view modes (Basic / Verbose / Debug)
561
591
  - Hot-reloadable TOML configuration
562
592
  - Self-authored soul (agent writes its own system prompt)
@@ -564,7 +594,7 @@ This single example demonstrates every core principle:
564
594
  **Designed, not yet implemented:**
565
595
 
566
596
  - Hormonal system (Thymos) — desires as behavioral drivers
567
- - Semantic memory (Mneme) — viewport pinning, associative recall
597
+ - Semantic recall (Mneme) — embedding-based search + re-ranking over FTS5
568
598
  - Soul matrix (Psyche) — evolving coefficient table for individuality
569
599
 
570
600
  ## Development
@@ -585,6 +615,10 @@ bin/dev
585
615
 
586
616
  # Terminal 2: Connect the TUI to the dev brain
587
617
  ./exe/anima tui --host localhost:42135
618
+
619
+ # Optional: enable performance logging for render profiling
620
+ ./exe/anima tui --host localhost:42135 --debug
621
+ # Frame timing data written to log/tui_performance.log
588
622
  ```
589
623
 
590
624
  Development uses port **42135** so it doesn't conflict with the production brain (port 42134) running via systemd. On first run, `bin/dev` runs `db:prepare` automatically.
data/anima-core.gemspec CHANGED
@@ -21,13 +21,14 @@ Gem::Specification.new do |spec|
21
21
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
22
22
  spec.files = IO.popen(%w[git ls-files -z], chdir: __dir__, err: IO::NULL) do |ls|
23
23
  ls.readlines("\x0", chomp: true).reject do |f|
24
- f.start_with?(*%w[bin/console bin/dev bin/setup .gitignore .rspec spec/ .github/ .standard.yml thoughts/ CLAUDE.md .mise.toml])
24
+ f.start_with?(*%w[bin/console bin/dev bin/setup Gemfile .gitignore .rspec spec/ .github/ .standard.yml thoughts/ CLAUDE.md .mise.toml])
25
25
  end
26
26
  end
27
27
  spec.bindir = "exe"
28
28
  spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
29
29
  spec.require_paths = ["lib"]
30
30
 
31
+ spec.add_dependency "certifi"
31
32
  spec.add_dependency "draper", "~> 4.0"
32
33
  spec.add_dependency "faraday", "~> 2.0"
33
34
  spec.add_dependency "foreman", "~> 0.88"
@@ -36,9 +37,11 @@ Gem::Specification.new do |spec|
36
37
  spec.add_dependency "puma", "~> 6.0"
37
38
  spec.add_dependency "rails", "~> 8.1"
38
39
  spec.add_dependency "ratatui_ruby", "~> 1.4"
40
+ spec.add_dependency "reverse_markdown", "~> 3.0"
39
41
  spec.add_dependency "solid_cable", "~> 3.0"
40
42
  spec.add_dependency "solid_queue", "~> 1.1"
41
43
  spec.add_dependency "sqlite3", "~> 2.0"
42
44
  spec.add_dependency "toml-rb", "~> 4.0"
45
+ spec.add_dependency "toon-ruby", "~> 0.1"
43
46
  spec.add_dependency "websocket-client-simple", "~> 0.8"
44
47
  end
@@ -42,9 +42,14 @@ class SessionChannel < ApplicationCable::Channel
42
42
  ActionCable.server.broadcast(stream_name, data)
43
43
  end
44
44
 
45
- # Processes user input: persists the message and enqueues LLM processing.
46
- # When the session is actively processing an agent request, the message
47
- # is queued as "pending" and picked up after the current loop completes.
45
+ # Processes user input by emitting a {Events::UserMessage} on the event bus.
46
+ #
47
+ # When the session is idle, the emission triggers {Events::Subscribers::AgentDispatcher}
48
+ # which schedules {AgentRequestJob} to persist the event and deliver it to the LLM
49
+ # inside a transaction (Bounce Back, #236).
50
+ #
51
+ # When the session is already processing, the message is queued as "pending"
52
+ # and picked up after the current agent loop completes.
48
53
  #
49
54
  # @param data [Hash] must include "content" with the user's message text
50
55
  def speak(data)
@@ -58,7 +63,6 @@ class SessionChannel < ApplicationCable::Channel
58
63
  Events::Bus.emit(Events::UserMessage.new(content: content, session_id: @current_session_id, status: Event::PENDING_STATUS))
59
64
  else
60
65
  Events::Bus.emit(Events::UserMessage.new(content: content, session_id: @current_session_id))
61
- AgentRequestJob.perform_later(@current_session_id)
62
66
  end
63
67
  end
64
68
 
@@ -141,10 +145,18 @@ class SessionChannel < ApplicationCable::Channel
141
145
  token = data["token"].to_s.strip
142
146
 
143
147
  Providers::Anthropic.validate_token_format!(token)
144
- Providers::Anthropic.validate_token_api!(token)
145
- write_anthropic_token(token)
146
148
 
147
- transmit({"action" => "token_saved"})
149
+ warning = begin
150
+ Providers::Anthropic.validate_token_api!(token)
151
+ nil
152
+ rescue Providers::Anthropic::TransientError => transient
153
+ # Token format is valid but API is temporarily unavailable (500, timeout, etc.).
154
+ # Save the token to break the prompt loop — it will work once the API recovers.
155
+ "Token saved but could not be verified — #{transient.message}"
156
+ end
157
+
158
+ write_anthropic_token(token)
159
+ transmit({"action" => "token_saved", "warning" => warning}.compact)
148
160
  rescue Providers::Anthropic::TokenFormatError, Providers::Anthropic::AuthenticationError => error
149
161
  transmit({"action" => "token_error", "message" => error.message})
150
162
  end
@@ -189,12 +201,12 @@ class SessionChannel < ApplicationCable::Channel
189
201
  # client can handle both paths with a single code path.
190
202
  #
191
203
  # Payload: session_id, name, parent_session_id, message_count,
192
- # view_mode, active_skills, goals.
204
+ # view_mode, active_skills, goals, children (when present).
193
205
  #
194
206
  # @param session [Session] the session to announce
195
207
  # @return [void]
196
208
  def transmit_session_changed(session)
197
- transmit({
209
+ payload = {
198
210
  "action" => "session_changed",
199
211
  "session_id" => session.id,
200
212
  "name" => session.name,
@@ -204,7 +216,14 @@ class SessionChannel < ApplicationCable::Channel
204
216
  "active_skills" => session.active_skills,
205
217
  "active_workflow" => session.active_workflow,
206
218
  "goals" => session.goals_summary
207
- })
219
+ }
220
+
221
+ children = session.child_sessions.order(:created_at).select(:id, :name, :processing)
222
+ if children.any?
223
+ payload["children"] = children.map { |child| {"id" => child.id, "name" => child.name, "processing" => child.processing?} }
224
+ end
225
+
226
+ transmit(payload)
208
227
  end
209
228
 
210
229
  # Switches the channel to a different session: stops current stream,
@@ -1,10 +1,12 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require "toon"
4
+
3
5
  # Decorates tool_call events for display in the TUI.
4
6
  # Hidden in basic mode — tool activity is represented by the
5
7
  # aggregated tool counter instead. Verbose mode returns tool name
6
8
  # and a formatted preview of the input arguments. Debug mode shows
7
- # full untruncated input as pretty-printed JSON with tool_use_id.
9
+ # full untruncated input in TOON format with tool_use_id.
8
10
  #
9
11
  # Think tool calls are special: "aloud" thoughts are shown in all
10
12
  # view modes (with a thought bubble), while "inner" thoughts are
@@ -39,7 +41,7 @@ class ToolCallDecorator < EventDecorator
39
41
  {
40
42
  role: :tool_call,
41
43
  tool: payload["tool_name"],
42
- input: JSON.pretty_generate(payload["tool_input"] || {}),
44
+ input: Toon.encode(payload["tool_input"] || {}),
43
45
  tool_use_id: payload["tool_use_id"],
44
46
  timestamp: timestamp
45
47
  }
@@ -98,8 +100,10 @@ class ToolCallDecorator < EventDecorator
98
100
  "$ #{input&.dig("command")}"
99
101
  when "web_get"
100
102
  "GET #{input&.dig("url")}"
103
+ when "read", "edit", "write"
104
+ input&.dig("file_path").to_s
101
105
  else
102
- truncate_lines(input.to_json, max_lines: 2)
106
+ truncate_lines(Toon.encode(input), max_lines: 2)
103
107
  end
104
108
  end
105
109
  end
@@ -0,0 +1,57 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Base class for server-side tool response decoration. Transforms raw tool
4
+ # results into LLM-optimized formats before they enter the event stream.
5
+ #
6
+ # This is a separate decorator type from {EventDecorator}: EventDecorator
7
+ # formats events for clients (TUI/web), while ToolDecorator formats tool
8
+ # responses for the LLM. They sit at different points in the pipeline:
9
+ #
10
+ # Tool executes → ToolDecorator transforms → event stream → EventDecorator renders
11
+ #
12
+ # Subclasses implement {#call} to transform a tool's raw result into an
13
+ # LLM-friendly string. Each tool can have its own ToolDecorator subclass
14
+ # (e.g. {WebGetToolDecorator}) registered in {DECORATOR_MAP}.
15
+ #
16
+ # @example Decorating a tool result
17
+ # ToolDecorator.call("web_get", {body: html, content_type: "text/html"})
18
+ # #=> "[Converted: HTML → Markdown]\n\n# Page Title\n..."
19
+ class ToolDecorator
20
+ DECORATOR_MAP = {
21
+ "web_get" => "WebGetToolDecorator"
22
+ }.freeze
23
+
24
+ # Factory: dispatches to the tool-specific decorator or passes through.
25
+ #
26
+ # @param tool_name [String] registered tool name
27
+ # @param result [String, Hash] raw tool execution result
28
+ # @return [String, Hash] decorated result (String) or original error Hash
29
+ def self.call(tool_name, result)
30
+ return result if result.is_a?(Hash) && result.key?(:error)
31
+
32
+ klass_name = DECORATOR_MAP[tool_name]
33
+ return result unless klass_name
34
+
35
+ klass_name.constantize.new.call(result)
36
+ end
37
+
38
+ # Subclasses override to transform the raw tool result.
39
+ #
40
+ # @param result [String, Hash] raw tool execution result
41
+ # @return [String] LLM-optimized content
42
+ def call(result)
43
+ raise NotImplementedError, "#{self.class} must implement #call"
44
+ end
45
+
46
+ private
47
+
48
+ # Combines decorated text with an optional metadata tag so the LLM
49
+ # knows the content was transformed.
50
+ #
51
+ # @param text [String] the transformed content
52
+ # @param meta [String, nil] conversion tag (e.g. "[Converted: HTML → Markdown]")
53
+ # @return [String]
54
+ def assemble(text:, meta:)
55
+ meta ? "#{meta}\n\n#{text}" : text
56
+ end
57
+ end
@@ -3,8 +3,9 @@
3
3
  # Decorates tool_response events for display in the TUI.
4
4
  # Hidden in basic mode — tool activity is represented by the
5
5
  # aggregated tool counter instead. Verbose mode returns truncated
6
- # output with a success/failure indicator. Debug mode shows full
7
- # untruncated output with tool_use_id and estimated token count.
6
+ # output with a success/failure indicator and tool name for per-tool
7
+ # client-side rendering. Debug mode shows full untruncated output
8
+ # with tool_use_id and estimated token count.
8
9
  #
9
10
  # Think tool responses ("OK") are hidden in basic and verbose modes
10
11
  # because the value is in the tool_call (the thoughts), not the response.
@@ -18,11 +19,13 @@ class ToolResponseDecorator < EventDecorator
18
19
 
19
20
  # Think responses are hidden in verbose mode — the "OK" adds no information.
20
21
  # @return [Hash, nil] structured tool response data, nil for think responses
22
+ # `{role: :tool_response, tool: String, content: String, success: Boolean, timestamp: Integer|nil}`
21
23
  def render_verbose
22
24
  return if think?
23
25
 
24
26
  {
25
27
  role: :tool_response,
28
+ tool: tool_name,
26
29
  content: truncate_lines(content, max_lines: 3),
27
30
  success: payload["success"] != false,
28
31
  timestamp: timestamp
@@ -30,11 +33,12 @@ class ToolResponseDecorator < EventDecorator
30
33
  end
31
34
 
32
35
  # @return [Hash] full tool response data with untruncated content, tool_use_id, and token estimate
33
- # `{role: :tool_response, content: String, success: Boolean, tool_use_id: String|nil,
36
+ # `{role: :tool_response, tool: String, content: String, success: Boolean, tool_use_id: String|nil,
34
37
  # timestamp: Integer|nil, tokens: Integer, estimated: Boolean}`
35
38
  def render_debug
36
39
  {
37
40
  role: :tool_response,
41
+ tool: tool_name,
38
42
  content: content,
39
43
  success: payload["success"] != false,
40
44
  tool_use_id: payload["tool_use_id"],
@@ -53,7 +57,11 @@ class ToolResponseDecorator < EventDecorator
53
57
 
54
58
  private
55
59
 
60
+ def tool_name
61
+ payload["tool_name"]
62
+ end
63
+
56
64
  def think?
57
- payload["tool_name"] == THINK_TOOL
65
+ tool_name == THINK_TOOL
58
66
  end
59
67
  end