anima-core 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +34 -0
  3. data/README.md +20 -32
  4. data/anima-core.gemspec +1 -0
  5. data/app/channels/session_channel.rb +220 -26
  6. data/app/decorators/agent_message_decorator.rb +24 -0
  7. data/app/decorators/application_decorator.rb +6 -0
  8. data/app/decorators/event_decorator.rb +173 -0
  9. data/app/decorators/system_message_decorator.rb +21 -0
  10. data/app/decorators/tool_call_decorator.rb +48 -0
  11. data/app/decorators/tool_response_decorator.rb +37 -0
  12. data/app/decorators/user_message_decorator.rb +35 -0
  13. data/app/jobs/agent_request_job.rb +31 -2
  14. data/app/jobs/count_event_tokens_job.rb +14 -3
  15. data/app/models/concerns/event/broadcasting.rb +63 -0
  16. data/app/models/event.rb +36 -0
  17. data/app/models/session.rb +46 -14
  18. data/config/application.rb +1 -0
  19. data/config/initializers/event_subscribers.rb +0 -1
  20. data/config/routes.rb +0 -6
  21. data/db/cable_schema.rb +14 -2
  22. data/db/migrate/20260312170000_add_view_mode_to_sessions.rb +7 -0
  23. data/db/migrate/20260313010000_add_status_to_events.rb +8 -0
  24. data/db/migrate/20260313020000_add_processing_to_sessions.rb +7 -0
  25. data/lib/agent_loop.rb +5 -2
  26. data/lib/anima/cli.rb +1 -40
  27. data/lib/anima/version.rb +1 -1
  28. data/lib/events/subscribers/persister.rb +1 -0
  29. data/lib/events/user_message.rb +17 -0
  30. data/lib/providers/anthropic.rb +3 -13
  31. data/lib/tools/edit.rb +227 -0
  32. data/lib/tools/read.rb +152 -0
  33. data/lib/tools/write.rb +86 -0
  34. data/lib/tui/app.rb +831 -55
  35. data/lib/tui/cable_client.rb +79 -31
  36. data/lib/tui/input_buffer.rb +181 -0
  37. data/lib/tui/message_store.rb +162 -14
  38. data/lib/tui/screens/chat.rb +504 -75
  39. metadata +30 -5
  40. data/app/controllers/api/sessions_controller.rb +0 -25
  41. data/lib/events/subscribers/action_cable_bridge.rb +0 -35
  42. data/lib/tui/screens/anthropic.rb +0 -25
  43. data/lib/tui/screens/settings.rb +0 -52
# frozen_string_literal: true

# Decorates tool_call events for display in the TUI.
#
# Hidden in basic mode — tool activity is represented by the
# aggregated tool counter instead. Verbose mode returns the tool name
# and a formatted preview of the input arguments. Debug mode shows the
# full untruncated input as pretty-printed JSON with tool_use_id.
class ToolCallDecorator < EventDecorator
  # @return [nil] tool calls are hidden in basic mode
  def render_basic
    nil
  end

  # @return [Hash] structured tool call data
  #   `{role: :tool_call, tool: String, input: String, timestamp: Integer|nil}`
  def render_verbose
    {role: :tool_call, tool: payload["tool_name"], input: format_input, timestamp: timestamp}
  end

  # @return [Hash] full tool call data with untruncated input and tool_use_id
  #   `{role: :tool_call, tool: String, input: String, tool_use_id: String|nil, timestamp: Integer|nil}`
  def render_debug
    {
      role: :tool_call,
      tool: payload["tool_name"],
      input: JSON.pretty_generate(payload["tool_input"] || {}),
      tool_use_id: payload["tool_use_id"],
      timestamp: timestamp
    }
  end

  private

  # Formats tool input for display, with tool-specific formatting for
  # known tools and a generic JSON fallback for others.
  #
  # Fix: the generic fallback now guards against a missing "tool_input"
  # key — `nil.to_json` renders the preview as "null"; serializing an
  # empty hash instead keeps it consistent with {#render_debug}.
  #
  # @return [String] formatted input preview
  def format_input
    input = payload["tool_input"]
    case payload["tool_name"]
    when "bash"
      "$ #{input&.dig("command")}"
    when "web_get"
      "GET #{input&.dig("url")}"
    else
      truncate_lines((input || {}).to_json, max_lines: 2)
    end
  end
end
# frozen_string_literal: true

# Decorates tool_response events for display in the TUI.
#
# Hidden in basic mode — tool activity is shown via the aggregated
# tool counter instead. Verbose mode yields a truncated preview plus a
# success/failure flag. Debug mode yields the full untruncated output
# together with tool_use_id and an estimated token count.
class ToolResponseDecorator < EventDecorator
  # @return [nil] tool responses are hidden in basic mode
  def render_basic
    nil
  end

  # @return [Hash] structured tool response data
  #   `{role: :tool_response, content: String, success: Boolean, timestamp: Integer|nil}`
  def render_verbose
    preview = truncate_lines(content, max_lines: 3)
    {role: :tool_response, content: preview, success: success?, timestamp: timestamp}
  end

  # @return [Hash] full tool response data with untruncated content, tool_use_id, and token estimate
  #   `{role: :tool_response, content: String, success: Boolean, tool_use_id: String|nil,
  #    timestamp: Integer|nil, tokens: Integer, estimated: Boolean}`
  def render_debug
    base = {
      role: :tool_response,
      content: content,
      success: success?,
      tool_use_id: payload["tool_use_id"],
      timestamp: timestamp
    }
    base.merge(token_info)
  end

  private

  # @return [Boolean] false only when the payload explicitly records failure;
  #   a missing "success" key is treated as success
  def success?
    payload["success"] != false
  end
end
# frozen_string_literal: true

# Decorates user_message events for display in the TUI.
#
# Basic mode returns role and content. Verbose mode adds a timestamp.
# Debug mode adds a token count (exact when counted, estimated when not).
# Pending messages include `status: "pending"` so the TUI renders them
# with a visual indicator (dimmed, clock icon).
class UserMessageDecorator < EventDecorator
  # @return [Hash] structured user message data
  #   `{role: :user, content: String}` or with `status: "pending"` when queued
  def render_basic
    annotate_pending({role: :user, content: content})
  end

  # @return [Hash] structured user message with nanosecond timestamp
  def render_verbose
    annotate_pending({role: :user, content: content, timestamp: timestamp})
  end

  # @return [Hash] verbose output plus token count for debugging
  def render_debug
    render_verbose.merge(token_info)
  end

  private

  # Appends the pending marker to a rendered hash when applicable.
  # @param data [Hash] rendered message data
  # @return [Hash] data with `status: "pending"` appended when queued
  def annotate_pending(data)
    pending? ? data.merge(status: "pending") : data
  end

  # @return [Boolean] true when this message is queued but not yet sent to LLM
  def pending?
    payload["status"] == Event::PENDING_STATUS
  end
end
@@ -26,23 +26,52 @@ class AgentRequestJob < ApplicationJob
26
26
 
27
27
  discard_on ActiveRecord::RecordNotFound
28
28
  discard_on Providers::Anthropic::AuthenticationError do |job, error|
29
+ session_id = job.arguments.first
30
+ # Persistent system message for the event log
29
31
  Events::Bus.emit(Events::SystemMessage.new(
30
32
  content: "Authentication failed: #{error.message}",
31
- session_id: job.arguments.first
33
+ session_id: session_id
32
34
  ))
35
+ # Transient signal to trigger TUI token setup popup (not persisted)
36
+ ActionCable.server.broadcast(
37
+ "session_#{session_id}",
38
+ {"action" => "authentication_required", "message" => error.message}
39
+ )
33
40
  end
34
41
 
35
42
  # @param session_id [Integer] ID of the session to process
36
43
  def perform(session_id)
37
44
  session = Session.find(session_id)
45
+
46
+ # Atomic: only one job processes a session at a time. If another job
47
+ # is already running, this one exits — the running job will pick up
48
+ # any pending messages after its current loop completes.
49
+ return unless claim_processing(session_id)
50
+
38
51
  agent_loop = AgentLoop.new(session: session)
39
- agent_loop.run
52
+ loop do
53
+ agent_loop.run
54
+ promoted = session.promote_pending_messages!
55
+ break if promoted == 0
56
+ end
40
57
  ensure
58
+ release_processing(session_id)
41
59
  agent_loop&.finalize
42
60
  end
43
61
 
44
62
  private
45
63
 
64
+ # Sets the session's processing flag atomically. Returns true if this
65
+ # job claimed the lock, false if another job already holds it.
66
+ def claim_processing(session_id)
67
+ Session.where(id: session_id, processing: false).update_all(processing: true) == 1
68
+ end
69
+
70
+ # Clears the processing flag so the session can accept new jobs.
71
+ def release_processing(session_id)
72
+ Session.where(id: session_id).update_all(processing: false)
73
+ end
74
+
46
75
  # Emits a system message before each retry so the user sees
47
76
  # "retrying..." instead of nothing.
48
77
  def retry_job(options = {})
@@ -12,7 +12,7 @@ class CountEventTokensJob < ApplicationJob
12
12
  # @param event_id [Integer] the Event record to count tokens for
13
13
  def perform(event_id)
14
14
  event = Event.find(event_id)
15
- return if event.token_count > 0
15
+ return if already_counted?(event)
16
16
 
17
17
  provider = Providers::Anthropic.new
18
18
  messages = [{role: event.api_role, content: event.payload["content"].to_s}]
@@ -22,7 +22,18 @@ class CountEventTokensJob < ApplicationJob
22
22
  messages: messages
23
23
  )
24
24
 
25
- # Atomic update: only write if still uncounted (avoids race with parallel jobs).
26
- Event.where(id: event.id, token_count: 0).update_all(token_count: token_count)
25
+ # Guard against parallel jobs: reload and re-check before writing.
26
+ # Uses update! (not update_all) so {Event::Broadcasting} after_update_commit
27
+ # broadcasts the updated token count to connected clients.
28
+ event.reload
29
+ return if already_counted?(event)
30
+
31
+ event.update!(token_count: token_count)
32
+ end
33
+
34
+ private
35
+
36
+ def already_counted?(event)
37
+ event.token_count > 0
27
38
  end
28
39
  end
# frozen_string_literal: true

# Broadcasts Event records to connected WebSocket clients via ActionCable.
# Follows the Turbo Streams pattern: events are broadcast on both create
# and update, with an action type so clients can distinguish append from
# replace operations.
#
# Each broadcast includes the Event's database ID, enabling clients to
# maintain an ID-indexed store for efficient in-place updates (e.g. when
# token counts arrive asynchronously from {CountEventTokensJob}).
#
# @example Create broadcast payload
#   {
#     "type" => "user_message", "content" => "hello", ...,
#     "id" => 42, "action" => "create",
#     "rendered" => { "basic" => { "role" => "user", "content" => "hello" } }
#   }
#
# @example Update broadcast payload (e.g. token count arrives)
#   {
#     "type" => "user_message", "content" => "hello", ...,
#     "id" => 42, "action" => "update",
#     "rendered" => { "debug" => { "role" => "user", "content" => "hello", "tokens" => 15 } }
#   }
module Event::Broadcasting
  extend ActiveSupport::Concern

  ACTION_CREATE = "create"
  ACTION_UPDATE = "update"

  included do
    after_create_commit :broadcast_create
    after_update_commit :broadcast_update
  end

  private

  def broadcast_create
    broadcast_event(action: ACTION_CREATE)
  end

  def broadcast_update
    broadcast_event(action: ACTION_UPDATE)
  end

  # Decorates the event for the session's current view mode and broadcasts
  # the payload to the session's ActionCable stream.
  #
  # @param action [String] ACTION_CREATE or ACTION_UPDATE — tells clients how to handle the event
  def broadcast_event(action:)
    return if session_id.nil?

    # Read only the view_mode column — avoids loading the whole Session row.
    view_mode = Session.where(id: session_id).pick(:view_mode) || "basic"
    message = payload.merge("id" => id, "action" => action)

    decorator = EventDecorator.for(self)
    message["rendered"] = {view_mode => decorator.render(view_mode)} if decorator

    ActionCable.server.broadcast("session_#{session_id}", message)
  end
end
data/app/models/event.rb CHANGED
@@ -16,12 +16,18 @@
16
16
  # @!attribute tool_use_id
17
17
  # @return [String, nil] Anthropic-assigned ID correlating tool_call and tool_response
18
18
  class Event < ApplicationRecord
19
+ include Event::Broadcasting
20
+
19
21
  TYPES = %w[system_message user_message agent_message tool_call tool_response].freeze
20
22
  LLM_TYPES = %w[user_message agent_message].freeze
21
23
  CONTEXT_TYPES = %w[user_message agent_message tool_call tool_response].freeze
24
+ PENDING_STATUS = "pending"
22
25
 
23
26
  ROLE_MAP = {"user_message" => "user", "agent_message" => "assistant"}.freeze
24
27
 
28
+ # Heuristic: average bytes per token for English prose.
29
+ BYTES_PER_TOKEN = 4
30
+
25
31
  belongs_to :session
26
32
 
27
33
  validates :event_type, presence: true, inclusion: {in: TYPES}
@@ -40,6 +46,17 @@ class Event < ApplicationRecord
40
46
  # @return [ActiveRecord::Relation]
41
47
  scope :context_events, -> { where(event_type: CONTEXT_TYPES) }
42
48
 
49
+ # @!method self.pending
50
+ # User messages queued during active agent processing, not yet sent to LLM.
51
+ # @return [ActiveRecord::Relation]
52
+ scope :pending, -> { where(status: PENDING_STATUS) }
53
+
54
+ # @!method self.deliverable
55
+ # Events eligible for LLM context (excludes pending messages).
56
+ # NULL status means delivered/processed — the only excluded value is "pending".
57
+ # @return [ActiveRecord::Relation]
58
+ scope :deliverable, -> { where(status: nil) }
59
+
43
60
  # Maps event_type to the Anthropic Messages API role.
44
61
  # @return [String] "user" or "assistant"
45
62
  def api_role
@@ -56,6 +73,25 @@ class Event < ApplicationRecord
56
73
  event_type.in?(CONTEXT_TYPES)
57
74
  end
58
75
 
76
+ # @return [Boolean] true if this is a pending message not yet sent to the LLM
77
+ def pending?
78
+ status == PENDING_STATUS
79
+ end
80
+
81
+ # Heuristic token estimate: ~4 bytes per token for English prose.
82
+ # Tool events are estimated from the full payload JSON since tool_input
83
+ # and tool metadata contribute to token count. Messages use content only.
84
+ #
85
+ # @return [Integer] estimated token count (at least 1)
86
+ def estimate_tokens
87
+ text = if event_type.in?(%w[tool_call tool_response])
88
+ payload.to_json
89
+ else
90
+ payload["content"].to_s
91
+ end
92
+ [(text.bytesize / BYTES_PER_TOKEN.to_f).ceil, 1].max
93
+ end
94
+
59
95
  private
60
96
 
61
97
  def schedule_token_count
@@ -7,24 +7,38 @@ class Session < ApplicationRecord
7
7
  # Claude Sonnet 4 context window minus system prompt reserve.
8
8
  DEFAULT_TOKEN_BUDGET = 190_000
9
9
 
10
- # Heuristic: average bytes per token for English prose.
11
- BYTES_PER_TOKEN = 4
10
+ VIEW_MODES = %w[basic verbose debug].freeze
12
11
 
13
12
  has_many :events, -> { order(:id) }, dependent: :destroy
14
13
 
14
+ validates :view_mode, inclusion: {in: VIEW_MODES}
15
+
15
16
  scope :recent, ->(limit = 10) { order(updated_at: :desc).limit(limit) }
16
17
 
18
+ # Cycles to the next view mode: basic → verbose → debug → basic.
19
+ #
20
+ # @return [String] the next view mode in the cycle
21
+ def next_view_mode
22
+ current_index = VIEW_MODES.index(view_mode) || 0
23
+ VIEW_MODES[(current_index + 1) % VIEW_MODES.size]
24
+ end
25
+
17
26
  # Returns the events currently visible in the LLM context window.
18
27
  # Walks events newest-first and includes them until the token budget
19
28
  # is exhausted. Events are full-size or excluded entirely.
20
29
  #
21
30
  # @param token_budget [Integer] maximum tokens to include (positive)
31
+ # @param include_pending [Boolean] whether to include pending messages (true for
32
+ # display, false for LLM context assembly)
22
33
  # @return [Array<Event>] chronologically ordered
23
- def viewport_events(token_budget: DEFAULT_TOKEN_BUDGET)
34
+ def viewport_events(token_budget: DEFAULT_TOKEN_BUDGET, include_pending: true)
35
+ scope = events.context_events
36
+ scope = scope.deliverable unless include_pending
37
+
24
38
  selected = []
25
39
  remaining = token_budget
26
40
 
27
- events.context_events.reorder(id: :desc).each do |event|
41
+ scope.reorder(id: :desc).each do |event|
28
42
  cost = (event.token_count > 0) ? event.token_count : estimate_tokens(event)
29
43
  break if cost > remaining && selected.any?
30
44
 
@@ -35,16 +49,40 @@ class Session < ApplicationRecord
35
49
  selected.reverse
36
50
  end
37
51
 
52
+ # Returns the assembled system prompt for this session.
53
+ # The system prompt includes system instructions, goals, and memories.
54
+ # Currently a placeholder — these subsystems are not yet implemented.
55
+ #
56
+ # @return [String, nil] the system prompt text, or nil if not configured
57
+ def system_prompt
58
+ nil
59
+ end
60
+
38
61
  # Builds the message array expected by the Anthropic Messages API.
39
62
  # Includes user/agent messages and tool call/response events in
40
63
  # Anthropic's wire format. Consecutive tool_call events are grouped
41
64
  # into a single assistant message; consecutive tool_response events
42
65
  # are grouped into a single user message with tool_result blocks.
66
+ # Pending messages are excluded — they haven't been delivered yet.
43
67
  #
44
68
  # @param token_budget [Integer] maximum tokens to include (positive)
45
69
  # @return [Array<Hash>] Anthropic Messages API format
46
70
  def messages_for_llm(token_budget: DEFAULT_TOKEN_BUDGET)
47
- assemble_messages(viewport_events(token_budget: token_budget))
71
+ assemble_messages(viewport_events(token_budget: token_budget, include_pending: false))
72
+ end
73
+
74
+ # Promotes all pending user messages to delivered status so they
75
+ # appear in the next LLM context. Triggers broadcast_update for
76
+ # each event so connected clients refresh the pending indicator.
77
+ #
78
+ # @return [Integer] number of promoted messages
79
+ def promote_pending_messages!
80
+ promoted = 0
81
+ events.where(event_type: "user_message", status: Event::PENDING_STATUS).find_each do |event|
82
+ event.update!(status: nil, payload: event.payload.except("status"))
83
+ promoted += 1
84
+ end
85
+ promoted
48
86
  end
49
87
 
50
88
  private
@@ -97,18 +135,12 @@ class Session < ApplicationRecord
97
135
  }
98
136
  end
99
137
 
100
- # Rough estimate for events not yet counted by the background job.
101
- # For tool events, estimates from the full payload since tool_input
102
- # and tool metadata contribute to token count.
138
+ # Delegates to {Event#estimate_tokens} for events not yet counted
139
+ # by the background job.
103
140
  #
104
141
  # @param event [Event]
105
142
  # @return [Integer] at least 1
106
143
  def estimate_tokens(event)
107
- text = if event.event_type.in?(%w[tool_call tool_response])
108
- event.payload.to_json
109
- else
110
- event.payload["content"].to_s
111
- end
112
- [(text.bytesize / BYTES_PER_TOKEN.to_f).ceil, 1].max
144
+ event.estimate_tokens
113
145
  end
114
146
  end
@@ -8,6 +8,7 @@ require "active_record/railtie"
8
8
  require "active_job/railtie"
9
9
  require "action_cable/engine"
10
10
  require "rails/test_unit/railtie"
11
+ require "draper"
11
12
  require "solid_cable"
12
13
  require "solid_queue"
13
14
 
@@ -7,5 +7,4 @@ Rails.application.config.after_initialize do
7
7
  # Global persister handles events from all sessions (brain server, background jobs).
8
8
  # Skipped in test — specs manage their own persisters for isolation.
9
9
  Events::Bus.subscribe(Events::Subscribers::Persister.new) unless Rails.env.test?
10
- Events::Bus.subscribe(Events::Subscribers::ActionCableBridge.instance)
11
10
  end
data/config/routes.rb CHANGED
@@ -3,10 +3,4 @@
3
3
  Rails.application.routes.draw do
4
4
  mount ActionCable.server => "/cable"
5
5
  get "up", to: "rails/health#show", as: :rails_health_check
6
-
7
- namespace :api do
8
- resources :sessions, only: [:create] do
9
- get :current, on: :collection
10
- end
11
- end
12
6
  end
data/db/cable_schema.rb CHANGED
@@ -1,9 +1,21 @@
1
+ # This file is auto-generated from the current state of the database. Instead
2
+ # of editing this file, please use the migrations feature of Active Record to
3
+ # incrementally modify your database, and then regenerate this schema definition.
4
+ #
5
+ # This file is the source Rails uses to define your schema when running `bin/rails
6
+ # db:schema:load`. When creating a new database, `bin/rails db:schema:load` tends to
7
+ # be faster and is potentially less error prone than running all of your
8
+ # migrations from scratch. Old migrations may fail to apply correctly if those
9
+ # migrations use external dependencies or application code.
10
+ #
11
+ # It's strongly recommended that you check this file into your version control system.
12
+
1
13
  ActiveRecord::Schema[8.1].define(version: 1) do
2
14
  create_table "solid_cable_messages", force: :cascade do |t|
3
15
  t.binary "channel", limit: 1024, null: false
4
- t.binary "payload", limit: 536870912, null: false
5
- t.datetime "created_at", null: false
6
16
  t.integer "channel_hash", limit: 8, null: false
17
+ t.datetime "created_at", null: false
18
+ t.binary "payload", limit: 536870912, null: false
7
19
  t.index ["channel"], name: "index_solid_cable_messages_on_channel"
8
20
  t.index ["channel_hash"], name: "index_solid_cable_messages_on_channel_hash"
9
21
  t.index ["created_at"], name: "index_solid_cable_messages_on_created_at"
# frozen_string_literal: true

# Adds sessions.view_mode (string, NOT NULL, default "basic"), which
# controls how events are decorated for the TUI; Session validates it
# against "basic"/"verbose"/"debug".
class AddViewModeToSessions < ActiveRecord::Migration[8.1]
  def change
    add_column :sessions, :view_mode, :string, default: "basic", null: false
  end
end
# frozen_string_literal: true

# Adds events.status (nullable string; "pending" is the only non-NULL
# value used — NULL means delivered/processed) plus a composite index
# supporting per-session pending-message lookups.
class AddStatusToEvents < ActiveRecord::Migration[8.1]
  def change
    add_column :events, :status, :string
    add_index :events, [:session_id, :status], name: "index_events_on_session_id_and_status"
  end
end
# frozen_string_literal: true

# Adds sessions.processing (boolean, NOT NULL, default false) — the
# flag AgentRequestJob claims atomically so only one job processes a
# given session at a time.
class AddProcessingToSessions < ActiveRecord::Migration[8.1]
  def change
    add_column :sessions, :processing, :boolean, default: false, null: false
  end
end
data/lib/agent_loop.rb CHANGED
@@ -87,11 +87,14 @@ class AgentLoop
87
87
  private
88
88
 
89
89
  # Builds the default tool registry with all available tools.
90
- # @return [Tools::Registry] registry with Bash and WebGet tools
90
+ # @return [Tools::Registry] registry with all available tools
91
91
  def build_tool_registry
92
92
  registry = Tools::Registry.new(context: {shell_session: @shell_session})
93
- registry.register(Tools::WebGet)
94
93
  registry.register(Tools::Bash)
94
+ registry.register(Tools::Read)
95
+ registry.register(Tools::Write)
96
+ registry.register(Tools::Edit)
97
+ registry.register(Tools::WebGet)
95
98
  registry
96
99
  end
97
100
  end
data/lib/anima/cli.rb CHANGED
@@ -47,17 +47,13 @@ module Anima
47
47
  option :host, desc: "Brain server address (default: #{DEFAULT_HOST})"
48
48
  def tui
49
49
  require "ratatui_ruby"
50
- require "net/http"
51
- require "json"
52
50
  require_relative "../tui/app"
53
51
 
54
52
  host = options[:host] || DEFAULT_HOST
55
53
 
56
54
  say "Connecting to brain at #{host}...", :cyan
57
- session_id = fetch_current_session_with_retry(host)
58
- say "Session ##{session_id} — starting TUI", :cyan
59
55
 
60
- cable_client = TUI::CableClient.new(host: host, session_id: session_id)
56
+ cable_client = TUI::CableClient.new(host: host)
61
57
  cable_client.connect
62
58
 
63
59
  TUI::App.new(cable_client: cable_client).run
@@ -71,40 +67,5 @@ module Anima
71
67
  end
72
68
 
73
69
  private
74
-
75
- MAX_SESSION_FETCH_ATTEMPTS = 10
76
- SESSION_FETCH_DELAY = 2 # seconds between retries
77
-
78
- # Fetches the current session ID from the brain's REST API.
79
- # Retries up to {MAX_SESSION_FETCH_ATTEMPTS} times if the brain is not running.
80
- #
81
- # @param host [String] brain server address
82
- # @return [Integer] session ID
83
- def fetch_current_session_with_retry(host)
84
- attempts = 0
85
- begin
86
- fetch_current_session(host)
87
- rescue Errno::ECONNREFUSED, Net::ReadTimeout, Net::OpenTimeout, SocketError => error
88
- attempts += 1
89
- if attempts >= MAX_SESSION_FETCH_ATTEMPTS
90
- say "Cannot connect to brain after #{MAX_SESSION_FETCH_ATTEMPTS} attempts", :red
91
- exit 1
92
- end
93
- say "Brain not available (#{error.class.name.split("::").last}). " \
94
- "Retrying #{attempts}/#{MAX_SESSION_FETCH_ATTEMPTS}... (Ctrl+C to cancel)", :yellow
95
- sleep SESSION_FETCH_DELAY
96
- retry
97
- end
98
- end
99
-
100
- # Fetches the current session ID from the brain's REST API.
101
- # @param host [String] brain server address
102
- # @return [Integer] session ID
103
- # @raise [RuntimeError] if the brain returns an error response
104
- def fetch_current_session(host)
105
- uri = URI("http://#{host}/api/sessions/current")
106
- body = Net::HTTP.get(uri)
107
- JSON.parse(body)["id"]
108
- end
109
70
  end
110
71
  end
data/lib/anima/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Anima
4
- VERSION = "0.2.0"
4
+ VERSION = "0.3.0"
5
5
  end
@@ -42,6 +42,7 @@ module Events
42
42
  target_session.events.create!(
43
43
  event_type: event_type,
44
44
  payload: payload,
45
+ status: payload[:status],
45
46
  tool_use_id: payload[:tool_use_id],
46
47
  timestamp: payload[:timestamp] || Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond)
47
48
  )
# (Defined inside `module Events`; the module wrapper lies outside this hunk.)
class UserMessage < Base
  TYPE = "user_message"

  # @return [String, nil] "pending" when queued during active processing, nil otherwise
  attr_reader :status

  # @param content [String] message text
  # @param session_id [Integer, nil] session identifier
  # @param status [String, nil] "pending" when queued during active agent processing
  def initialize(content:, session_id: nil, status: nil)
    super(content: content, session_id: session_id)
    @status = status
  end

  # @return [String] the event type discriminator
  def type
    TYPE
  end

  # @return [Hash] serialized event; includes :status only when present
  def to_h
    status ? super.merge(status: status) : super
  end
end
11
28
  end