brute 1.0.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. checksums.yaml +4 -4
  2. data/lib/brute/agent.rb +72 -6
  3. data/lib/brute/events/handler.rb +69 -0
  4. data/lib/brute/events/prefixed_terminal_output.rb +72 -0
  5. data/lib/brute/events/terminal_output_handler.rb +68 -0
  6. data/lib/brute/middleware/001_otel_span.rb +77 -0
  7. data/lib/brute/middleware/003_tool_result_loop.rb +103 -0
  8. data/lib/brute/middleware/004_summarize.rb +139 -0
  9. data/lib/brute/middleware/005_tracing.rb +86 -0
  10. data/lib/brute/middleware/010_max_iterations.rb +73 -0
  11. data/lib/brute/middleware/015_otel_token_usage.rb +42 -0
  12. data/lib/brute/middleware/020_system_prompt.rb +128 -0
  13. data/lib/brute/middleware/040_compaction_check.rb +155 -0
  14. data/lib/brute/middleware/060_questions.rb +41 -0
  15. data/lib/brute/middleware/070_tool_call.rb +247 -0
  16. data/lib/brute/middleware/073_otel_tool_call.rb +49 -0
  17. data/lib/brute/middleware/075_otel_tool_results.rb +46 -0
  18. data/lib/brute/middleware/100_llm_call.rb +62 -0
  19. data/lib/brute/middleware/event_handler.rb +25 -0
  20. data/lib/brute/middleware/user_queue.rb +35 -0
  21. data/lib/brute/pipeline.rb +44 -107
  22. data/lib/brute/prompts/skills.rb +2 -2
  23. data/lib/brute/prompts.rb +23 -23
  24. data/lib/brute/providers/shell.rb +6 -19
  25. data/lib/brute/providers/shell_response.rb +22 -30
  26. data/lib/brute/session.rb +52 -0
  27. data/lib/brute/store/snapshot_store.rb +21 -37
  28. data/lib/brute/sub_agent.rb +106 -0
  29. data/lib/brute/system_prompt.rb +1 -83
  30. data/lib/brute/tool.rb +107 -0
  31. data/lib/brute/tools/delegate.rb +61 -70
  32. data/lib/brute/tools/fs_patch.rb +9 -7
  33. data/lib/brute/tools/fs_read.rb +233 -20
  34. data/lib/brute/tools/fs_remove.rb +8 -9
  35. data/lib/brute/tools/fs_search.rb +98 -16
  36. data/lib/brute/tools/fs_undo.rb +8 -8
  37. data/lib/brute/tools/fs_write.rb +7 -5
  38. data/lib/brute/tools/net_fetch.rb +8 -8
  39. data/lib/brute/tools/question.rb +36 -24
  40. data/lib/brute/tools/shell.rb +74 -16
  41. data/lib/brute/tools/todo_read.rb +8 -8
  42. data/lib/brute/tools/todo_write.rb +25 -18
  43. data/lib/brute/tools.rb +8 -12
  44. data/lib/brute/truncation.rb +219 -0
  45. data/lib/brute/version.rb +1 -1
  46. data/lib/brute.rb +82 -45
  47. metadata +59 -46
  48. data/lib/brute/loop/agent_stream.rb +0 -118
  49. data/lib/brute/loop/agent_turn.rb +0 -520
  50. data/lib/brute/loop/compactor.rb +0 -107
  51. data/lib/brute/loop/doom_loop.rb +0 -86
  52. data/lib/brute/loop/step.rb +0 -332
  53. data/lib/brute/loop/tool_call_step.rb +0 -90
  54. data/lib/brute/middleware/base.rb +0 -27
  55. data/lib/brute/middleware/compaction_check.rb +0 -106
  56. data/lib/brute/middleware/doom_loop_detection.rb +0 -136
  57. data/lib/brute/middleware/llm_call.rb +0 -128
  58. data/lib/brute/middleware/message_tracking.rb +0 -339
  59. data/lib/brute/middleware/otel/span.rb +0 -105
  60. data/lib/brute/middleware/otel/token_usage.rb +0 -68
  61. data/lib/brute/middleware/otel/tool_calls.rb +0 -68
  62. data/lib/brute/middleware/otel/tool_results.rb +0 -65
  63. data/lib/brute/middleware/otel.rb +0 -34
  64. data/lib/brute/middleware/reasoning_normalizer.rb +0 -192
  65. data/lib/brute/middleware/retry.rb +0 -157
  66. data/lib/brute/middleware/session_persistence.rb +0 -72
  67. data/lib/brute/middleware/token_tracking.rb +0 -124
  68. data/lib/brute/middleware/tool_error_tracking.rb +0 -179
  69. data/lib/brute/middleware/tool_use_guard.rb +0 -133
  70. data/lib/brute/middleware/tracing.rb +0 -124
  71. data/lib/brute/middleware.rb +0 -18
  72. data/lib/brute/orchestrator/turn.rb +0 -105
  73. data/lib/brute/patches/anthropic_tool_role.rb +0 -35
  74. data/lib/brute/patches/buffer_nil_guard.rb +0 -26
  75. data/lib/brute/providers/models_dev.rb +0 -111
  76. data/lib/brute/providers/ollama.rb +0 -135
  77. data/lib/brute/providers/opencode_go.rb +0 -43
  78. data/lib/brute/providers/opencode_zen.rb +0 -87
  79. data/lib/brute/providers.rb +0 -62
  80. data/lib/brute/queue/base_queue.rb +0 -222
  81. data/lib/brute/queue/parallel_queue.rb +0 -66
  82. data/lib/brute/queue/sequential_queue.rb +0 -63
  83. data/lib/brute/store/message_store.rb +0 -362
  84. data/lib/brute/store/session.rb +0 -106
  85. data/lib/brute/{diff.rb → utils/diff.rb} +0 -0
@@ -1,35 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- # Monkey-patch: Fix Anthropic tool result message role.
4
- #
5
- # llm.rb stores tool results as messages with role="tool" (via @llm.tool_role).
6
- # Anthropic's API requires tool result messages to have role="user" with
7
- # tool_result content blocks. The Completion adapter already correctly formats
8
- # the content (Function::Return -> {type: "tool_result", ...}), but passes
9
- # through the "tool" role unchanged — which Anthropic rejects.
10
- #
11
- # This patch overrides adapt_message to set role="user" when the message
12
- # content contains tool returns.
13
-
14
- module Brute
15
- module Patches
16
- module AnthropicToolRole
17
- private
18
-
19
- def adapt_message
20
- if message.respond_to?(:role) && message.role.to_s == "tool"
21
- {role: "user", content: adapt_content(content)}
22
- else
23
- super
24
- end
25
- end
26
-
27
- # Apply the patch lazily — LLM::Anthropic is autoloaded.
28
- def self.apply!
29
- return if @applied
30
- @applied = true
31
- LLM::Anthropic::RequestAdapter::Completion.prepend(self)
32
- end
33
- end
34
- end
35
- end
@@ -1,26 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- if __FILE__ == $0
4
- require "bundler/setup"
5
- require "brute"
6
- end
7
-
8
- # Monkey-patch: Guard LLM::Buffer against nil entries.
9
- #
10
- # llm.rb's Context#talk can sometimes concatenate nil into the message
11
- # buffer (e.g. when response parsing yields a nil choice). This causes
12
- # NoMethodError when the buffer is iterated (assistant?, tool_return?, etc).
13
- #
14
- # This patch overrides concat to filter out nils before they enter the buffer.
15
-
16
- module Brute
17
- module Patches
18
- module BufferNilGuard
19
- def concat(messages)
20
- super(Array(messages).compact)
21
- end
22
- end
23
- end
24
- end
25
-
26
- LLM::Buffer.prepend(Brute::Patches::BufferNilGuard)
@@ -1,111 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- require "net/http"
4
- require "json"
5
-
6
- module Brute
7
- module Providers
8
- # Fetches and caches model metadata from the models.dev catalog.
9
- #
10
- # Quacks like llm.rb's provider.models so that the REPL's model
11
- # picker can call:
12
- #
13
- # provider.models.all.select(&:chat?)
14
- #
15
- # Models are fetched from https://models.dev/api.json and cached
16
- # in-memory for the lifetime of the process (with a TTL).
17
- #
18
- class ModelsDev
19
- CATALOG_URL = "https://models.dev/api.json"
20
- CACHE_TTL = 3600 # 1 hour
21
-
22
- ModelEntry = Struct.new(:id, :name, :chat?, :cost, :limit, :reasoning, :tool_call, keyword_init: true)
23
-
24
- # @param provider [LLM::Provider] the provider instance (for delegating execute/headers)
25
- # @param provider_id [String] the provider key in models.dev (e.g., "opencode", "opencode-go")
26
- def initialize(provider:, provider_id: "opencode")
27
- @provider = provider
28
- @provider_id = provider_id
29
- end
30
-
31
- # Returns all models for this provider from the models.dev catalog.
32
- # @return [Array<ModelEntry>]
33
- def all
34
- entries = fetch_provider_models
35
- entries.map do |id, model|
36
- ModelEntry.new(
37
- id: id,
38
- name: model["name"] || id,
39
- chat?: true,
40
- cost: model["cost"],
41
- limit: model["limit"],
42
- reasoning: model["reasoning"] || false,
43
- tool_call: model["tool_call"] || false
44
- )
45
- end.sort_by(&:id)
46
- end
47
-
48
- private
49
-
50
- def fetch_provider_models
51
- catalog = self.class.fetch_catalog
52
- provider_data = catalog[@provider_id]
53
- return {} unless provider_data
54
-
55
- provider_data["models"] || {}
56
- end
57
-
58
- class << self
59
- # Fetch the models.dev catalog, with in-memory caching.
60
- # Thread-safe via a simple mutex.
61
- def fetch_catalog
62
- @mutex ||= Mutex.new
63
- @mutex.synchronize do
64
- if @catalog && @fetched_at && (Time.now - @fetched_at < CACHE_TTL)
65
- return @catalog
66
- end
67
-
68
- @catalog = download_catalog
69
- @fetched_at = Time.now
70
- @catalog
71
- end
72
- end
73
-
74
- # Force a cache refresh on next access.
75
- def invalidate_cache!
76
- @mutex&.synchronize do
77
- @catalog = nil
78
- @fetched_at = nil
79
- end
80
- end
81
-
82
- private
83
-
84
- def download_catalog
85
- uri = URI.parse(CATALOG_URL)
86
- http = Net::HTTP.new(uri.host, uri.port)
87
- http.use_ssl = true
88
- http.open_timeout = 10
89
- http.read_timeout = 30
90
-
91
- request = Net::HTTP::Get.new(uri.request_uri)
92
- request["User-Agent"] = "brute/#{Brute::VERSION}"
93
- request["Accept"] = "application/json"
94
-
95
- response = http.request(request)
96
-
97
- unless response.is_a?(Net::HTTPSuccess)
98
- raise "Failed to fetch models.dev catalog: HTTP #{response.code}"
99
- end
100
-
101
- JSON.parse(response.body)
102
- rescue => e
103
- # Return empty catalog on failure so the provider still works
104
- # with default_model, just without a model list.
105
- warn "[brute] Warning: Could not fetch models.dev catalog: #{e.message}"
106
- {}
107
- end
108
- end
109
- end
110
- end
111
- end
@@ -1,135 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- require "bundler/setup"
4
- require "brute"
5
-
6
- # Ensure the Ollama provider is loaded (llm.rb lazy-loads providers).
7
- unless defined?(LLM::Ollama)
8
- require "llm/providers/ollama"
9
- end
10
-
11
- module Brute
12
- module Providers
13
- ##
14
- # Brute-level wrapper around LLM::Ollama for local model inference.
15
- #
16
- # Adds environment-variable-based configuration so that all Brute
17
- # examples and the CLI work out of the box with a local Ollama
18
- # instance:
19
- #
20
- # OLLAMA_HOST — base URL (default: http://localhost:11434)
21
- # OLLAMA_MODEL — default model (default: llm.rb's default, currently qwen3:latest)
22
- #
23
- # @example Auto-detect via environment
24
- # export OLLAMA_HOST=http://localhost:11434
25
- # ruby examples/01_basic_agent.rb
26
- #
27
- # @example Remote Ollama server
28
- # export OLLAMA_HOST=http://192.168.1.50:11434
29
- # export OLLAMA_MODEL=llama3.1:8b
30
- # ruby examples/02_fix_a_bug.rb
31
- #
32
- class Ollama < LLM::Ollama
33
- ##
34
- # Parse OLLAMA_HOST into host, port, and ssl components.
35
- # Accepts formats like:
36
- # http://localhost:11434
37
- # https://ollama.example.com
38
- # 192.168.1.50:11434
39
- # localhost
40
- #
41
- # @param url [String, nil] raw OLLAMA_HOST value
42
- # @return [Hash] with :host, :port, :ssl keys
43
- def self.parse_host(url)
44
- return { host: LLM::Ollama::HOST, port: 11434, ssl: false } if url.nil? || url.empty?
45
-
46
- # Prepend scheme if missing so URI.parse works
47
- url = "http://#{url}" unless url.match?(%r{\A\w+://})
48
- uri = URI.parse(url)
49
-
50
- {
51
- host: uri.host || LLM::Ollama::HOST,
52
- port: uri.port || 11434,
53
- ssl: uri.scheme == "https",
54
- }
55
- end
56
-
57
- ##
58
- # @param key [String] ignored (Ollama needs no auth), kept for provider interface
59
- def initialize(key: "none", **)
60
- config = self.class.parse_host(ENV["OLLAMA_HOST"])
61
- super(key: key, host: config[:host], port: config[:port], ssl: config[:ssl], **)
62
- end
63
-
64
- ##
65
- # @return [Symbol]
66
- def name
67
- :ollama
68
- end
69
-
70
- ##
71
- # Returns the default model, preferring OLLAMA_MODEL env var.
72
- # @return [String]
73
- def default_model
74
- ENV["OLLAMA_MODEL"] || super
75
- end
76
- end
77
- end
78
- end
79
-
80
- test do
81
- parse = proc { |url| Brute::Providers::Ollama.parse_host(url) }
82
-
83
- describe ".parse_host" do
84
- it "returns defaults for nil" do
85
- parse.(nil).should == { host: "localhost", port: 11434, ssl: false }
86
- end
87
-
88
- it "returns defaults for empty string" do
89
- parse.("").should == { host: "localhost", port: 11434, ssl: false }
90
- end
91
-
92
- it "parses http URL with port" do
93
- parse.("http://192.168.1.50:11434").should == { host: "192.168.1.50", port: 11434, ssl: false }
94
- end
95
-
96
- it "parses https URL" do
97
- parse.("https://ollama.example.com").should == { host: "ollama.example.com", port: 443, ssl: true }
98
- end
99
-
100
- it "parses host:port without scheme" do
101
- parse.("192.168.1.50:11434").should == { host: "192.168.1.50", port: 11434, ssl: false }
102
- end
103
-
104
- it "parses bare hostname" do
105
- parse.("myhost").should == { host: "myhost", port: 80, ssl: false }
106
- end
107
- end
108
-
109
- describe "#name" do
110
- it "returns :ollama" do
111
- provider = Brute::Providers::Ollama.new
112
- provider.name.should == :ollama
113
- end
114
- end
115
-
116
- describe "#default_model" do
117
- it "falls back to llm.rb default when OLLAMA_MODEL is not set" do
118
- original = ENV["OLLAMA_MODEL"]
119
- ENV.delete("OLLAMA_MODEL")
120
- provider = Brute::Providers::Ollama.new
121
- provider.default_model.should == "qwen3:latest"
122
- ensure
123
- ENV["OLLAMA_MODEL"] = original if original
124
- end
125
-
126
- it "uses OLLAMA_MODEL env var when set" do
127
- original = ENV["OLLAMA_MODEL"]
128
- ENV["OLLAMA_MODEL"] = "llama3.1:8b"
129
- provider = Brute::Providers::Ollama.new
130
- provider.default_model.should == "llama3.1:8b"
131
- ensure
132
- ENV["OLLAMA_MODEL"] = original
133
- end
134
- end
135
- end
@@ -1,43 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- if __FILE__ == $0
4
- require "bundler/setup"
5
- require "brute"
6
- end
7
-
8
- module LLM
9
- ##
10
- # OpenAI-compatible provider for the OpenCode Go API gateway.
11
- #
12
- # OpenCode Go is the low-cost subscription plan with a restricted
13
- # (lite) model list. Same gateway as Zen, different endpoint path.
14
- #
15
- # @example
16
- # llm = LLM::OpencodeGo.new(key: ENV["OPENCODE_API_KEY"])
17
- # ctx = LLM::Context.new(llm)
18
- # ctx.talk "Hello from brute"
19
- #
20
- class OpencodeGo < OpencodeZen
21
- ##
22
- # @return [Symbol]
23
- def name
24
- :opencode_go
25
- end
26
-
27
- ##
28
- # Returns models from the models.dev catalog.
29
- # Note: The Go gateway only accepts lite-tier models, but models.dev
30
- # doesn't distinguish between Zen and Go tiers. We show the full
31
- # catalog; the gateway returns an error for unsupported models.
32
- # @return [Brute::Providers::ModelsDev]
33
- def models
34
- Brute::Providers::ModelsDev.new(provider: self, provider_id: "opencode")
35
- end
36
-
37
- private
38
-
39
- def completions_path
40
- "/zen/go/v1/chat/completions"
41
- end
42
- end
43
- end
@@ -1,87 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- if __FILE__ == $0
4
- require "bundler/setup"
5
- require "brute"
6
- end
7
-
8
- # Ensure the OpenAI provider is loaded (llm.rb lazy-loads providers).
9
- unless defined?(LLM::OpenAI)
10
- require "llm/providers/openai"
11
- end
12
-
13
- module LLM
14
- ##
15
- # OpenAI-compatible provider for the OpenCode Zen API gateway.
16
- #
17
- # OpenCode Zen is a curated model gateway at opencode.ai that proxies
18
- # requests to upstream LLM providers (Anthropic, OpenAI, Google, etc.).
19
- # All models are accessed via the OpenAI-compatible chat completions
20
- # endpoint; the gateway handles format conversion internally.
21
- #
22
- # @example
23
- # llm = LLM::OpencodeZen.new(key: ENV["OPENCODE_API_KEY"])
24
- # ctx = LLM::Context.new(llm)
25
- # ctx.talk "Hello from brute"
26
- #
27
- # @example Anonymous access (free models only)
28
- # llm = LLM::OpencodeZen.new(key: "public")
29
- # ctx = LLM::Context.new(llm)
30
- # ctx.talk "Hello"
31
- #
32
- class OpencodeZen < OpenAI
33
- HOST = "opencode.ai"
34
-
35
- ##
36
- # @param key [String] OpenCode API key, or "public" for anonymous access
37
- # @param (see LLM::Provider#initialize)
38
- def initialize(key: "public", **)
39
- super(host: HOST, key: key, **)
40
- end
41
-
42
- ##
43
- # @return [Symbol]
44
- def name
45
- :opencode_zen
46
- end
47
-
48
- ##
49
- # Returns the default model.
50
- # @return [String]
51
- def default_model
52
- "zen-bickpickle"
53
- end
54
-
55
- ##
56
- # Returns models from the models.dev catalog for the opencode provider.
57
- # @return [Brute::Providers::ModelsDev]
58
- def models
59
- Brute::Providers::ModelsDev.new(provider: self, provider_id: "opencode")
60
- end
61
-
62
- # -- Unsupported sub-APIs --
63
-
64
- def responses = raise(NotImplementedError, "Use chat completions via the Zen gateway")
65
- def images = raise(NotImplementedError, "Not supported via Zen gateway")
66
- def audio = raise(NotImplementedError, "Not supported via Zen gateway")
67
- def files = raise(NotImplementedError, "Not supported via Zen gateway")
68
- def moderations = raise(NotImplementedError, "Not supported via Zen gateway")
69
- def vector_stores = raise(NotImplementedError, "Not supported via Zen gateway")
70
-
71
- private
72
-
73
- def completions_path
74
- "/zen/v1/chat/completions"
75
- end
76
-
77
- def headers
78
- lock do
79
- (@headers || {}).merge(
80
- "Content-Type" => "application/json",
81
- "Authorization" => "Bearer #{@key}",
82
- "x-opencode-client" => "brute"
83
- )
84
- end
85
- end
86
- end
87
- end
@@ -1,62 +0,0 @@
1
- require_relative 'providers/shell_response'
2
- require_relative 'providers/shell'
3
- require_relative 'providers/models_dev'
4
- require_relative 'providers/opencode_zen'
5
- require_relative 'providers/opencode_go'
6
- require_relative 'providers/ollama'
7
-
8
- module Brute
9
- module Providers
10
- ALL = {
11
- 'anthropic' => ->(key) { LLM.anthropic(key: key).tap { Patches::AnthropicToolRole.apply! } },
12
- 'openai' => ->(key) { LLM.openai(key: key) },
13
- 'google' => ->(key) { LLM.google(key: key) },
14
- 'deepseek' => ->(key) { LLM.deepseek(key: key) },
15
- 'ollama' => ->(_key) { Providers::Ollama.new },
16
- 'xai' => ->(key) { LLM.xai(key: key) },
17
- 'opencode_zen' => ->(key) { LLM::OpencodeZen.new(key: key) },
18
- 'opencode_go' => ->(key) { LLM::OpencodeGo.new(key: key) },
19
- 'shell' => ->(_key) { Providers::Shell.new },
20
- }.freeze
21
-
22
- # Resolve the LLM provider from environment variables.
23
- #
24
- # Checks in order:
25
- # 1. LLM_API_KEY + LLM_PROVIDER (explicit)
26
- # 2. OPENCODE_API_KEY (implicit: provider = opencode_zen)
27
- # 3. ANTHROPIC_API_KEY (implicit: provider = anthropic)
28
- # 4. OPENAI_API_KEY (implicit: provider = openai)
29
- # 5. GOOGLE_API_KEY (implicit: provider = google)
30
- # 6. OLLAMA_HOST (implicit: provider = ollama, local inference)
31
- #
32
- # Returns nil if no key is found. Error is deferred to the caller.
33
- def self.guess_from_env
34
- if ENV['LLM_API_KEY']
35
- key = ENV['LLM_API_KEY']
36
- name = ENV.fetch('LLM_PROVIDER', 'opencode_zen').downcase
37
- elsif ENV['OPENCODE_API_KEY']
38
- key = ENV['OPENCODE_API_KEY']
39
- name = 'opencode_zen'
40
- elsif ENV['ANTHROPIC_API_KEY']
41
- key = ENV['ANTHROPIC_API_KEY']
42
- name = 'anthropic'
43
- elsif ENV['OPENAI_API_KEY']
44
- key = ENV['OPENAI_API_KEY']
45
- name = 'openai'
46
- elsif ENV['GOOGLE_API_KEY']
47
- key = ENV['GOOGLE_API_KEY']
48
- name = 'google'
49
- elsif ENV['OLLAMA_HOST']
50
- key = 'none'
51
- name = 'ollama'
52
- else
53
- return nil
54
- end
55
-
56
- factory = Providers::ALL[name]
57
- raise "Unknown LLM provider: #{name}. Available: #{Providers::ALL.keys.join(', ')}" unless factory
58
-
59
- factory.call(key)
60
- end
61
- end
62
- end