brute 0.1.9 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 5a36d054875f1465a0e9bfc380187c98ff08f837f8477f062f784652246d4256
4
- data.tar.gz: a7c4df2710346a213b3ded2ee9be7d84b0bbe50d04c9e1ac9eaa679b9a35a7b2
3
+ metadata.gz: 2e5a610b24378a83f8ce97c8e251a4325705e17a455aa95e9f9c14efe581845a
4
+ data.tar.gz: 43a0dc2f5e1c2d5d3668b00133278956d800caaeb6f955fb5824d4091da16455
5
5
  SHA512:
6
- metadata.gz: 7893f212130fc7dd94d80e3bf6b926d47ab86e37fc629866bbbec674121d4e8083791a2b66b4437af98d2cb694c0dc89655519844f5797fe1f17e8652d4ddd01
7
- data.tar.gz: 1c48113815f9ad3f2d068252e851167b369e22ab8c1d5bd1d314387acab5f1e4169a49cde91f7c5bbb4847ade234779712f3f7b5d3fcc43b6c76b80478017a26
6
+ metadata.gz: 6482e969a2865fc56aaa24f3bf2505f7418bf4b03b7d9c96454ade03e9e715511d33ec7d878cef8d3e1dab95b9ed514bd9eb42890229f426268271ee28b8690f
7
+ data.tar.gz: 7d16e0ccbf71f5ed106b3a073a122668004d9b729f7efdc0d2a03b3c9a3a8483b9b7a9e57546bbb82820de6be5c361a8ae44f9cdae40e64d2b85be641ecd2e9c
@@ -0,0 +1,111 @@
1
# frozen_string_literal: true

require "net/http"
require "json"

module Brute
  module Providers
    # Fetches and caches model metadata from the models.dev catalog.
    #
    # Quacks like llm.rb's provider.models so that the REPL's model
    # picker can call:
    #
    #   provider.models.all.select(&:chat?)
    #
    # Models are fetched from https://models.dev/api.json and cached
    # in-memory for the lifetime of the process (with a TTL).
    class ModelsDev
      CATALOG_URL = "https://models.dev/api.json"
      CACHE_TTL = 3600 # seconds (1 hour)

      # Created eagerly: the previous lazy `@mutex ||= Mutex.new` could
      # itself race on first use and hand two threads two different
      # mutexes, defeating the synchronization it was meant to provide.
      CACHE_MUTEX = Mutex.new

      # One catalog entry, shaped like llm.rb's model objects.
      # chat? is always true — every entry in this catalog is a chat model.
      ModelEntry = Struct.new(:id, :name, :chat?, :cost, :limit, :reasoning, :tool_call, keyword_init: true)

      # @param provider [LLM::Provider] the provider instance (for delegating execute/headers)
      # @param provider_id [String] the provider key in models.dev (e.g., "opencode", "opencode-go")
      def initialize(provider:, provider_id: "opencode")
        @provider = provider
        @provider_id = provider_id
      end

      # Returns all models for this provider from the models.dev catalog,
      # sorted by id. Empty when the provider id is unknown or the
      # catalog could not be fetched.
      # @return [Array<ModelEntry>]
      def all
        fetch_provider_models.map do |id, model|
          ModelEntry.new(
            id: id,
            name: model["name"] || id,
            chat?: true,
            cost: model["cost"],
            limit: model["limit"],
            reasoning: model["reasoning"] || false,
            tool_call: model["tool_call"] || false
          )
        end.sort_by(&:id)
      end

      private

      # The "models" hash for @provider_id, or {} when the provider is
      # absent from the catalog.
      def fetch_provider_models
        provider_data = self.class.fetch_catalog[@provider_id]
        return {} unless provider_data

        provider_data["models"] || {}
      end

      class << self
        # Fetch the models.dev catalog, with in-memory caching.
        # Thread-safe: all cache state is read and written under
        # CACHE_MUTEX. NOTE: a failed download is cached as {} for the
        # TTL as well, deliberately, so a dead network does not block
        # every model-picker call on a 10s+ timeout.
        def fetch_catalog
          CACHE_MUTEX.synchronize do
            if @catalog && @fetched_at && (Time.now - @fetched_at < CACHE_TTL)
              return @catalog
            end

            @catalog = download_catalog
            @fetched_at = Time.now
            @catalog
          end
        end

        # Force a cache refresh on next access.
        def invalidate_cache!
          CACHE_MUTEX.synchronize do
            @catalog = nil
            @fetched_at = nil
          end
        end

        private

        # GET the catalog JSON. Returns {} (after printing a warning) on
        # any network/HTTP/parse failure so the provider still works
        # with default_model, just without a model list.
        def download_catalog
          uri = URI.parse(CATALOG_URL)
          http = Net::HTTP.new(uri.host, uri.port)
          http.use_ssl = true
          http.open_timeout = 10
          http.read_timeout = 30

          request = Net::HTTP::Get.new(uri.request_uri)
          request["User-Agent"] = "brute/#{Brute::VERSION}"
          request["Accept"] = "application/json"

          response = http.request(request)

          unless response.is_a?(Net::HTTPSuccess)
            raise "Failed to fetch models.dev catalog: HTTP #{response.code}"
          end

          JSON.parse(response.body)
        rescue => e
          warn "[brute] Warning: Could not fetch models.dev catalog: #{e.message}"
          {}
        end
      end
    end
  end
end
@@ -0,0 +1,38 @@
1
# frozen_string_literal: true

module LLM
  ##
  # OpenAI-compatible provider for the OpenCode Go API gateway.
  #
  # OpenCode Go is the low-cost subscription plan with a restricted
  # (lite) model list. Same gateway as Zen, different endpoint path.
  #
  # @example
  #   llm = LLM::OpencodeGo.new(key: ENV["OPENCODE_API_KEY"])
  #   ctx = LLM::Context.new(llm)
  #   ctx.talk "Hello from brute"
  #
  class OpencodeGo < OpencodeZen
    ##
    # @return [Symbol] the provider identifier
    def name = :opencode_go

    ##
    # Models from the models.dev catalog.
    #
    # NOTE: the Go gateway only accepts lite-tier models, but models.dev
    # does not distinguish Zen from Go tiers, so the full catalog is
    # shown; the gateway returns an error for unsupported models.
    # @return [Brute::Providers::ModelsDev]
    def models
      Brute::Providers::ModelsDev.new(provider: self, provider_id: "opencode")
    end

    private

    # Completions live under the Go-tier path on the shared Zen gateway.
    def completions_path = "/zen/go/v1/chat/completions"
  end
end
@@ -0,0 +1,82 @@
1
# frozen_string_literal: true

# Ensure the OpenAI provider is loaded (llm.rb lazy-loads providers).
require "llm/providers/openai" unless defined?(LLM::OpenAI)

module LLM
  ##
  # OpenAI-compatible provider for the OpenCode Zen API gateway.
  #
  # OpenCode Zen is a curated model gateway at opencode.ai that proxies
  # requests to upstream LLM providers (Anthropic, OpenAI, Google, etc.).
  # Every model is reached through the OpenAI-compatible chat completions
  # endpoint; format conversion happens inside the gateway.
  #
  # @example
  #   llm = LLM::OpencodeZen.new(key: ENV["OPENCODE_API_KEY"])
  #   ctx = LLM::Context.new(llm)
  #   ctx.talk "Hello from brute"
  #
  # @example Anonymous access (free models only)
  #   llm = LLM::OpencodeZen.new(key: "public")
  #   ctx = LLM::Context.new(llm)
  #   ctx.talk "Hello"
  #
  class OpencodeZen < OpenAI
    HOST = "opencode.ai"

    ##
    # @param key [String] OpenCode API key, or "public" for anonymous access
    # @param (see LLM::Provider#initialize)
    def initialize(key: "public", **)
      super(host: HOST, key: key, **)
    end

    ##
    # @return [Symbol] the provider identifier
    def name = :opencode_zen

    ##
    # The default model (Claude Sonnet 4, the most common Zen model).
    # @return [String]
    def default_model = "claude-sonnet-4-20250514"

    ##
    # Models from the models.dev catalog for the opencode provider.
    # @return [Brute::Providers::ModelsDev]
    def models
      Brute::Providers::ModelsDev.new(provider: self, provider_id: "opencode")
    end

    # -- Unsupported sub-APIs --

    def responses = raise(NotImplementedError, "Use chat completions via the Zen gateway")

    # The gateway exposes chat completions only; every other OpenAI
    # sub-API raises.
    %i[images audio files moderations vector_stores].each do |api|
      define_method(api) { raise NotImplementedError, "Not supported via Zen gateway" }
    end

    private

    # All Zen traffic goes through this single OpenAI-compatible path.
    def completions_path = "/zen/v1/chat/completions"

    # Standard JSON headers plus the bearer key and a client marker.
    def headers
      lock do
        base = @headers || {}
        base.merge(
          "Content-Type" => "application/json",
          "Authorization" => "Bearer #{@key}",
          "x-opencode-client" => "brute"
        )
      end
    end
  end
end
@@ -0,0 +1,108 @@
1
# frozen_string_literal: true

require "shellwords"

module Brute
  module Providers
    # A pseudo-LLM provider that executes user input as code via the
    # existing Brute::Tools::Shell tool.
    #
    # Models correspond to interpreters:
    #
    #   bash   - pass-through (default)
    #   ruby   - ruby -e '...'
    #   python - python3 -c '...'
    #   nix    - nix eval --expr '...'
    #
    # #complete returns a synthetic response carrying a single "shell"
    # tool call. The orchestrator executes it through the normal
    # pipeline — all middleware (message tracking, session persistence,
    # token tracking, etc.) fires as usual.
    class Shell
      MODELS = %w[bash ruby python nix].freeze

      # Maps a model name to a lambda that wraps the raw input in the
      # matching interpreter invocation (shell-escaped where needed).
      INTERPRETERS = {
        "bash" => ->(cmd) { cmd },
        "ruby" => ->(cmd) { "ruby -e #{Shellwords.escape(cmd)}" },
        "python" => ->(cmd) { "python3 -c #{Shellwords.escape(cmd)}" },
        "nix" => ->(cmd) { "nix eval --expr #{Shellwords.escape(cmd)}" },
      }.freeze

      # ── LLM::Provider duck-type interface ──────────────────────────

      def name = :shell
      def default_model = "bash"
      def user_role = :user
      def tool_role = :tool
      def assistant_role = :assistant
      def system_role = :system
      def tracer = LLM::Tracer::Null.new(self)

      # Build a synthetic completion for +prompt+.
      # @param prompt [String, Array, #to_a] whatever ctx.talk sends
      # @param params [Hash] :model and :tools are honored
      # @return [ShellResponse]
      def complete(prompt, params = {})
        chosen = params[:model]&.to_s || default_model
        toolset = params[:tools] || []
        source = extract_text(prompt)

        # nil source means tool results came back (second call) — hand
        # the orchestrator an empty assistant response so it exits.
        return ShellResponse.new(model: chosen, tools: toolset) if source.nil?

        wrapper = INTERPRETERS.fetch(chosen, INTERPRETERS["bash"])
        ShellResponse.new(command: wrapper.call(source), model: chosen, tools: toolset)
      end

      # For the REPL model picker: provider.models.all.select(&:chat?)
      def models = ModelList.new(MODELS)

      # ── Internals ──────────────────────────────────────────────────

      private

      # Pull the user's text out of whatever prompt format ctx.talk
      # sends. Returns nil when the prompt contains tool results (the
      # second round-trip) so #complete knows to return an empty response.
      def extract_text(prompt)
        return prompt if prompt.is_a?(String)

        if prompt.is_a?(::Array)
          return nil if prompt.any? { |item| LLM::Function::Return === item }

          latest = prompt.reverse_each.find { |m| m.respond_to?(:role) && m.role.to_s == "user" }
          return latest&.content.to_s
        end

        return prompt.to_s unless prompt.respond_to?(:to_a)

        entries = prompt.to_a
        return nil if entries.any? { |m| m.respond_to?(:content) && LLM::Function::Return === m.content }

        latest = entries.reverse_each.find { |m| m.respond_to?(:role) && m.role.to_s == "user" }
        latest&.content.to_s
      end

      # ── ModelList ──────────────────────────────────────────────────

      # Minimal object that quacks like provider.models so the REPL's
      # fetch_models can call provider.models.all.select(&:chat?).
      class ModelList
        ModelEntry = Struct.new(:id, :chat?, keyword_init: true)

        # @param names [Array<String>] interpreter model names
        def initialize(names)
          @entries = names.map { |label| ModelEntry.new(id: label, chat?: true) }
        end

        # @return [Array<ModelEntry>]
        def all = @entries
      end
    end
  end
end
1
+ # frozen_string_literal: true
2
+
3
+ require "securerandom"
4
+
5
+ module Brute
6
+ module Providers
7
+ # Synthetic completion response returned by Brute::Providers::Shell.
8
+ #
9
+ # When +command+ is present, the response contains a single assistant
10
+ # message with a "shell" tool call. The orchestrator picks it up and
11
+ # executes Brute::Tools::Shell through the normal pipeline.
12
+ #
13
+ # When +command+ is nil (tool results round-trip), the response
14
+ # contains an empty assistant message with no tool calls, causing
15
+ # the orchestrator loop to exit.
16
+ #
17
+ class ShellResponse
18
+ def initialize(command: nil, model: "bash", tools: [])
19
+ @command = command
20
+ @model_name = model
21
+ @tools = tools || []
22
+ end
23
+
24
+ def messages
25
+ return [empty_assistant] if @command.nil?
26
+
27
+ call_id = "shell_#{SecureRandom.hex(8)}"
28
+ tool_call = LLM::Object.from(
29
+ id: call_id,
30
+ name: "shell",
31
+ arguments: { "command" => @command },
32
+ )
33
+ original = [{
34
+ "type" => "tool_use",
35
+ "id" => call_id,
36
+ "name" => "shell",
37
+ "input" => { "command" => @command },
38
+ }]
39
+
40
+ [LLM::Message.new(:assistant, "", {
41
+ tool_calls: [tool_call],
42
+ original_tool_calls: original,
43
+ tools: @tools,
44
+ })]
45
+ end
46
+ alias_method :choices, :messages
47
+
48
+ def model
49
+ @model_name
50
+ end
51
+
52
+ def input_tokens
53
+ 0
54
+ end
55
+
56
+ def output_tokens
57
+ 0
58
+ end
59
+
60
+ def reasoning_tokens
61
+ 0
62
+ end
63
+
64
+ def total_tokens
65
+ 0
66
+ end
67
+
68
+ def content
69
+ messages.find(&:assistant?)&.content
70
+ end
71
+
72
+ def content!
73
+ LLM.json.load(content)
74
+ end
75
+
76
+ def reasoning_content
77
+ nil
78
+ end
79
+
80
+ def usage
81
+ LLM::Usage.new(
82
+ input_tokens: 0,
83
+ output_tokens: 0,
84
+ reasoning_tokens: 0,
85
+ total_tokens: 0,
86
+ )
87
+ end
88
+
89
+ # Contract must be included AFTER method definitions —
90
+ # LLM::Contract checks that all required methods exist at include time.
91
+ include LLM::Contract::Completion
92
+
93
+ private
94
+
95
+ def empty_assistant
96
+ LLM::Message.new(:assistant, "")
97
+ end
98
+ end
99
+ end
100
+ end
@@ -26,9 +26,17 @@ module Brute
26
26
  # prepare-time, then appends conditional sections based on runtime state.
27
27
  def self.default
28
28
  build do |prompt, ctx|
29
- # Provider-specific base stack
29
+ # Provider-specific base stack.
30
+ # For gateway providers (opencode_zen, opencode_go), infer the
31
+ # upstream model family from the model name so we use the most
32
+ # appropriate prompt stack (e.g., anthropic stack for claude-*).
30
33
  provider = ctx[:provider_name].to_s
31
- STACKS.fetch(provider, STACKS["default"]).each do |mod|
34
+ stack_key = if provider.start_with?("opencode")
35
+ infer_stack_from_model(ctx[:model_name].to_s)
36
+ else
37
+ provider
38
+ end
39
+ STACKS.fetch(stack_key, STACKS["default"]).each do |mod|
32
40
  prompt << mod.call(ctx)
33
41
  end
34
42
 
@@ -114,6 +122,21 @@ module Brute
114
122
  ],
115
123
  }.freeze
116
124
 
125
# Infer the best prompt stack from a model name.
# Used for gateway providers that route to multiple upstream model families.
# Markers are matched case-insensitively on word boundaries; anything
# unrecognized falls back to the "default" stack.
def self.infer_stack_from_model(model_name)
  stack_markers = {
    "anthropic" => [/\bclaude\b/i, /\bbig.?pickle\b/i],
    "openai" => [/\bgpt\b/i, /\bo[134]\b/i, /\bcodex\b/i],
    "google" => [/\bgemini\b/i, /\bgemma\b/i],
  }
  match = stack_markers.find { |_stack, markers| markers.any? { |re| re.match?(model_name) } }
  match ? match.first : "default"
end
139
+
117
140
  def initialize(block)
118
141
  @block = block
119
142
  end
@@ -28,7 +28,31 @@ module Brute
28
28
  rounds += 1
29
29
  end
30
30
 
31
- {result: res.content}
31
+ {result: extract_content(res, sub)}
32
+ end
33
+
34
+ private
35
+
36
# Safely extract text content from the sub-agent response.
#
# When the LLM returns only tool calls (no text content block),
# res.content raises NoMethodError because the response adapter's
# choices array is empty (it only maps over text blocks), or
# returns nil when the response has no text. Fall back to the
# last assistant text in the conversation history.
#
# @param res [#content] the sub-agent's final response
# @param context [#messages] the sub-agent conversation
# @return [String] extracted text, or a placeholder when none exists
def extract_content(res, context)
  text = begin
    res.content
  rescue NoMethodError
    nil
  end
  return text if text.is_a?(::String) && !text.empty?

  # reverse_each walks the history backwards without allocating the
  # two intermediate arrays the old select + reverse + find built.
  fallback = context.messages.to_a.reverse_each.find do |msg|
    msg.assistant? && msg.content.is_a?(::String) && !msg.content.empty?
  end
  fallback&.content || "(sub-agent completed but produced no text response)"
end
33
57
  end
34
58
  end
data/lib/brute/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Brute
4
- VERSION = "0.1.9"
4
+ VERSION = "0.2.0"
5
5
  end
data/lib/brute.rb CHANGED
@@ -90,6 +90,13 @@ require_relative 'brute/tools/todo_read'
90
90
  require_relative 'brute/tools/delegate'
91
91
  require_relative 'brute/tools/question'
92
92
 
93
+ # Providers
94
+ require_relative 'brute/providers/shell_response'
95
+ require_relative 'brute/providers/shell'
96
+ require_relative 'brute/providers/models_dev'
97
+ require_relative 'brute/providers/opencode_zen'
98
+ require_relative 'brute/providers/opencode_go'
99
+
93
100
  # Orchestrator (depends on tools, middleware, and infrastructure)
94
101
  require_relative 'brute/orchestrator'
95
102
 
@@ -139,10 +146,14 @@ module Brute
139
146
  'google' => ->(key) { LLM.google(key: key) },
140
147
  'deepseek' => ->(key) { LLM.deepseek(key: key) },
141
148
  'ollama' => ->(key) { LLM.ollama(key: key) },
142
- 'xai' => ->(key) { LLM.xai(key: key) }
149
+ 'xai' => ->(key) { LLM.xai(key: key) },
150
+ 'opencode_zen' => ->(key) { LLM::OpencodeZen.new(key: key) },
151
+ 'opencode_go' => ->(key) { LLM::OpencodeGo.new(key: key) },
152
+ 'shell' => ->(_key) { Providers::Shell.new },
143
153
  }.freeze
144
154
 
145
155
  # List provider names that have API keys configured in the environment.
156
+ # The shell provider is always available (no key needed).
146
157
  def self.configured_providers
147
158
  PROVIDERS.keys.select { |name| api_key_for(name) }
148
159
  end
@@ -161,6 +172,14 @@ module Brute
161
172
 
162
173
  # Look up the API key for a given provider name.
163
174
  def self.api_key_for(name)
175
+ # Shell provider needs no key.
176
+ return "none" if name == "shell"
177
+
178
+ # OpenCode providers: check OPENCODE_API_KEY, fall back to "public" for anonymous access.
179
+ if name == "opencode_zen" || name == "opencode_go"
180
+ return ENV["OPENCODE_API_KEY"] || "public"
181
+ end
182
+
164
183
  # Explicit generic key always works
165
184
  return ENV["LLM_API_KEY"] if ENV["LLM_API_KEY"]
166
185
 
@@ -178,6 +197,7 @@ module Brute
178
197
  # 2. ANTHROPIC_API_KEY (implicit: provider = anthropic)
179
198
  # 3. OPENAI_API_KEY (implicit: provider = openai)
180
199
  # 4. GOOGLE_API_KEY (implicit: provider = google)
200
+ # 5. OPENCODE_API_KEY (implicit: provider = opencode_zen)
181
201
  #
182
202
  # Returns nil if no key is found. Error is deferred to Orchestrator#run.
183
203
  def self.resolve_provider
@@ -193,6 +213,9 @@ module Brute
193
213
  elsif ENV['GOOGLE_API_KEY']
194
214
  key = ENV['GOOGLE_API_KEY']
195
215
  name = 'google'
216
+ elsif ENV['OPENCODE_API_KEY']
217
+ key = ENV['OPENCODE_API_KEY']
218
+ name = 'opencode_zen'
196
219
  else
197
220
  return nil
198
221
  end
metadata CHANGED
@@ -1,13 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: brute
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.9
4
+ version: 0.2.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Brute Contributors
8
8
  bindir: bin
9
9
  cert_chain: []
10
- date: 1980-01-01 00:00:00.000000000 Z
10
+ date: 1980-01-02 00:00:00.000000000 Z
11
11
  dependencies:
12
12
  - !ruby/object:Gem::Dependency
13
13
  name: async
@@ -155,6 +155,11 @@ files:
155
155
  - lib/brute/prompts/text/tool_usage/google.txt
156
156
  - lib/brute/prompts/tone_and_style.rb
157
157
  - lib/brute/prompts/tool_usage.rb
158
+ - lib/brute/providers/models_dev.rb
159
+ - lib/brute/providers/opencode_go.rb
160
+ - lib/brute/providers/opencode_zen.rb
161
+ - lib/brute/providers/shell.rb
162
+ - lib/brute/providers/shell_response.rb
158
163
  - lib/brute/session.rb
159
164
  - lib/brute/skill.rb
160
165
  - lib/brute/snapshot_store.rb