jrubyagents 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,240 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rubyagents
4
# One executed agent step: the model's reasoning plus whatever it ran
# (code or tool calls) and what came back (observation or error).
ActionStep = Data.define(:step_number, :thought, :code, :tool_calls, :observation, :error, :duration, :token_usage) do
  # Everything but step_number and thought is optional.
  def initialize(step_number:, thought:, code: nil, tool_calls: nil, observation: nil, error: nil,
                 duration: 0.0, token_usage: nil)
    super
  end

  # Serializes the step, omitting fields that were never populated.
  def to_h
    optional = {
      code: code,
      tool_calls: tool_calls&.map(&:to_h),
      observation: observation,
      error: error,
      token_usage: token_usage&.to_h
    }.reject { |_key, value| !value }

    { type: "action", step_number: step_number, thought: thought, duration: duration }.merge(optional)
  end
end
20
+
21
# A planning pass produced by the agent before or between action steps.
PlanningStep = Data.define(:plan, :duration, :token_usage) do
  # Hash form; token usage appears only when it was recorded.
  def to_h
    base = { type: "planning", plan: plan, duration: duration }
    token_usage ? base.merge(token_usage: token_usage.to_h) : base
  end
end
28
+
29
# A message the user injected into the run after the initial task.
UserMessage = Data.define(:content) do
  def to_h = { type: "user_message", content: content }
end
34
+
35
# Run memory for an agent: the system prompt, the task, and an ordered log
# of steps (actions, plans, user messages) with aggregate token/duration
# accounting. Also knows how to rebuild the LLM transcript and replay a
# run to a terminal.
class Memory
  attr_reader :system_prompt, :task, :steps, :total_tokens, :total_duration

  def initialize(system_prompt:, task:)
    @system_prompt = system_prompt
    @task = task
    @steps = []
    @total_tokens = TokenUsage.new(input_tokens: 0, output_tokens: 0)
    @total_duration = 0.0
  end

  # Appends an ActionStep (numbered after the existing action steps) and
  # folds its duration/tokens into the run totals. Returns the step.
  def add_step(thought:, code: nil, tool_calls: nil, observation: nil, error: nil,
               duration: 0.0, token_usage: nil)
    record_step(
      ActionStep.new(
        step_number: action_steps.size + 1,
        thought: thought,
        code: code,
        tool_calls: tool_calls,
        observation: observation,
        error: error,
        duration: duration,
        token_usage: token_usage
      ),
      duration, token_usage
    )
  end

  # Appends a PlanningStep and updates the totals. Returns the step.
  def add_plan(plan:, duration: 0.0, token_usage: nil)
    record_step(PlanningStep.new(plan: plan, duration: duration, token_usage: token_usage),
                duration, token_usage)
  end

  # Records a mid-run user message (not counted toward totals).
  def add_user_message(message)
    @steps << UserMessage.new(content: message)
  end

  # Only the ActionSteps, in insertion order.
  def action_steps
    @steps.grep(ActionStep)
  end

  # Human-readable recap of completed action steps (fed to planning prompts).
  def progress_summary
    done = action_steps
    return "No steps completed yet." if done.empty?

    header = ["Steps completed so far:"]
    body = done.map do |step|
      status = step.error ? "failed" : "done"
      detail = (step.thought || step.observation || "no details").to_s[0, 100]
      "  #{step.step_number}. [#{status}] #{detail}"
    end
    (header + body).join("\n")
  end

  # Rebuilds the chat transcript for the LLM from the step log.
  def to_messages
    out = [
      { role: "system", content: system_prompt },
      { role: "user", content: task }
    ]

    steps.each do |step|
      case step
      when UserMessage
        out << { role: "user", content: step.content }
      when PlanningStep
        out << { role: "assistant", content: "Plan:\n#{step.plan}" }
        out << { role: "user", content: "Now proceed and carry out this plan." }
      when ActionStep
        if (assistant = build_assistant_message(step))
          out << assistant
        end

        if step.observation
          out << { role: "user", content: "Observation: #{step.observation}" }
        elsif step.error
          retry_note = "Error: #{step.error}\nNow let's retry: take care not to repeat previous errors! " \
                       "If you have retried several times, try a completely different approach."
          out << { role: "user", content: retry_note }
        end
      end
    end

    # Scrub every message so downstream encoders never see invalid bytes.
    out.each { |msg| msg[:content] = sanitize_utf8(msg[:content]) if msg[:content] }
  end

  def last_step
    steps.last
  end

  # All code from action steps, concatenated in execution order.
  def return_full_code
    action_steps.map(&:code).compact.join("\n\n")
  end

  def to_h
    {
      system_prompt: system_prompt,
      task: task,
      steps: steps.map(&:to_h),
      total_tokens: total_tokens.to_h,
      total_duration: total_duration
    }
  end

  def to_json(*args)
    require "json"
    to_h.to_json(*args)
  end

  # Pretty-prints the whole run to +io+ (styled via UI::Styles and Rouge).
  def replay(io: $stdout)
    io.puts UI::Styles.final_answer.render("Task: ") + task.to_s
    io.puts

    steps.each do |step|
      if step.is_a?(ActionStep)
        replay_action_step(step, io)
      elsif step.is_a?(PlanningStep)
        io.puts UI::Styles.plan_label.render(" Plan ")
        io.puts UI::Styles.plan_box.render(step.plan)
        replay_metrics(step, io)
      elsif step.is_a?(UserMessage)
        io.puts UI::Styles.label.render("User: ") + step.content.to_s
        io.puts
      end
    end

    footer = ["#{action_steps.size} steps", format("%.1fs total", total_duration)]
    footer << total_tokens.to_s if total_tokens.total_tokens > 0
    io.puts UI::Styles.dim.render(footer.join(" | "))
  end

  private

  def replay_action_step(step, io)
    io.puts UI::Styles.label.render("Thought: ") + step.thought if step.thought

    if step.code
      io.puts
      rouge_formatter.format(rouge_lexer.lex(step.code)).each_line do |line|
        io.puts "  #{line.rstrip}"
      end
      io.puts
    end

    step.tool_calls&.each do |call|
      rendered_args = call.function.arguments.map { |k, v| "#{k}: #{v.inspect}" }.join(", ")
      io.puts UI::Styles.label.render("Tool: ") + "#{call.function.name}(#{rendered_args})"
    end

    io.puts UI::Styles.label.render("Result: ") + step.observation.to_s[0, 200] if step.observation
    io.puts UI::Styles.error.render("Error: ") + step.error if step.error

    replay_metrics(step, io)
  end

  def replay_metrics(step, io)
    bits = []
    bits << format("%.1fs", step.duration) if step.duration > 0
    bits << step.token_usage.to_s if step.token_usage
    io.puts UI::Styles.dim.render(bits.join(" | ")) unless bits.empty?
    io.puts
  end

  def rouge_lexer
    @rouge_lexer ||= Rouge::Lexers::Ruby.new
  end

  def rouge_formatter
    @rouge_formatter ||= Rouge::Formatters::Terminal256.new(Rouge::Themes::Monokai.new)
  end

  # Converts an ActionStep back into the assistant message that produced it,
  # or nil when the step carries nothing worth showing the model.
  def build_assistant_message(step)
    if step.tool_calls
      # Tool-calling agents: the assistant turn carries the tool_calls payload.
      message = { role: "assistant" }
      message[:content] = step.thought if step.thought
      message.merge(tool_calls: step.tool_calls)
    elsif step.thought || step.code
      pieces = []
      pieces << "Thought: #{step.thought}\n" if step.thought
      pieces << "Code:\n```ruby\n#{step.code}\n```\n" if step.code
      { role: "assistant", content: pieces.join } unless pieces.empty?
    end
  end

  # Pushes a step and accumulates the totals; returns the step.
  def record_step(step, duration, token_usage)
    @steps << step
    @total_duration += duration if duration
    @total_tokens += token_usage if token_usage
    step
  end

  # Strips invalid/unmappable bytes so the transcript is valid UTF-8.
  def sanitize_utf8(str)
    return str unless str.is_a?(String)

    str.encode("UTF-8", invalid: :replace, undef: :replace, replace: "")
  end
end
240
+ end
@@ -0,0 +1,99 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rubyagents
4
# Immutable input/output token counts for one LLM call or an entire run.
TokenUsage = Data.define(:input_tokens, :output_tokens) do
  # Combined count across both directions.
  def total_tokens
    input_tokens + output_tokens
  end

  # Element-wise sum; used to accumulate usage across steps.
  def +(other)
    self.class.new(
      input_tokens: input_tokens + other.input_tokens,
      output_tokens: output_tokens + other.output_tokens
    )
  end

  def to_s = "#{total_tokens} tokens (#{input_tokens} in / #{output_tokens} out)"

  def to_h = { input_tokens: input_tokens, output_tokens: output_tokens }
end
22
+
23
# Name and arguments of a function the model asked to invoke.
ToolCallFunction = Data.define(:name, :arguments) do
  def to_h = { name: name, arguments: arguments }
end
28
+
29
# A single tool invocation requested by the model (id + function payload).
ToolCall = Data.define(:id, :function) do
  def to_h = { id: id, function: function.to_h }
end
34
+
35
# Normalized LLM response message; usage and tool calls are optional.
ChatMessage = Data.define(:role, :content, :token_usage, :tool_calls) do
  def initialize(role:, content:, token_usage: nil, tool_calls: nil)
    super
  end
end
40
+
41
# Outcome of an agent run: the final output, terminal state, step log, and
# accounting (token usage / timing).
RunResult = Data.define(:output, :state, :steps, :token_usage, :timing) do
  def initialize(output:, state:, steps: [], token_usage: nil, timing: nil)
    super
  end

  # True when the run finished in the "success" state.
  def success? = state == "success"

  def to_h
    serialized_steps = steps.map(&:to_h)
    {
      output: output,
      state: state,
      steps: serialized_steps,
      token_usage: token_usage&.to_h,
      timing: timing
    }
  end

  def to_json(*args)
    require "json"
    to_h.to_json(*args)
  end
end
63
+
64
# Abstract base for LLM adapters, plus a prefix -> adapter-class registry:
# "openai/gpt-4o" resolves via the adapter registered for "openai", any
# unregistered prefix falls back to RubyLLMAdapter with that provider, and
# a bare id goes straight to RubyLLMAdapter.
class Model
  @registry = {}

  class << self
    attr_reader :registry

    # Associates a model-id prefix (the part before "/") with an adapter class.
    def register(prefix, klass)
      @registry[prefix] = klass
    end

    # Instantiates the right adapter for a model id.
    def for(model_id)
      return Models::RubyLLMAdapter.new(model_id) unless model_id.include?("/")

      prefix, model_name = model_id.split("/", 2)
      adapter_class = @registry[prefix]
      return adapter_class.new(model_name) if adapter_class

      Models::RubyLLMAdapter.new(model_name, provider: prefix)
    end
  end

  attr_reader :model_name

  def initialize(model_name)
    @model_name = model_name
  end

  # Subclasses must implement the actual LLM call.
  def generate(messages, tools: nil, &on_stream)
    raise NotImplementedError, "#{self.class} must implement #generate"
  end
end
99
+ end
@@ -0,0 +1,158 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rubyagents
4
+ module Models
5
# Adapter that drives any RubyLLM-supported provider (OpenAI, Anthropic,
# Gemini, DeepSeek, OpenRouter, Ollama, ...) behind the Rubyagents::Model
# interface.
#
# Fixes over the previous revision:
# - dropped the unused exception binding on the ModelNotFoundError rescue;
# - load_messages no longer assigns a nil :tool_calls value when a message
#   carries an empty tool_calls array (convert returns nil in that case).
class RubyLLMAdapter < Model
  attr_reader :provider

  # model_name: provider-side model id.
  # provider: optional explicit provider symbol (from a "provider/model" prefix).
  def initialize(model_name, provider: nil)
    super(model_name)
    @provider = provider&.to_sym
  end

  # Sends the transcript (plus optional tool schemas) to the model and
  # returns a ChatMessage. Streams content chunks to +on_stream+ if given.
  # Raises Rubyagents::Error with an actionable message on API failures.
  def generate(messages, tools: nil, &on_stream)
    require "ruby_llm"
    self.class.configure_ruby_llm

    chat = build_chat
    load_messages(chat, messages)
    load_tools(chat, tools) if tools&.any?

    response = if on_stream
                 chat.complete { |chunk| on_stream.call(chunk.content) if chunk.content }
               else
                 chat.complete
               end

    extract_response(chat, response)
  rescue RubyLLM::ConfigurationError => e
    raise Error, "Model configuration error: #{e.message}. Set the appropriate API key env var."
  rescue RubyLLM::ModelNotFoundError
    # The exception message adds nothing beyond the model name, so it is not bound.
    raise Error, "Unknown model '#{model_name}'. Check the model name or set a provider prefix (e.g. 'openai/#{model_name}')."
  rescue RubyLLM::UnauthorizedError => e
    raise Error, "API authentication failed: #{e.message}. Check your API key."
  rescue RubyLLM::Error => e
    raise Error, "LLM API error: #{e.message}"
  end

  # One-time, process-wide RubyLLM configuration from environment variables.
  def self.configure_ruby_llm
    return if @configured

    RubyLLM.configure do |config|
      config.openai_api_key = ENV["OPENAI_API_KEY"] if ENV["OPENAI_API_KEY"]
      config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"] if ENV["ANTHROPIC_API_KEY"]
      config.gemini_api_key = ENV["GEMINI_API_KEY"] if ENV["GEMINI_API_KEY"]
      config.deepseek_api_key = ENV["DEEPSEEK_API_KEY"] if ENV["DEEPSEEK_API_KEY"]
      config.openrouter_api_key = ENV["OPENROUTER_API_KEY"] if ENV["OPENROUTER_API_KEY"]
      config.ollama_api_base = ENV["OLLAMA_HOST"] if ENV["OLLAMA_HOST"]
    end

    @configured = true
  end

  private

  def build_chat
    RubyLLM::Chat.new(model: model_name, provider: @provider)
  end

  # Replays our role/content hashes into a RubyLLM chat.
  def load_messages(chat, messages)
    messages.each do |msg|
      role = msg[:role]
      content = msg[:content]

      case role
      when "system"
        chat.with_instructions(content)
      when "assistant"
        attrs = { role: :assistant, content: content }
        converted = convert_tool_calls_to_ruby_llm(msg[:tool_calls])
        # Only set the key when there is something to convert; otherwise a
        # nil value would be passed through to RubyLLM.
        attrs[:tool_calls] = converted if converted
        chat.add_message(attrs)
      else
        chat.add_message(role: role.to_sym, content: content || "")
      end
    end
  end

  def load_tools(chat, tool_schemas)
    tool_schemas.each { |schema| chat.with_tool(build_stub_tool(schema)) }
  end

  # Builds a RubyLLM::Tool subclass from a JSON-schema-style tool description.
  # The tool never really executes: it halts so the agent loop can intercept
  # the call and run the real implementation itself.
  def build_stub_tool(schema)
    tool_name = schema[:name]
    tool_desc = schema[:description]
    params = schema[:parameters] || {}
    properties = params[:properties] || params["properties"] || {}
    required_list = (params[:required] || params["required"] || []).map(&:to_s)

    klass = Class.new(RubyLLM::Tool) do
      description tool_desc

      properties.each do |pname, pschema|
        ptype = (pschema[:type] || pschema["type"] || "string").to_s
        pdesc = pschema[:description] || pschema["description"]
        is_req = required_list.include?(pname.to_s)
        param pname.to_sym, type: ptype, desc: pdesc, required: is_req
      end

      define_method(:execute) { |**_kwargs| halt("tool_called") }
    end

    instance = klass.new
    # The anonymous class has no name; expose the schema's tool name instead.
    instance.define_singleton_method(:name) { tool_name }
    instance
  end

  def extract_response(chat, response)
    if defined?(RubyLLM::Tool::Halt) && response.is_a?(RubyLLM::Tool::Halt)
      # Tool calls halted the loop; find the assistant message with tool_calls
      message = chat.messages.reverse.find { |m| m.role == :assistant && m.tool_call? }
      build_chat_message(message)
    else
      build_chat_message(response)
    end
  end

  def build_chat_message(message)
    ChatMessage.new(
      role: "assistant",
      content: message.content,
      token_usage: extract_token_usage(message),
      tool_calls: extract_tool_calls(message)
    )
  end

  # Returns nil when the provider reported no usage at all.
  def extract_token_usage(message)
    input = message.input_tokens
    output = message.output_tokens
    return nil unless input || output

    TokenUsage.new(input_tokens: input || 0, output_tokens: output || 0)
  end

  def extract_tool_calls(message)
    return nil unless message.tool_call?

    message.tool_calls.map do |_id, tc|
      ToolCall.new(
        id: tc.id,
        function: ToolCallFunction.new(name: tc.name, arguments: tc.arguments || {})
      )
    end
  end

  # Our ToolCall structs -> RubyLLM's id-keyed ToolCall hash. Returns nil
  # when there is nothing to convert, so callers can omit the key entirely.
  def convert_tool_calls_to_ruby_llm(tool_calls)
    return nil unless tool_calls&.any?

    tool_calls.each_with_object({}) do |tc, hash|
      hash[tc.id] = RubyLLM::ToolCall.new(
        id: tc.id,
        name: tc.function.name,
        arguments: tc.function.arguments || {}
      )
    end
  end
end
157
+ end
158
+ end
@@ -0,0 +1,123 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Rubyagents
4
# Optional per-agent prompt overrides; any nil field falls back to the
# built-in defaults in Prompt.
PromptTemplates = Data.define(:system_prompt, :planning_initial, :planning_update) do
  def initialize(system_prompt: nil, planning_initial: nil, planning_update: nil)
    super
  end
end
7
+
8
# Built-in prompt templates for the two agent styles plus planning.
# Fix: the worked Fibonacci example previously used `8.times`, which leaves
# b == 34 (the 9th Fibonacci number), contradicting the stated task of
# finding the 10th; `9.times` yields 55 as intended.
module Prompt
  CODE_AGENT_SYSTEM = <<~'PROMPT'
    You are an expert Ruby programmer and problem solver. You solve tasks by writing and executing Ruby code.

    On each step, you will write a short Thought, then write Ruby code to make progress on the task.

    ## Rules
    - Always provide your reasoning in "Thought:" before writing code
    - Write Ruby code inside a ```ruby code block
    - Your code has access to these tools as methods: {{tool_descriptions}}
    - Variables persist between steps - you can build on previous results
    - Use puts to print intermediate values for debugging
    - Keep code simple and direct

    ## Available Ruby libraries
    Your code runs in a full Ruby environment. You can `require` and use:
    - Ruby standard library: net/http, uri, json, csv, fileutils, open-uri, date, time, set, etc.
    - Any gems installed in the current environment (use the list_gems tool to see them)
    Use these freely -- e.g. `require "net/http"` to fetch URLs, `require "json"` to parse JSON, etc.

    ## CRITICAL: final_answer rules
    - Call final_answer(answer: "...") ONLY when you have the actual, complete answer
    - NEVER call final_answer in the same step where you gather data with tools
    - First step: gather information. Next step: process it. Final step: call final_answer with the result.
    - If a tool returns data you haven't read yet, do NOT call final_answer - wait for the next step

    ## Response format

    Thought: <your reasoning about what to do next>
    Code:
    ```ruby
    <your Ruby code here>
    ```

    ## Example

    Task: What is the 10th Fibonacci number?

    Thought: I'll write a simple iterative Fibonacci computation.
    Code:
    ```ruby
    a, b = 0, 1
    9.times { a, b = b, a + b }
    final_answer(answer: "The 10th Fibonacci number is #{b}")
    ```

    Now solve the following task. Think step by step and write Ruby code to find the answer.
  PROMPT

  TOOL_CALLING_AGENT_SYSTEM = <<~'PROMPT'
    You are an expert problem solver. You solve tasks by calling the available tools.

    On each step, think about what to do next, then call one or more tools to make progress.

    ## Rules
    - Think step by step about the problem
    - Use the available tools to gather information and solve the task
    - Variables do NOT persist between steps - each tool call is independent
    - When you have the final answer, call the final_answer tool with your result

    ## CRITICAL: final_answer rules
    - Call the final_answer tool ONLY when you have the actual, complete answer
    - NEVER call final_answer in the same step where you gather data with other tools
    - First step: gather information. Next step: process it. Final step: call final_answer with the result.

    ## Available tools
    {{tool_descriptions}}

    Now solve the following task. Think step by step and use tools to find the answer.
  PROMPT

  INITIAL_PLAN = <<~'PROMPT'
    You are a planning assistant. Based on the task, create a step-by-step plan.

    Write a concise numbered plan (3-7 steps) for how to complete the task.
    Focus on what needs to be done to solve the problem.

    Respond with just the plan, no code.
  PROMPT

  UPDATE_PLAN = <<~'PROMPT'
    You are a planning assistant. Based on the task and the work done so far, update the plan.

    ## Progress so far
    {{progress_summary}}

    Write a concise numbered plan (3-7 steps) for how to complete the remaining work.
    Focus on what still needs to be done, not what's already been accomplished.

    Respond with just the plan, no code.
  PROMPT

  # System prompt for code agents, with tool docs spliced in.
  def self.code_agent_system(tools:)
    tool_descriptions = tools.map { |t| t.class.to_prompt }.join("\n\n")
    CODE_AGENT_SYSTEM.gsub("{{tool_descriptions}}", tool_descriptions)
  end

  # System prompt for tool-calling agents, with tool docs spliced in.
  def self.tool_calling_agent_system(tools:)
    tool_descriptions = tools.map { |t| t.class.to_prompt }.join("\n\n")
    TOOL_CALLING_AGENT_SYSTEM.gsub("{{tool_descriptions}}", tool_descriptions)
  end

  def self.initial_plan
    INITIAL_PLAN
  end

  def self.update_plan(progress_summary:)
    UPDATE_PLAN.gsub("{{progress_summary}}", progress_summary)
  end

  # Backward compatibility
  def self.planning
    INITIAL_PLAN
  end
end
123
+ end