ollama-client 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,266 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ # Example: Tool Calling Pattern (as documented in README)
5
+ # Demonstrates the correct architecture: LLM outputs intent, agent executes tools
6
+ # This matches the pattern shown in README.md lines 430-500
7
+
8
+ require "json"
9
+ require_relative "../lib/ollama_client"
10
+
11
# Tool Registry - stores available tools
#
# Maps tool names to tool objects. Lookups for unknown names raise with
# the list of registered names to make typos easy to diagnose.
class ToolRegistry
  def initialize
    @entries = {}
  end

  # Register a tool under +name+ (replaces any existing entry).
  def register(name, tool)
    @entries[name] = tool
  end

  # Look up a tool by name.
  #
  # @raise [RuntimeError] when no tool is registered under +name+
  def fetch(name)
    return @entries[name] if @entries.key?(name)

    raise "Tool '#{name}' not found. Available: #{@entries.keys.join(', ')}"
  end

  # Names of all registered tools, in registration order.
  def available
    @entries.keys
  end
end
29
+
30
# Base Tool class
#
# Abstract superclass for tools: holds a name and a human-readable
# description; concrete subclasses implement #call.
class Tool
  attr_reader :name, :description

  def initialize(name:, description:)
    @name, @description = name, description
  end

  # Execute the tool with the given input/context.
  #
  # @raise [NotImplementedError] always on the base class; subclasses override
  def call(input:, context:)
    raise NotImplementedError, "Subclass must implement #call"
  end
end
43
+
44
# Example Tools

# Fake search tool: returns three canned results echoing the query.
class SearchTool < Tool
  def initialize
    super(name: "search", description: "Search for information")
  end

  # input keys: "query" (String, defaults to "default").
  # Returns { query:, results:, count: } — in real code this would call a search API.
  def call(input:, context: nil) # rubocop:disable Lint/UnusedMethodArgument
    query = input["query"] || "default"
    {
      query: query,
      results: (1..3).map { |i| "Result #{i} for: #{query}" },
      count: 3
    }
  end
end
64
+
65
# Arithmetic tool: applies one of add/subtract/multiply/divide to two operands.
class CalculateTool < Tool
  def initialize
    super(name: "calculate", description: "Perform calculations")
  end

  # input keys: "operation" (String, defaults to "add"), "a" and "b"
  # (Numeric, default 0).
  # Returns { operation:, operands:, result: }; the result is a String
  # message for division-by-zero or an unknown operation.
  def call(input:, context: nil) # rubocop:disable Lint/UnusedMethodArgument
    operation = input["operation"] || "add"
    a = input["a"] || 0
    b = input["b"] || 0

    result = case operation
             when "add" then a + b
             when "subtract" then a - b
             when "multiply" then a * b
             when "divide"
               # fdiv avoids truncating integer division (7/2 => 3.5, not 3).
               b.zero? ? "Error: Division by zero" : a.fdiv(b)
             else "Unknown operation: #{operation}"
             end

    {
      operation: operation,
      operands: { a: a, b: b },
      result: result
    }
  end
end
90
+
91
# In-memory key/value store tool (storage lives on the instance).
class StoreTool < Tool
  def initialize
    super(name: "store", description: "Store data")
    @storage = {}
  end

  # input keys: "key" (defaults to "default") and "value" (defaults to {}).
  # Saves the value under the key and returns a confirmation hash.
  def call(input:, context: nil) # rubocop:disable Lint/UnusedMethodArgument
    key = input["key"] || "default"
    @storage[key] = input["value"] || {}

    {
      key: key,
      stored: true,
      message: "Data stored successfully"
    }
  end
end
109
+
110
# Tool Router - matches README example exactly
#
# One step of the pattern: the LLM emits a structured intent (action +
# input), and the agent deterministically routes it to a registered tool.
class ToolRouter
  # JSON schema constraining the LLM decision to { action, input }.
  DECISION_SCHEMA = {
    "type" => "object",
    "required" => ["action"],
    "properties" => {
      "action" => { "type" => "string" },
      "input" => { "type" => "object" }
    }
  }.freeze

  def initialize(llm:, registry:)
    @llm = llm # Ollama::Client instance
    @registry = registry
  end

  # Ask the LLM for the next action and execute it.
  #
  # @return [Hash] { done: true } when the LLM says "finish",
  #   otherwise { tool:, output: } from executing the chosen tool
  def step(prompt:, context: {})
    # LLM outputs intent (not execution)
    decision = @llm.generate(prompt: prompt, schema: DECISION_SCHEMA)

    action = decision["action"]
    return { done: true } if action == "finish"

    # Agent executes tool (deterministic)
    tool = @registry.fetch(action)
    output = tool.call(input: decision["input"] || {}, context: context)
    { tool: tool.name, output: output }
  end
end
140
+
141
# Example usage
if __FILE__ == $PROGRAM_NAME
  # Runs one router step for an example section: prints a banner, executes
  # the step, and reports the result. Errors are rescued and printed so a
  # failure in one example does not stop the remaining ones.
  def run_example(router, title:, prompt:)
    puts "─" * 60
    puts title
    puts "─" * 60
    begin
      result = router.step(prompt: prompt, context: {})

      if result[:done]
        puts "✅ Workflow complete"
      else
        puts "Tool: #{result[:tool]}"
        puts "Output: #{JSON.pretty_generate(result[:output])}"
      end
    rescue Ollama::Error => e
      puts "❌ Error: #{e.message}"
    rescue StandardError => e
      puts "❌ Error: #{e.class}: #{e.message}"
    end
    puts
  end

  puts "=" * 60
  puts "Tool Calling Pattern Example"
  puts "=" * 60
  puts

  # Setup: LLM client, tool registry, and the router tying them together.
  client = Ollama::Client.new
  registry = ToolRegistry.new

  # Register tools
  registry.register("search", SearchTool.new)
  registry.register("calculate", CalculateTool.new)
  registry.register("store", StoreTool.new)

  # Create router
  router = ToolRouter.new(llm: client, registry: registry)

  puts "Available tools: #{registry.available.join(', ')}"
  puts

  run_example(
    router,
    title: "Example 1: Search Tool",
    prompt: "User wants to search for 'Ruby programming'. Use the search tool with query 'Ruby programming'."
  )
  run_example(
    router,
    title: "Example 2: Calculate Tool",
    prompt: "User wants to calculate 15 * 7. Use the calculate tool with operation 'multiply', a=15, b=7."
  )
  run_example(
    router,
    title: "Example 3: Store Tool",
    prompt: "User wants to store data with key 'user_preferences' and value {'theme': 'dark'}. Use the store tool."
  )
  run_example(
    router,
    title: "Example 4: Finish Action",
    prompt: "The task is complete. Use action 'finish'."
  )

  puts "=" * 60
  puts "Pattern demonstrated:"
  puts "  1. LLM outputs structured intent (via ollama-client)"
  puts "  2. Agent validates and routes to tool"
  puts "  3. Tool executes deterministically (pure Ruby)"
  puts "  4. Results returned to agent"
  puts "=" * 60
end
266
+
data/exe/ollama-client ADDED
@@ -0,0 +1,4 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require "ollama/client"
@@ -0,0 +1,157 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require_relative "messages"
5
+
6
module Ollama
  module Agent
    # Stateful executor-style agent using /api/chat + tool-calling loop.
    #
    # The LLM never executes tools. It can only request tool calls; this class
    # executes Ruby callables and feeds results back as role: "tool" messages.
    class Executor
      # Chat transcript (array of message hashes) accumulated by #run.
      attr_reader :messages

      # @param client [#chat_raw] client used for /api/chat requests
      # @param tools [Hash{String => #call}, nil] tool name => Ruby callable
      # @param max_steps [Integer] cap on assistant turns (guards tool loops)
      # @param stream [#emit, nil] optional event sink; receives :state,
      #   :token, :tool_call_detected and :final events during #run
      def initialize(client, tools:, max_steps: 20, stream: nil)
        @client = client
        @tools = tools || {}
        @max_steps = max_steps
        @stream = stream
        @messages = []
      end

      # Runs the chat loop: ask the model, execute any requested tools, feed
      # results back, repeat until the model stops requesting tools or
      # max_steps is reached.
      #
      # @param system [String] system prompt
      # @param user [String] user prompt
      # @return [String] last non-empty assistant content
      # @raise [Ollama::Error] for a tool call with no name, an unknown tool,
      #   or when max_steps elapse without any assistant content
      # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity, Metrics/BlockLength
      def run(system:, user:)
        # Fresh transcript for every run; prior history is discarded.
        @messages = [
          Messages.system(system),
          Messages.user(user)
        ]

        last_assistant_content = nil

        @max_steps.times do
          @stream&.emit(:state, state: :assistant_streaming)

          response =
            if @stream
              # Streaming path: forward content deltas as :token events and
              # surface tool-call fragments as :tool_call_detected events.
              @client.chat_raw(
                messages: @messages,
                tools: tool_definitions,
                allow_chat: true,
                stream: true
              ) do |chunk|
                delta = chunk.dig("message", "content")
                @stream.emit(:token, text: delta.to_s) if delta && !delta.to_s.empty?

                calls = chunk.dig("message", "tool_calls")
                if calls.is_a?(Array)
                  calls.each do |call|
                    name = dig(call, %w[function name]) || call["name"]
                    @stream.emit(:tool_call_detected, name: name, data: call) if name
                  end
                end
              end
            else
              # Non-streaming path: single blocking request/response.
              @client.chat_raw(messages: @messages, tools: tool_definitions, allow_chat: true)
            end

          message = response["message"] || {}
          content = message["content"]
          tool_calls = message["tool_calls"]

          # Preserve the assistant turn in history (including tool_calls if present).
          @messages << Messages.assistant(content.to_s, tool_calls: tool_calls) if content || tool_calls
          last_assistant_content = content if content && !content.empty?

          # No tool requests means the model considers itself done.
          break if tool_calls.nil? || tool_calls.empty?

          tool_calls.each do |call|
            # Accept both nested ({"function"=>{"name"=>...}}) and flat shapes.
            name = dig(call, %w[function name]) || call["name"]
            raise Ollama::Error, "Tool call missing function name: #{call.inspect}" if name.nil? || name.empty?

            args = dig(call, %w[function arguments])
            args_hash = normalize_arguments(args)

            callable = @tools[name]
            raise Ollama::Error, "Tool '#{name}' not found. Available: #{@tools.keys.sort.join(", ")}" unless callable

            @stream&.emit(:state, state: :tool_executing)
            result = invoke_tool(callable, args_hash)
            tool_content = encode_tool_result(result)

            # Echo the call id (when the API provides one) so the model can
            # associate this result with its originating call.
            tool_call_id = call["id"] || call["tool_call_id"]
            @messages << Messages.tool(content: tool_content, name: name, tool_call_id: tool_call_id)
            @stream&.emit(:state, state: :tool_result_injected)
          end
        end

        if last_assistant_content.nil?
          raise Ollama::Error,
                "Executor exceeded max_steps=#{@max_steps} (possible infinite tool loop)"
        end

        @stream&.emit(:final, text: last_assistant_content.to_s)
        last_assistant_content
      end
      # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity, Metrics/BlockLength

      private

      # Advertises every registered tool to the model, sorted by name for
      # determinism, each with a permissive accept-anything parameter schema
      # (actual argument validation is left to the tool itself).
      def tool_definitions
        @tools.keys.sort.map do |name|
          {
            type: "function",
            function: {
              name: name,
              description: "Tool: #{name}",
              parameters: {
                "type" => "object",
                "additionalProperties" => true
              }
            }
          }
        end
      end

      # Hash#dig-like traversal that tolerates string OR symbol keys at each
      # level; returns nil if any intermediate value is not a Hash.
      def dig(obj, path)
        cur = obj
        path.each do |k|
          return nil unless cur.is_a?(Hash)

          cur = cur[k] || cur[k.to_sym]
        end
        cur
      end

      # Coerces tool-call arguments into a Hash: passes Hashes through,
      # parses JSON strings, and maps nil/"" (and any other type) to {}.
      #
      # @raise [Ollama::InvalidJSONError] when a String argument is not valid JSON
      def normalize_arguments(args)
        return {} if args.nil? || args == ""
        return args if args.is_a?(Hash)

        if args.is_a?(String)
          JSON.parse(args)
        else
          {}
        end
      rescue JSON::ParserError => e
        raise Ollama::InvalidJSONError, "Failed to parse tool arguments JSON: #{e.message}. Arguments: #{args.inspect}"
      end

      # Invokes a tool callable, preferring keyword arguments and falling
      # back to a single positional hash when the keyword call fails.
      #
      # NOTE(review): this rescue also catches ArgumentErrors raised *inside*
      # the tool body, retrying it with a positional hash — confirm intended.
      def invoke_tool(callable, args_hash)
        sym_args = args_hash.transform_keys { |k| k.to_s.to_sym }

        # Prefer keyword invocation (common for Ruby tools), fall back to a single hash.
        callable.call(**sym_args)
      rescue ArgumentError
        callable.call(args_hash)
      end

      # Serializes a tool result for the role: "tool" message: Strings pass
      # through untouched, everything else is JSON-encoded with a #to_s
      # fallback for values JSON cannot represent.
      def encode_tool_result(result)
        return result if result.is_a?(String)

        JSON.generate(result)
      rescue JSON::GeneratorError
        result.to_s
      end
    end
  end
end
@@ -0,0 +1,31 @@
1
+ # frozen_string_literal: true
2
+
3
module Ollama
  module Agent
    # Small helpers for building chat message hashes.
    #
    # Each builder returns a plain Hash with a :role and stringified :content,
    # plus optional role-specific fields.
    module Messages
      class << self
        # System-instruction message.
        def system(content)
          { role: "system", content: content.to_s }
        end

        # End-user message.
        def user(content)
          { role: "user", content: content.to_s }
        end

        # Assistant turn; :tool_calls is included only when provided.
        def assistant(content, tool_calls: nil)
          { role: "assistant", content: content.to_s }.tap do |msg|
            msg[:tool_calls] = tool_calls if tool_calls
          end
        end

        # Tool results are sent back as role: "tool".
        # Some APIs require `tool_call_id` to associate results with calls;
        # :name and :tool_call_id are included only when provided.
        def tool(content:, name: nil, tool_call_id: nil)
          { role: "tool", content: content.to_s }.tap do |msg|
            msg[:name] = name if name
            msg[:tool_call_id] = tool_call_id if tool_call_id
          end
        end
      end
    end
  end
end
@@ -0,0 +1,47 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require_relative "messages"
5
+
6
module Ollama
  module Agent
    # Stateless planner-style agent using /api/generate.
    #
    # Intended for planning, classification, routing, and deterministic structured outputs.
    class Planner
      # Permissive schema: accepts any top-level JSON value when the caller
      # does not supply a schema of their own.
      ANY_JSON_SCHEMA = {
        "anyOf" => [
          { "type" => "object", "additionalProperties" => true },
          { "type" => "array" },
          { "type" => "string" },
          { "type" => "number" },
          { "type" => "integer" },
          { "type" => "boolean" },
          { "type" => "null" }
        ]
      }.freeze

      def initialize(client)
        @client = client
      end

      # @param prompt [String]
      # @param context [Hash, nil] appended to the prompt as pretty-printed JSON
      # @param schema [Hash, nil] JSON schema; defaults to ANY_JSON_SCHEMA
      # @return [Object] Parsed JSON (Hash/Array/String/Number/Boolean/Nil)
      def run(prompt:, context: nil, schema: nil)
        @client.generate(
          prompt: build_prompt(prompt, context),
          schema: schema || ANY_JSON_SCHEMA,
          strict: true
        )
      end

      private

      # Builds the final prompt string, appending context (when present and
      # non-empty) as a labelled JSON block.
      def build_prompt(prompt, context)
        text = prompt.to_s
        return text unless context && !context.empty?

        "#{text}\n\nContext (JSON):\n#{JSON.pretty_generate(context)}"
      end
    end
  end
end