scout-ai 1.0.0 → 1.0.1

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/.vimproject +80 -15
  3. data/README.md +296 -0
  4. data/Rakefile +2 -0
  5. data/VERSION +1 -1
  6. data/doc/Agent.md +279 -0
  7. data/doc/Chat.md +258 -0
  8. data/doc/LLM.md +446 -0
  9. data/doc/Model.md +513 -0
  10. data/doc/RAG.md +129 -0
  11. data/lib/scout/llm/agent/chat.rb +51 -1
  12. data/lib/scout/llm/agent/delegate.rb +39 -0
  13. data/lib/scout/llm/agent/iterate.rb +44 -0
  14. data/lib/scout/llm/agent.rb +42 -21
  15. data/lib/scout/llm/ask.rb +38 -6
  16. data/lib/scout/llm/backends/anthropic.rb +147 -0
  17. data/lib/scout/llm/backends/bedrock.rb +1 -1
  18. data/lib/scout/llm/backends/ollama.rb +23 -29
  19. data/lib/scout/llm/backends/openai.rb +34 -40
  20. data/lib/scout/llm/backends/responses.rb +158 -110
  21. data/lib/scout/llm/chat.rb +250 -94
  22. data/lib/scout/llm/embed.rb +4 -4
  23. data/lib/scout/llm/mcp.rb +28 -0
  24. data/lib/scout/llm/parse.rb +1 -0
  25. data/lib/scout/llm/rag.rb +9 -0
  26. data/lib/scout/llm/tools/call.rb +66 -0
  27. data/lib/scout/llm/tools/knowledge_base.rb +158 -0
  28. data/lib/scout/llm/tools/mcp.rb +59 -0
  29. data/lib/scout/llm/tools/workflow.rb +69 -0
  30. data/lib/scout/llm/tools.rb +58 -143
  31. data/lib/scout-ai.rb +1 -0
  32. data/scout-ai.gemspec +31 -18
  33. data/scout_commands/agent/ask +28 -71
  34. data/scout_commands/documenter +148 -0
  35. data/scout_commands/llm/ask +2 -2
  36. data/scout_commands/llm/server +319 -0
  37. data/share/server/chat.html +138 -0
  38. data/share/server/chat.js +468 -0
  39. data/test/scout/llm/backends/test_anthropic.rb +134 -0
  40. data/test/scout/llm/backends/test_openai.rb +45 -6
  41. data/test/scout/llm/backends/test_responses.rb +124 -0
  42. data/test/scout/llm/test_agent.rb +0 -70
  43. data/test/scout/llm/test_ask.rb +3 -1
  44. data/test/scout/llm/test_chat.rb +43 -1
  45. data/test/scout/llm/test_mcp.rb +29 -0
  46. data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
  47. data/test/scout/llm/tools/test_mcp.rb +11 -0
  48. data/test/scout/llm/tools/test_workflow.rb +39 -0
  49. metadata +56 -17
  50. data/README.rdoc +0 -18
  51. data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
  52. data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
  53. data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
  54. data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
  55. data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
  56. data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
  57. data/python/scout_ai/atcold/plot_lib.py +0 -141
  58. data/python/scout_ai/atcold/spiral.py +0 -27
  59. data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
  60. data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
  61. data/python/scout_ai/language_model.py +0 -70
  62. /data/{python/scout_ai/atcold/__init__.py → test/scout/llm/tools/test_call.rb} +0 -0
data/lib/scout/llm/backends/responses.rb:

```diff
@@ -15,44 +15,49 @@ module LLM
         mime = "image/extension"
       end
 
-      base64_image = Base64.strict_encode64(file_content)
+      base64_string = Base64.strict_encode64(file_content)
 
-      "data:#{mime};base64,#{base64_image}"
+      "data:#{mime};base64,#{base64_string}"
     end
 
     def self.encode_pdf(path)
       file_content = File.binread(path) # Replace with your file name
-      Base64.strict_encode64(file_content)
-    end
-    def self.tool_response(tool_call, &block)
-      tool_call_id = tool_call.dig("id").sub(/^fc_/, '')
-      function_name = tool_call.dig("function", "name")
-      function_arguments = tool_call.dig("function", "arguments")
-      function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
-      IndiferentHash.setup function_arguments
-      function_response = block.call function_name, function_arguments
-
-      content = case function_response
-                when nil
-                  "success"
-                else
-                  function_response
-                end
-      content = content.to_s if Numeric === content
+      base64_string = Base64.strict_encode64(file_content)
+
+      "data:application/pdf;base64,#{base64_string}"
     end
 
+    #def self.tool_response(tool_call, &block)
+    #  tool_call_id = tool_call.dig("call_id").sub(/^fc_/, '')
+    #  function_name = tool_call.dig("function", "name")
+    #  function_arguments = tool_call.dig("function", "arguments")
+    #  function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
+    #  IndiferentHash.setup function_arguments
+    #  function_response = block.call function_name, function_arguments
+
+    #  content = case function_response
+    #            when nil
+    #              "success"
+    #            else
+    #              function_response
+    #            end
+    #  content = content.to_s if Numeric === content
+    #end
+
     def self.tools_to_responses(messages)
       messages.collect do |message|
         if message[:role] == 'function_call'
           info = JSON.parse(message[:content])
           IndiferentHash.setup info
+          name = info[:name] || IndiferentHash.dig(info,:function, :name)
+          IndiferentHash.setup info
           id = info[:id].sub(/^fc_/, '')
           IndiferentHash.setup({
             "type" => "function_call",
             "status" => "completed",
-            "name" => info[:name],
+            "name" => name,
             "arguments" => (info[:arguments] || {}).to_json,
-            "call_id"=>"call_#{id}",
+            "call_id"=>id,
           })
         elsif message[:role] == 'function_call_output'
           info = JSON.parse(message[:content])
```
```diff
@@ -61,7 +66,7 @@ module LLM
           { # append result message
             "type" => "function_call_output",
             "output" => info[:content],
-            "call_id"=>"call_#{id}",
+            "call_id"=>id,
           }
         else
           message
```
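The two hunks above change how recorded tool calls are replayed to the Responses API: the function name may now live either at the top level or under `function.name`, and the call id is passed through as-is instead of being re-wrapped as `call_<id>`. A minimal sketch of the resulting translation, with hypothetical message content:

```ruby
require 'json'

# A chat-history entry as scout-ai stores it (content is a hypothetical example)
message = {
  role: 'function_call',
  content: {id: 'fc_abc123', function: {name: 'get_weather'},
            arguments: {city: 'Boston'}}.to_json,
}

# The Responses-API item tools_to_responses now produces for it
info = JSON.parse(message[:content])
item = {
  'type'      => 'function_call',
  'status'    => 'completed',
  'name'      => info['name'] || info.dig('function', 'name'), # flat or nested name
  'arguments' => (info['arguments'] || {}).to_json,
  'call_id'   => info['id'].sub(/^fc_/, ''),                   # no "call_" prefix anymore
}
item['call_id'] # => "abc123"
```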
```diff
@@ -69,7 +74,7 @@ module LLM
       end.flatten
     end
 
-    def self.process_response(response, &block)
+    def self.process_response(response, tools, &block)
       Log.debug "Respose: #{Log.fingerprint response}"
 
       response['output'].collect do |output|
```
```diff
@@ -81,11 +86,14 @@ module LLM
               IndiferentHash.setup({role: 'assistant', content: content['text']})
             end
           end
+        when 'reasoning'
+          next
         when 'function_call'
-          LLM.call_tools [output], &block
+          LLM.process_calls(tools, [output], &block)
         when 'web_search_call'
           next
         else
+          eee response
          eee output
          raise
        end
```
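`process_response` now receives the tool registry so that `LLM.process_calls` can resolve each `function_call`, and it skips the `reasoning` items that reasoning models interleave with their output. For orientation, a sketch of the `response['output']` item shapes the method dispatches on; field names follow the OpenAI Responses API, values are hypothetical:

```ruby
output = [
  {'type' => 'reasoning', 'summary' => []},                        # now skipped
  {'type' => 'message', 'content' => [                             # becomes assistant messages
    {'type' => 'output_text', 'text' => 'Checking the weather...'}]},
  {'type' => 'function_call', 'name' => 'get_weather',             # routed to LLM.process_calls
   'call_id' => 'abc123', 'arguments' => '{"city":"Boston"}'},
  {'type' => 'web_search_call'},                                   # skipped
]                                                                  # anything else raises
```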
```diff
@@ -99,6 +107,7 @@ module LLM
         IndiferentHash.setup(message)
         if message[:role] == 'image'
           path = message[:content]
+          path = LLM.find_file path
           if Open.remote?(path)
             {role: :user, content: {type: :input_image, image_url: path }}
           elsif Open.exists?(path)
```
```diff
@@ -108,35 +117,91 @@ module LLM
             raise
           end
         elsif message[:role] == 'pdf'
-          path = message[:content]
+          path = original_path = message[:content]
           if Open.remote?(path)
             {role: :user, content: {type: :input_file, file_url: path }}
           elsif Open.exists?(path)
             data = self.encode_pdf(path)
-            {role: :user, content: [{type: :input_file, file_data: data }]}
+            {role: :user, content: [{type: :input_file, file_data: data, filename: File.basename(path) }]}
           else
             raise
           end
         elsif message[:role] == 'websearch'
-          {role: :tool, content: {type: "web_search_preview"} }
+          {role: :tool, content: {type: "web_search_preview"} }
         else
           message
         end
       end.flatten
     end
 
+    def self.process_format(format)
+      case format
+      when :json, :json_object, "json", "json_object"
+        {format: {type: 'json_object'}}
+      when String, Symbol
+        {format: {type: format}}
+      when Hash
+        IndiferentHash.setup format
+
+        if format.include?('format')
+          format
+        elsif format['type'] == 'json_schema'
+          {format: format}
+        else
+
+          if ! format.include?('properties')
+            format = IndiferentHash.setup({properties: format})
+          end
+
+          properties = format['properties']
+          new_properties = {}
+          properties.each do |name,info|
+            case info
+            when Symbol, String
+              new_properties[name] = {type: info}
+            when Array
+              new_properties[name] = {type: info[0], description: info[1], default: info[2]}
+            else
+              new_properties[name] = info
+            end
+          end
+          format['properties'] = new_properties
+
+          required = format['properties'].reject{|p,i| i[:default] }.collect{|p,i| p }
+
+          name = format.include?('name') ? format.delete('name') : 'response'
+
+          format['type'] ||= 'object'
+          format[:additionalProperties] = required.empty? ? {type: :string} : false
+          format[:required] = required
+          {format: {name: name,
+                    type: "json_schema",
+                    schema: format,
+          }}
+        end
+      end
+    end
+
     def self.ask(question, options = {}, &block)
       original_options = options.dup
 
       messages = LLM.chat(question)
       options = options.merge LLM.options messages
-      tools = LLM.tools messages
-      associations = LLM.associations messages
 
-      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
-        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+      client, url, key, model, log_errors, return_messages, format, websearch, previous_response_id, tools, = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format, :websearch, :previous_response_id, :tools,
         log_errors: true
 
+      reasoning_options = IndiferentHash.pull_keys options, :reasoning
+      options[:reasoning] = reasoning_options if reasoning_options.any?
+
+      text_options = IndiferentHash.pull_keys options, :text
+      options[:text] = text_options if text_options.any?
+
+      if websearch
+        messages << {role: 'websearch', content: true}
+      end
+
       if client.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
         key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
```
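The extracted `process_format` helper accepts shorthand schemas: a property can be a bare type, a `[type, description, default]` triple, or a full property hash, and properties with a default are treated as optional. Tracing the added code on a small hypothetical format (assuming the enclosing module is `LLM::Responses`):

```ruby
LLM::Responses.process_format({name: :string, age: [:integer, "Age in years", 30]})
# => {format: {name: 'response',
#              type: "json_schema",
#              schema: {type: 'object',
#                       properties: {name: {type: :string},
#                                    age:  {type: :integer, description: "Age in years",
#                                           default: 30}},
#                       additionalProperties: false, # at least one required property
#                       required: [:name]}}          # :age has a default, so it is optional
```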
```diff
@@ -148,56 +213,29 @@ module LLM
         model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
       end
 
-      case format
-      when :json, :json_object, "json", "json_object"
-        options['text'] = {format: {type: 'json_object'}}
-      when String, Symbol
-        options['text'] = {format: {type: format}}
-      when Hash
-        if format.include?('format')
-          options['text'] = format
-        elsif format['type'] == 'json_schema'
-          options['text'] = {format: format}
-        else
-          options['text'] = {format: {name: "response_schema",
-                                      type: "json_schema",
-                                      additionalProperties: false,
-                                      required: format['properties'].keys,
-                                      schema: format,
-          }}
-        end
-      end if format
+      options['text'] = self.process_format format if format
 
       parameters = options.merge(model: model)
 
-      if tools.any? || associations.any?
-        parameters[:tools] ||= []
-        parameters[:tools] += tools.values.collect{|a| a.last } if tools
-        parameters[:tools] += associations.values.collect{|a| a.last } if associations
-        parameters[:tools] = parameters[:tools].collect{|tool|
-          function = tool.delete :function;
-          tool.merge function
-        }
-        if not block_given?
-          block = Proc.new do |name,parameters|
-            IndiferentHash.setup parameters
-            if tools[name]
-              workflow = tools[name].first
-              jobname = parameters.delete :jobname
-              workflow.job(name, jobname, parameters).run
-            else
-              kb = associations[name].first
-              entities, reverse = IndiferentHash.process_options parameters, :entities, :reverse
-              if reverse
-                kb.parents(name, entities)
-              else
-                kb.children(name, entities)
-              end
-            end
-          end
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
         end
+      when nil
+        tools = {}
+      end
+
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
+
+      if tools.any?
+        parameters[:tools] = tools.values.collect{|obj,definition| Hash === obj ? obj : definition}
       end
 
+      parameters['previous_response_id'] = previous_response_id if String === previous_response_id
       Log.low "Calling client with parameters #{Log.fingerprint parameters}\n#{LLM.print messages}"
 
       messages = self.process_input messages
```
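`ask` now accepts tool definitions directly through the `tools:` option and merges them with any tools or knowledge-base associations embedded in the chat; the block, when given, supplies each tool call's output. A minimal usage sketch; the tool definition and the `LLM::Responses` module name are assumptions, not taken from the diff:

```ruby
# A flat-style Responses function tool; the normalization above also accepts
# the nested form where the name sits under function.name.
weather_tool = {
  'type' => 'function',
  'name' => 'get_weather',
  'parameters' => {'type' => 'object',
                   'properties' => {'city' => {'type' => 'string'}}},
}

LLM::Responses.ask("What is the weather in Boston?", tools: [weather_tool]) do |name, arguments|
  "sunny" # returned to the model as the function_call output
end
```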
```diff
@@ -210,63 +248,73 @@ module LLM
           input << message
         end
       end
-      parameters[:input] = input
+
+      parameters[:input] = LLM.tools_to_openai input
 
       response = client.responses.create(parameters: parameters)
-      response = self.process_response response, &block
+
+      Thread.current["previous_response_id"] = previous_response_id = response['id']
+      previous_response_message = {role: :previous_response_id, content: previous_response_id}
+
+      response = self.process_response response, tools, &block
 
       res = if response.last[:role] == 'function_call_output'
-              response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: parameters[:tools]), &block)
+              case previous_response_id
+              when String
+                response + self.ask(response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools, previous_response_id: previous_response_id), &block)
+              else
+                response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools), &block)
+              end
             else
               response
             end
 
       if return_messages
-        res
+        if res.last[:role] == :previous_response_id
+          res
+        else
+          res + [previous_response_message]
+        end
       else
-        res.last['content']
+        LLM.purge(res).last['content']
       end
     end
 
-  end
 
-  def self.image(question, options = {}, &block)
-    original_options = options.dup
+    def self.image(question, options = {}, &block)
+      original_options = options.dup
 
-    messages = LLM.chat(question)
-    options = options.merge LLM.options messages
-    tools = LLM.tools messages
-    associations = LLM.associations messages
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+      tools = LLM.tools messages
+      associations = LLM.associations messages
 
-    client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
-      :client, :url, :key, :model, :log_errors, :return_messages, :format,
-      log_errors: true
+      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+        log_errors: true
 
-    if client.nil?
-      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
-      key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
-      client = LLM::OpenAI.client url, key, log_errors
-    end
+      if client.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+        client = LLM::OpenAI.client url, key, log_errors
+      end
 
-    if model.nil?
-      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
-      model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-image-1")
-    end
+      if model.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-image-1")
+      end
 
-    messages = self.process_input messages
-    input = []
-    messages.each do |message|
-      parameters[:tools] ||= []
-      if message[:role].to_s == 'tool'
-        parameters[:tools] << message[:content]
-      else
+      messages = self.process_input messages
+      input = []
+      parameters = {}
+      messages.each do |message|
        input << message
      end
-    end
-    parameters[:prompt] = LLM.print(input)
+      parameters[:prompt] = LLM.print(input)
 
-    response = client.images.generate(parameters: parameters)
+      response = client.images.generate(parameters: parameters)
 
-    response[0]['b64_json']
+      response
+    end
   end
 end
```
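Note that `image` now returns the full API response instead of `response[0]['b64_json']`, so extraction moves to the caller. A hedged usage sketch; the `data[0].b64_json` shape follows the OpenAI images API and the module name is assumed:

```ruby
require 'base64'

response = LLM::Responses.image("A watercolor lighthouse at dusk")
b64 = response['data'][0]['b64_json'] # adjust if the client exposes a different shape
File.binwrite('lighthouse.png', Base64.decode64(b64))
```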