scout-ai 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.vimproject +87 -15
- data/README.md +296 -0
- data/Rakefile +2 -0
- data/VERSION +1 -1
- data/doc/Agent.md +279 -0
- data/doc/Chat.md +258 -0
- data/doc/LLM.md +446 -0
- data/doc/Model.md +513 -0
- data/doc/RAG.md +129 -0
- data/lib/scout/llm/agent/chat.rb +48 -1
- data/lib/scout/llm/agent/delegate.rb +51 -0
- data/lib/scout/llm/agent/iterate.rb +44 -0
- data/lib/scout/llm/agent.rb +43 -22
- data/lib/scout/llm/ask.rb +47 -7
- data/lib/scout/llm/backends/anthropic.rb +147 -0
- data/lib/scout/llm/backends/bedrock.rb +1 -1
- data/lib/scout/llm/backends/ollama.rb +27 -30
- data/lib/scout/llm/backends/openai.rb +36 -41
- data/lib/scout/llm/backends/responses.rb +166 -113
- data/lib/scout/llm/chat.rb +270 -102
- data/lib/scout/llm/embed.rb +4 -4
- data/lib/scout/llm/mcp.rb +28 -0
- data/lib/scout/llm/parse.rb +1 -0
- data/lib/scout/llm/rag.rb +9 -0
- data/lib/scout/llm/tools/call.rb +76 -0
- data/lib/scout/llm/tools/knowledge_base.rb +159 -0
- data/lib/scout/llm/tools/mcp.rb +59 -0
- data/lib/scout/llm/tools/workflow.rb +106 -0
- data/lib/scout/llm/tools.rb +98 -141
- data/lib/scout-ai.rb +1 -0
- data/scout-ai.gemspec +31 -18
- data/scout_commands/agent/ask +59 -78
- data/scout_commands/documenter +148 -0
- data/scout_commands/llm/ask +3 -2
- data/scout_commands/llm/server +319 -0
- data/share/server/chat.html +138 -0
- data/share/server/chat.js +468 -0
- data/test/scout/llm/backends/test_anthropic.rb +134 -0
- data/test/scout/llm/backends/test_ollama.rb +1 -1
- data/test/scout/llm/backends/test_openai.rb +45 -6
- data/test/scout/llm/backends/test_responses.rb +124 -0
- data/test/scout/llm/test_agent.rb +1 -93
- data/test/scout/llm/test_ask.rb +3 -1
- data/test/scout/llm/test_chat.rb +43 -1
- data/test/scout/llm/test_mcp.rb +29 -0
- data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
- data/test/scout/llm/tools/test_mcp.rb +11 -0
- data/test/scout/llm/tools/test_workflow.rb +39 -0
- metadata +56 -17
- data/README.rdoc +0 -18
- data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
- data/python/scout_ai/atcold/plot_lib.py +0 -141
- data/python/scout_ai/atcold/spiral.py +0 -27
- data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
- data/python/scout_ai/language_model.py +0 -70
- /data/{python/scout_ai/atcold/__init__.py → test/scout/llm/tools/test_call.rb} +0 -0
data/lib/scout/llm/backends/openai.rb:

@@ -5,23 +5,24 @@ require_relative '../chat'
 module LLM
   module OpenAI
 
-    def self.client(url, key, log_errors = false)
-
+    def self.client(url = nil, key = nil, log_errors = false, request_timeout: 1200)
+      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+      key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+      Object::OpenAI::Client.new(access_token:key, log_errors: log_errors, uri_base: url, request_timeout: request_timeout)
     end
 
     def self.process_input(messages)
       messages.collect do |message|
-        if message[:role] == '
-
-
-          message[:content]
+        if message[:role] == 'image'
+          Log.warn "Endpoint 'openai' does not support images, try 'responses': #{message[:content]}"
+          next
         else
           message
         end
-      end.flatten
+      end.flatten.compact
     end
 
-    def self.process_response(response, &block)
+    def self.process_response(response, tools, &block)
       Log.debug "Respose: #{Log.fingerprint response}"
       raise Exception, response["error"] if response["error"]
 
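The new `client` signature makes both positional arguments optional, resolving them from `Scout::Config` and the `OPENAI_URL`/`OPENAI_KEY` environment variables, and adds a `request_timeout:` keyword (default 1200 seconds). A minimal usage sketch under those assumptions; the values shown are placeholders, not part of the package:

```ruby
# Hedged sketch: relies only on the signature in the hunk above.
client = LLM::OpenAI.client                  # url and key resolved from config/ENV
client = LLM::OpenAI.client(nil, nil, false, request_timeout: 60)
```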
@@ -30,7 +31,7 @@ module LLM
         response.dig("choices", 0, "message", "tool_calls")
 
       if tool_calls && tool_calls.any?
-        LLM.
+        LLM.process_calls(tools, tool_calls, &block)
       else
         [message]
       end
@@ -41,12 +42,10 @@ module LLM
 
       messages = LLM.chat(question)
       options = options.merge LLM.options messages
-      tools = LLM.tools messages
-      associations = LLM.associations messages
 
-      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
-        :client, :url, :key, :model, :log_errors, :return_messages, :format,
-        log_errors: true
+      client, url, key, model, log_errors, return_messages, format, tool_choice_next, previous_response_id, tools, = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format, :tool_choice_next, :previous_response_id, :tools,
+        log_errors: true, tool_choice_next: :none
 
       if client.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
@@ -59,8 +58,6 @@ module LLM
         model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
       end
 
-      #role = IndiferentHash.process_options options, :role
-
       case format.to_sym
       when :json, :json_object
         options[:response_format] = {type: 'json_object'}
@@ -70,39 +67,37 @@ module LLM
 
       parameters = options.merge(model: model)
 
-
-
-
-
-
-
-
-
-            workflow = tools[name].first
-            jobname = parameters.delete :jobname
-            workflow.job(name, jobname, parameters).run
-          else
-            kb = associations[name].first
-            entities, reverse = IndiferentHash.process_options parameters, :entities, :reverse
-            if reverse
-              kb.parents(name, entities)
-            else
-              kb.children(name, entities)
-            end
-          end
-        end
+      # Process tools
+
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
         end
+      when nil
+        tools = {}
       end
 
-
-
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
+
+      if tools.any?
+        parameters[:tools] = LLM.tool_definitions_to_openai tools
+      end
+
+      messages = self.process_input messages
+
+      Log.debug "Calling openai #{url}: #{Log.fingerprint(parameters.except(:tools))}}"
+      Log.high "Tools: #{Log.fingerprint tools.keys}}" if tools
 
       parameters[:messages] = LLM.tools_to_openai messages
 
-      response = self.process_response client.chat(parameters: parameters), &block
+      response = self.process_response client.chat(parameters: parameters), tools, &block
 
       res = if response.last[:role] == 'function_call_output'
-        response + self.ask(messages + response, original_options.
+        response + self.ask(messages + response, original_options.merge(tool_choice: tool_choice_next, return_messages: true, tools: tools ), &block)
       else
         response
       end
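With these hunks, `ask` accepts a `tools:` option directly: an Array of function definitions is folded into a hash keyed by `name` (or `function.name`), merged with any tools and knowledge-base associations embedded in the messages, and tool calls are answered through the caller's block via `LLM.process_calls`, with `tool_choice_next` controlling the automatic follow-up request. A hedged sketch of the calling convention this implies; the tool definition shape and block signature are inferred from the hunk, not documented API:

```ruby
# Hypothetical tool definition; the name lookup follows
# definition.dig('name') || definition.dig('function', 'name') above.
tools = [{
  'type' => 'function',
  'function' => {
    'name'       => 'get_weather',
    'parameters' => {'type' => 'object',
                     'properties' => {'city' => {'type' => 'string'}}}
  }
}]

answer = LLM::OpenAI.ask("Weather in Boston?", tools: tools) do |name, arguments|
  # Invoked once per tool call; the return value is fed back as the
  # function_call_output on the follow-up request.
  'sunny' if name == 'get_weather'
end
```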
data/lib/scout/llm/backends/responses.rb:

@@ -15,53 +15,61 @@ module LLM
         mime = "image/extension"
       end
 
-
+      base64_string = Base64.strict_encode64(file_content)
 
-      "data:#{mime};base64,#{
+      "data:#{mime};base64,#{base64_string}"
     end
 
     def self.encode_pdf(path)
       file_content = File.binread(path) # Replace with your file name
-      Base64.strict_encode64(file_content)
-
-
-      tool_call_id = tool_call.dig("id").sub(/^fc_/, '')
-      function_name = tool_call.dig("function", "name")
-      function_arguments = tool_call.dig("function", "arguments")
-      function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
-      IndiferentHash.setup function_arguments
-      function_response = block.call function_name, function_arguments
-
-      content = case function_response
-                when nil
-                  "success"
-                else
-                  function_response
-                end
-      content = content.to_s if Numeric === content
+      base64_string = Base64.strict_encode64(file_content)
+
+      "data:application/pdf;base64,#{base64_string}"
     end
 
+    #def self.tool_response(tool_call, &block)
+    #  tool_call_id = tool_call.dig("call_id").sub(/^fc_/, '')
+    #  function_name = tool_call.dig("function", "name")
+    #  function_arguments = tool_call.dig("function", "arguments")
+    #  function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
+    #  IndiferentHash.setup function_arguments
+    #  function_response = block.call function_name, function_arguments
+
+    #  content = case function_response
+    #            when nil
+    #              "success"
+    #            else
+    #              function_response
+    #            end
+    #  content = content.to_s if Numeric === content
+    #end
+
     def self.tools_to_responses(messages)
+      last_id = nil
       messages.collect do |message|
         if message[:role] == 'function_call'
           info = JSON.parse(message[:content])
           IndiferentHash.setup info
-
+          name = info[:name] || IndiferentHash.dig(info,:function, :name)
+          IndiferentHash.setup info
+          id = last_id = info[:id] || "fc_#{rand(1000).to_s}"
+          id = id.sub(/^fc_/, '')
           IndiferentHash.setup({
             "type" => "function_call",
             "status" => "completed",
-            "name" =>
+            "name" => name,
             "arguments" => (info[:arguments] || {}).to_json,
-            "call_id"=>
+            "call_id"=>id,
           })
         elsif message[:role] == 'function_call_output'
           info = JSON.parse(message[:content])
           IndiferentHash.setup info
-          id = info[:id]
+          id = info[:id] || last_id
+          id = id.sub(/^fc_/, '')
           { # append result message
             "type" => "function_call_output",
             "output" => info[:content],
-            "call_id"=>
+            "call_id"=>id,
           }
         else
           message
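For orientation, a sketch of the chat-message shapes this converter consumes, inferred from the `JSON.parse(message[:content])` and key lookups above (the actual producer lives in `data/lib/scout/llm/chat.rb`); note how `last_id` lets a `function_call_output` without an explicit id reuse the id of the preceding call:

```ruby
# Hypothetical message pair, shaped to match the lookups above.
call = {role: 'function_call',
        content: {id: 'fc_42', name: 'get_weather',
                  arguments: {city: 'Boston'}}.to_json}

output = {role: 'function_call_output',
          content: {content: 'sunny'}.to_json}  # no :id, falls back to last_id ('fc_42')
```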
@@ -69,7 +77,7 @@ module LLM
       end.flatten
     end
 
-    def self.process_response(response, &block)
+    def self.process_response(response, tools, &block)
       Log.debug "Respose: #{Log.fingerprint response}"
 
       response['output'].collect do |output|
@@ -81,11 +89,14 @@ module LLM
             IndiferentHash.setup({role: 'assistant', content: content['text']})
           end
         end
+      when 'reasoning'
+        next
       when 'function_call'
-        LLM.
+        LLM.process_calls(tools, [output], &block)
       when 'web_search_call'
         next
       else
+        eee response
         eee output
         raise
       end
@@ -99,6 +110,7 @@ module LLM
         IndiferentHash.setup(message)
         if message[:role] == 'image'
           path = message[:content]
+          path = LLM.find_file path
           if Open.remote?(path)
             {role: :user, content: {type: :input_image, image_url: path }}
           elsif Open.exists?(path)
@@ -108,35 +120,91 @@ module LLM
             raise
           end
         elsif message[:role] == 'pdf'
-          path = message[:content]
+          path = original_path = message[:content]
           if Open.remote?(path)
             {role: :user, content: {type: :input_file, file_url: path }}
           elsif Open.exists?(path)
             data = self.encode_pdf(path)
-            {role: :user, content: [{type: :input_file, file_data: data }]}
+            {role: :user, content: [{type: :input_file, file_data: data, filename: File.basename(path) }]}
           else
             raise
           end
         elsif message[:role] == 'websearch'
-
+          {role: :tool, content: {type: "web_search_preview"} }
         else
           message
         end
       end.flatten
     end
 
+    def self.process_format(format)
+      case format
+      when :json, :json_object, "json", "json_object"
+        {format: {type: 'json_object'}}
+      when String, Symbol
+        {format: {type: format}}
+      when Hash
+        IndiferentHash.setup format
+
+        if format.include?('format')
+          format
+        elsif format['type'] == 'json_schema'
+          {format: format}
+        else
+
+          if ! format.include?('properties')
+            format = IndiferentHash.setup({properties: format})
+          end
+
+          properties = format['properties']
+          new_properties = {}
+          properties.each do |name,info|
+            case info
+            when Symbol, String
+              new_properties[name] = {type: info}
+            when Array
+              new_properties[name] = {type: info[0], description: info[1], default: info[2]}
+            else
+              new_properties[name] = info
+            end
+          end
+          format['properties'] = new_properties
+
+          required = format['properties'].reject{|p,i| i[:default] }.collect{|p,i| p }
+
+          name = format.include?('name') ? format.delete('name') : 'response'
+
+          format['type'] ||= 'object'
+          format[:additionalProperties] = required.empty? ? {type: :string} : false
+          format[:required] = required
+          {format: {name: name,
+                    type: "json_schema",
+                    schema: format,
+          }}
+        end
+      end
+    end
+
     def self.ask(question, options = {}, &block)
       original_options = options.dup
 
       messages = LLM.chat(question)
       options = options.merge LLM.options messages
-      tools = LLM.tools messages
-      associations = LLM.associations messages
 
-      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
-        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+      client, url, key, model, log_errors, return_messages, format, websearch, previous_response_id, tools, = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format, :websearch, :previous_response_id, :tools,
         log_errors: true
 
+      reasoning_options = IndiferentHash.pull_keys options, :reasoning
+      options[:reasoning] = reasoning_options if reasoning_options.any?
+
+      text_options = IndiferentHash.pull_keys options, :text
+      options[:text] = text_options if text_options.any?
+
+      if websearch
+        messages << {role: 'websearch', content: true}
+      end
+
       if client.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
         key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
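`process_format` centralizes what the next hunk removes from `ask`, and adds a shorthand: a bare property hash is wrapped into a full JSON-schema `format`, with `required` derived from the properties that lack a `:default`. A hedged trace of the expansion, assuming the responses backend module is exposed as `LLM::Responses`:

```ruby
LLM::Responses.process_format(:json)
# => {format: {type: 'json_object'}}

LLM::Responses.process_format(age: :integer, city: [:string, 'Home city'])
# => {format: {name: 'response', type: "json_schema",
#      schema: {properties: {age:  {type: :integer},
#                            city: {type: :string, description: 'Home city', default: nil}},
#               type: 'object', additionalProperties: false,
#               required: ['age', 'city']}}}   # nil default counts as "no default"
```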
@@ -148,57 +216,32 @@ module LLM
         model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
       end
 
-
-      when :json, :json_object, "json", "json_object"
-        options['text'] = {format: {type: 'json_object'}}
-      when String, Symbol
-        options['text'] = {format: {type: format}}
-      when Hash
-        if format.include?('format')
-          options['text'] = format
-        elsif format['type'] == 'json_schema'
-          options['text'] = {format: format}
-        else
-          options['text'] = {format: {name: "response_schema",
-                                      type: "json_schema",
-                                      additionalProperties: false,
-                                      required: format['properties'].keys,
-                                      schema: format,
-          }}
-        end
-      end if format
+      options['text'] = self.process_format format if format
 
       parameters = options.merge(model: model)
 
-
-
-
-
-
-
-        tool.merge function
-      }
-      if not block_given?
-        block = Proc.new do |name,parameters|
-          IndiferentHash.setup parameters
-          if tools[name]
-            workflow = tools[name].first
-            jobname = parameters.delete :jobname
-            workflow.job(name, jobname, parameters).run
-          else
-            kb = associations[name].first
-            entities, reverse = IndiferentHash.process_options parameters, :entities, :reverse
-            if reverse
-              kb.parents(name, entities)
-            else
-              kb.children(name, entities)
-            end
-          end
-        end
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
         end
+      when nil
+        tools = {}
       end
 
-
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
+
+      if tools.any?
+        parameters[:tools] = LLM.tool_definitions_to_reponses tools
+      end
+
+      parameters['previous_response_id'] = previous_response_id if String === previous_response_id
+
+      Log.low "Calling responses #{url}: #{Log.fingerprint(parameters.except(:tools))}}"
+      Log.medium "Tools: #{Log.fingerprint tools.keys}}" if tools
 
       messages = self.process_input messages
       input = []
@@ -210,63 +253,73 @@ module LLM
           input << message
         end
       end
-
+
+      parameters[:input] = LLM.tools_to_openai input
 
       response = client.responses.create(parameters: parameters)
-
+
+      Thread.current["previous_response_id"] = previous_response_id = response['id']
+      previous_response_message = {role: :previous_response_id, content: previous_response_id}
+
+      response = self.process_response response, tools, &block
 
       res = if response.last[:role] == 'function_call_output'
-
+        case previous_response_id
+        when String
+          response + self.ask(response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools, previous_response_id: previous_response_id), &block)
+        else
+          response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools), &block)
+        end
       else
         response
       end
 
       if return_messages
-        res
+        if res.last[:role] == :previous_response_id
+          res
+        else
+          res + [previous_response_message]
+        end
       else
-        res.last['content']
+        LLM.purge(res).last['content']
       end
     end
 
-    end
 
-
-
+    def self.image(question, options = {}, &block)
+      original_options = options.dup
 
-
-
-
-
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+      tools = LLM.tools messages
+      associations = LLM.associations messages
 
-
-
-
+      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+        log_errors: true
 
-
-
-
-
-
+      if client.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+        client = LLM::OpenAI.client url, key, log_errors
+      end
 
-
-
-
-
+      if model.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-image-1")
+      end
 
-
-
-
-
-      if message[:role].to_s == 'tool'
-        parameters[:tools] << message[:content]
-      else
+      messages = self.process_input messages
+      input = []
+      parameters = {}
+      messages.each do |message|
         input << message
       end
-
-      parameters[:prompt] = LLM.print(input)
+      parameters[:prompt] = LLM.print(input)
 
-
+      response = client.images.generate(parameters: parameters)
 
-
+      response
+    end
   end
 end
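Taken together, the responses backend now threads OpenAI response ids through the conversation (so a follow-up can send only the new turn) and gains an image entry point defaulting to `gpt-image-1`. A hedged usage sketch; the module name `LLM::Responses` is assumed, everything else follows the hunks above:

```ruby
# Continue a conversation server-side instead of resending the history.
msgs = LLM::Responses.ask("Summarize the attached PDF", return_messages: true)
id   = msgs.last[:content]   # a {role: :previous_response_id, ...} message is appended
LLM::Responses.ask("Now compress it to one sentence", previous_response_id: id)

# Opt into the web_search_preview tool with a single flag.
LLM::Responses.ask("Latest scout-ai release?", websearch: true)

# New image generation entry point.
response = LLM::Responses.image("A watercolor scout badge")
```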
|