scout-ai 0.2.0 → 1.0.1
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/.vimproject +155 -9
- data/README.md +296 -0
- data/Rakefile +3 -0
- data/VERSION +1 -1
- data/bin/scout-ai +2 -0
- data/doc/Agent.md +279 -0
- data/doc/Chat.md +258 -0
- data/doc/LLM.md +446 -0
- data/doc/Model.md +513 -0
- data/doc/RAG.md +129 -0
- data/lib/scout/llm/agent/chat.rb +74 -0
- data/lib/scout/llm/agent/delegate.rb +39 -0
- data/lib/scout/llm/agent/iterate.rb +44 -0
- data/lib/scout/llm/agent.rb +51 -30
- data/lib/scout/llm/ask.rb +63 -21
- data/lib/scout/llm/backends/anthropic.rb +147 -0
- data/lib/scout/llm/backends/bedrock.rb +129 -0
- data/lib/scout/llm/backends/huggingface.rb +6 -21
- data/lib/scout/llm/backends/ollama.rb +62 -35
- data/lib/scout/llm/backends/openai.rb +77 -33
- data/lib/scout/llm/backends/openwebui.rb +1 -1
- data/lib/scout/llm/backends/relay.rb +3 -2
- data/lib/scout/llm/backends/responses.rb +320 -0
- data/lib/scout/llm/chat.rb +703 -0
- data/lib/scout/llm/embed.rb +4 -4
- data/lib/scout/llm/mcp.rb +28 -0
- data/lib/scout/llm/parse.rb +71 -13
- data/lib/scout/llm/rag.rb +9 -0
- data/lib/scout/llm/tools/call.rb +66 -0
- data/lib/scout/llm/tools/knowledge_base.rb +158 -0
- data/lib/scout/llm/tools/mcp.rb +59 -0
- data/lib/scout/llm/tools/workflow.rb +69 -0
- data/lib/scout/llm/tools.rb +112 -76
- data/lib/scout/llm/utils.rb +17 -10
- data/lib/scout/model/base.rb +19 -0
- data/lib/scout/model/python/base.rb +25 -0
- data/lib/scout/model/python/huggingface/causal/next_token.rb +23 -0
- data/lib/scout/model/python/huggingface/causal.rb +29 -0
- data/lib/scout/model/python/huggingface/classification +0 -0
- data/lib/scout/model/python/huggingface/classification.rb +50 -0
- data/lib/scout/model/python/huggingface.rb +112 -0
- data/lib/scout/model/python/torch/dataloader.rb +57 -0
- data/lib/scout/model/python/torch/helpers.rb +84 -0
- data/lib/scout/model/python/torch/introspection.rb +34 -0
- data/lib/scout/model/python/torch/load_and_save.rb +47 -0
- data/lib/scout/model/python/torch.rb +94 -0
- data/lib/scout/model/util/run.rb +181 -0
- data/lib/scout/model/util/save.rb +81 -0
- data/lib/scout-ai.rb +4 -1
- data/python/scout_ai/__init__.py +35 -0
- data/python/scout_ai/huggingface/data.py +48 -0
- data/python/scout_ai/huggingface/eval.py +60 -0
- data/python/scout_ai/huggingface/model.py +29 -0
- data/python/scout_ai/huggingface/rlhf.py +83 -0
- data/python/scout_ai/huggingface/train/__init__.py +34 -0
- data/python/scout_ai/huggingface/train/next_token.py +315 -0
- data/python/scout_ai/util.py +32 -0
- data/scout-ai.gemspec +143 -0
- data/scout_commands/agent/ask +89 -14
- data/scout_commands/agent/kb +15 -0
- data/scout_commands/documenter +148 -0
- data/scout_commands/llm/ask +71 -12
- data/scout_commands/llm/process +4 -2
- data/scout_commands/llm/server +319 -0
- data/share/server/chat.html +138 -0
- data/share/server/chat.js +468 -0
- data/test/data/cat.jpg +0 -0
- data/test/scout/llm/agent/test_chat.rb +14 -0
- data/test/scout/llm/backends/test_anthropic.rb +134 -0
- data/test/scout/llm/backends/test_bedrock.rb +60 -0
- data/test/scout/llm/backends/test_huggingface.rb +3 -3
- data/test/scout/llm/backends/test_ollama.rb +48 -10
- data/test/scout/llm/backends/test_openai.rb +134 -10
- data/test/scout/llm/backends/test_responses.rb +239 -0
- data/test/scout/llm/test_agent.rb +0 -70
- data/test/scout/llm/test_ask.rb +4 -1
- data/test/scout/llm/test_chat.rb +256 -0
- data/test/scout/llm/test_mcp.rb +29 -0
- data/test/scout/llm/test_parse.rb +81 -2
- data/test/scout/llm/tools/test_call.rb +0 -0
- data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
- data/test/scout/llm/tools/test_mcp.rb +11 -0
- data/test/scout/llm/tools/test_workflow.rb +39 -0
- data/test/scout/model/python/huggingface/causal/test_next_token.rb +59 -0
- data/test/scout/model/python/huggingface/test_causal.rb +33 -0
- data/test/scout/model/python/huggingface/test_classification.rb +30 -0
- data/test/scout/model/python/test_base.rb +44 -0
- data/test/scout/model/python/test_huggingface.rb +9 -0
- data/test/scout/model/python/test_torch.rb +71 -0
- data/test/scout/model/python/torch/test_helpers.rb +14 -0
- data/test/scout/model/test_base.rb +117 -0
- data/test/scout/model/util/test_save.rb +31 -0
- metadata +113 -7
- data/README.rdoc +0 -18
- data/questions/coach +0 -2
--- a/data/lib/scout/llm/backends/huggingface.rb
+++ b/data/lib/scout/llm/backends/huggingface.rb
@@ -1,17 +1,18 @@
 require_relative '../parse'
 require_relative '../tools'
+require_relative '../chat'
 
 module LLM
   module Huggingface
 
     def self.model(model_options)
-      require '
-      require '
+      require 'scout/model/python/huggingface'
+      require 'scout/model/python/huggingface/causal'
 
       model, task, checkpoint, dir = IndiferentHash.process_options model_options, :model, :task, :checkpoint, :dir
       model ||= Scout::Config.get(:model, :huggingface, env: 'HUGGINGFACE_MODEL,HF_MODEL')
 
-
+      CausalModel.new model, dir, model_options
     end
 
     def self.ask(question, options = {}, &block)
@@ -20,7 +21,7 @@ module LLM
 
       model = self.model model_options
 
-      messages = LLM.
+      messages = LLM.messages(question)
 
       system = []
       prompt = []
@@ -36,23 +37,7 @@ module LLM
       parameters = options.merge(messages: messages)
       Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
 
-
-      message = response[-1]
-      while message["role"] == "assistant" && message["tool_calls"]
-        messages << message
-
-        message["tool_calls"].each do |tool_call|
-          response_message = LLM.tool_response(tool_call, &block)
-          messages << response_message
-        end
-
-        parameters[:messages] = messages
-        Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
-        response = model.eval(parameters)
-        message = response[-1]
-      end
-
-      message["content"]
+      model.eval(messages)
     end
 
     def self.embed(text, options = {})
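The rewrite above replaces the hand-rolled `tool_calls` loop with a `CausalModel` that evaluates the chat directly. A minimal sketch of how the updated entry points might be called; the checkpoint id is illustrative, and passing it through `ask`'s options assumes they reach `model_options`, which this hunk does not show:

```ruby
require 'scout-ai'

# Illustrative checkpoint; any HuggingFace causal model id should behave the same.
checkpoint = 'mistralai/Mistral-7B-Instruct-v0.2'

# model() now wires the checkpoint into the new CausalModel wrapper.
model = LLM::Huggingface.model model: checkpoint

# ask() builds messages with LLM.messages(question) and returns model.eval(messages).
puts LLM::Huggingface.ask('Say hello in one short sentence', model: checkpoint)
```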
--- a/data/lib/scout/llm/backends/ollama.rb
+++ b/data/lib/scout/llm/backends/ollama.rb
@@ -2,6 +2,7 @@ require 'ollama-ai'
 require_relative '../parse'
 require_relative '../tools'
 require_relative '../utils'
+require_relative '../chat'
 
 module LLM
   module OLlama
@@ -15,9 +16,32 @@ module LLM
       )
     end
 
+
+    def self.process_response(responses, tools, &block)
+      responses.collect do |response|
+        Log.debug "Respose: #{Log.fingerprint response}"
+
+        message = response['message']
+        tool_calls = response.dig("tool_calls") ||
+          response.dig("message", "tool_calls")
+
+        if tool_calls && tool_calls.any?
+          LLM.process_calls tools, tool_calls, &block
+        else
+          [message]
+        end
+      end.flatten
+    end
+
     def self.ask(question, options = {}, &block)
+      original_options = options.dup
 
-
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+
+      client, url, key, model, return_messages, format, stream, previous_response_id, tools = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :return_messages, :format, :stream, :previous_response_id, :tools,
+        stream: false
 
       if client.nil?
         url ||= Scout::Config.get(:url, :ollama_ask, :ask, :ollama, env: 'OLLAMA_URL', default: "http://localhost:11434")
@@ -30,51 +54,54 @@ module LLM
         model ||= LLM.get_url_config(:model, url, :ollama_ask, :ask, :ollama, env: 'OLLAMA_MODEL', default: "mistral")
       end
 
-      mode = IndiferentHash.process_options options, :mode
 
-
+      case format.to_sym
+      when :json, :json_object
+        options[:response_format] = {type: 'json_object'}
+      else
+        options[:response_format] = {type: format}
+      end if format
 
-
-
-
-
-
-
-
-
+      parameters = options.merge(model: model)
+
+      # Process tools
+
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
         end
+      when nil
+        tools = {}
       end
 
-
-
-      parameters = options.merge(model: model, messages: messages)
-      Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
 
-
-
-
-      while message["role"] == "assistant" && message["tool_calls"]
-        messages << message
+      if tools.any?
+        parameters[:tools] = tools.values.collect{|obj,definition| Hash === obj ? obj : definition}
+      end
 
-
-
-
-      end
+      Log.low "Calling client with parameters #{Log.fingerprint parameters}\n#{LLM.print messages}"
+
+      parameters[:messages] = LLM.tools_to_ollama messages
 
-
-      Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
-      response = client.chat(parameters)
+      parameters[:stream] = stream
 
-
-
+      response = self.process_response client.chat(parameters), tools, &block
+
+      res = if response.last[:role] == 'function_call_output'
+        response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools), &block)
+      else
+        response
+      end
 
-
-
+      if return_messages
+        res
       else
-
-        Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
-        response = client.generate(parameters)
-        response.collect{|e| e['response']} * ""
+        res.last['content']
       end
     end
 
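Both rewritten chat backends share one tool-handling convention: tools may arrive as an Array of function definitions (normalized into a name-indexed Hash), calls are dispatched through `LLM.process_calls`, and when the last reply is a `function_call_output` the backend recurses into `ask` with the accumulated messages. A sketch under those assumptions; the tool definition is illustrative, and the block signature `(name, arguments)` follows the commented-out `tool_response` helper later in this diff rather than anything this hunk shows:

```ruby
# Illustrative function tool, in the OpenAI-style shape the normalization expects.
tools = [{
  'type' => 'function',
  'function' => {
    'name' => 'current_time',
    'description' => 'Return the current wall-clock time',
    'parameters' => { 'type' => 'object', 'properties' => {} }
  }
}]

answer = LLM::OLlama.ask('What time is it?', model: 'mistral', tools: tools) do |name, arguments|
  Time.now.to_s if name == 'current_time'
end
puts answer
```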
--- a/data/lib/scout/llm/backends/openai.rb
+++ b/data/lib/scout/llm/backends/openai.rb
@@ -1,19 +1,51 @@
 require 'scout'
 require 'openai'
-require_relative '../
-require_relative '../tools'
-require_relative '../utils'
+require_relative '../chat'
 
 module LLM
   module OpenAI
 
-    def self.client(url, key, log_errors = false)
-
+    def self.client(url = nil, key = nil, log_errors = false, request_timeout: 1200)
+      url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+      key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+      Object::OpenAI::Client.new(access_token:key, log_errors: log_errors, uri_base: url, request_timeout: request_timeout)
+    end
+
+    def self.process_input(messages)
+      messages.collect do |message|
+        if message[:role] == 'image'
+          Log.warn "Endpoint 'openai' does not support images, try 'responses': #{message[:content]}"
+          next
+        else
+          message
+        end
+      end.flatten.compact
+    end
+
+    def self.process_response(response, tools, &block)
+      Log.debug "Respose: #{Log.fingerprint response}"
+      raise Exception, response["error"] if response["error"]
+
+      message = response.dig("choices", 0, "message")
+      tool_calls = response.dig("choices", 0, "tool_calls") ||
+        response.dig("choices", 0, "message", "tool_calls")
+
+      if tool_calls && tool_calls.any?
+        LLM.process_calls(tools, tool_calls, &block)
+      else
+        [message]
+      end
     end
 
     def self.ask(question, options = {}, &block)
+      original_options = options.dup
 
-
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+
+      client, url, key, model, log_errors, return_messages, format, tool_choice_next, previous_response_id, tools, = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format, :tool_choice_next, :previous_response_id, :tools,
+        log_errors: true, tool_choice_next: :none
 
       if client.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
@@ -23,45 +55,57 @@ module LLM
 
       if model.nil?
         url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
-        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
       end
 
-
+      case format.to_sym
+      when :json, :json_object
+        options[:response_format] = {type: 'json_object'}
+      else
+        options[:response_format] = {type: format}
+      end if format
 
-
+      parameters = options.merge(model: model)
 
-
+      # Process tools
 
-
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
+        end
+      when nil
+        tools = {}
+      end
 
-
-
-      message = response.dig("choices", 0, "message")
-      tool_calls = response.dig("choices", 0, "tool_calls") ||
-        response.dig("choices", 0, "message", "tool_calls")
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
 
-
+      if tools.any?
+        parameters[:tools] = tools.values.collect{|obj,definition| Hash === obj ? obj : definition}
+      end
 
-
-      messages << message
+      messages = self.process_input messages
 
-
-      tool_calls.each do |tool_call|
-        response_message = LLM.tool_response(tool_call, &block)
-        messages << response_message
-      end
+      Log.low "Calling openai #{url}: #{Log.fingerprint parameters}}"
 
-
-      Log.debug "Calling client with parameters: #{Log.fingerprint parameters}"
-      response = client.chat( parameters: parameters)
-      Log.debug "Respose: #{Log.fingerprint response}"
+      parameters[:messages] = LLM.tools_to_openai messages
 
-
-
-
-
+      response = self.process_response client.chat(parameters: parameters), tools, &block
+
+      res = if response.last[:role] == 'function_call_output'
+        response + self.ask(messages + response, original_options.merge(tool_choice: tool_choice_next, return_messages: true, tools: tools ), &block)
+      else
+        response
+      end
 
-
+      if return_messages
+        res
+      else
+        res.last['content']
+      end
     end
 
     def self.embed(text, options = {})
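The OpenAI backend now follows the same pattern: chat-format messages via `LLM.chat`, tools normalized into a Hash, recursion after a `function_call_output`, and `return_messages` to get the full transcript instead of only the last content. A sketch assuming `OPENAI_KEY` is configured; prompts and model are illustrative:

```ruby
# Default: just the final content string, here forced into JSON mode.
puts LLM::OpenAI.ask('Reply with {"ok": true}', format: :json)

# return_messages: the whole message list, e.g. to continue the conversation.
messages = LLM::OpenAI.ask('Say hi', model: 'gpt-4.1', return_messages: true)
puts messages.last['content']
```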
--- a/data/lib/scout/llm/backends/openwebui.rb
+++ b/data/lib/scout/llm/backends/openwebui.rb
@@ -31,7 +31,7 @@ module LLM
       model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENWEBUI_MODEL')
 
       role = IndiferentHash.process_options options, :role
-      messages = LLM.
+      messages = LLM.messages(question, role)
 
       parameters = options.merge(model: model, messages: messages)
 
--- a/data/lib/scout/llm/backends/relay.rb
+++ b/data/lib/scout/llm/backends/relay.rb
@@ -24,9 +24,10 @@ module LLM
     end
 
     def self.ask(question, options = {}, &block)
-
+      server = IndiferentHash.process_options options, :server
+      server ||= Scout::Config.get :server, :ask_relay, :relay, :ask, env: 'ASK_ENDPOINT,LLM_ENDPOINT', default: :openai
 
-
+      options[:question] = question
       TmpFile.with_file(options.to_json) do |file|
         id = upload(server, file)
         gather(server, id)
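The relay backend now takes its target from the `:server` option or from Scout::Config, falling back to the `ASK_ENDPOINT` or `LLM_ENDPOINT` environment variables, and folds the question into the serialized options it uploads. A sketch; the host and the `LLM::Relay` module name are assumptions, since the hunk does not show the module line:

```ruby
# Either configure the endpoint globally...
#   export ASK_ENDPOINT=llm.example.org
# ...or pass it per call (host is illustrative):
LLM::Relay.ask 'Hello over the relay', server: 'llm.example.org'
```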
--- /dev/null
+++ b/data/lib/scout/llm/backends/responses.rb
@@ -0,0 +1,320 @@
+require_relative 'openai'
+
+module LLM
+  module Responses
+    def self.encode_image(path)
+      path = path.find if Path === path
+      file_content = File.binread(path) # Replace with your file name
+
+      case extension = path.split('.').last.downcase
+      when 'jpg', 'jpeg'
+        mime = "image/jpeg"
+      when 'png'
+        mime = "image/png"
+      else
+        mime = "image/extension"
+      end
+
+      base64_string = Base64.strict_encode64(file_content)
+
+      "data:#{mime};base64,#{base64_string}"
+    end
+
+    def self.encode_pdf(path)
+      file_content = File.binread(path) # Replace with your file name
+      base64_string = Base64.strict_encode64(file_content)
+
+      "data:application/pdf;base64,#{base64_string}"
+    end
+
+    #def self.tool_response(tool_call, &block)
+    #  tool_call_id = tool_call.dig("call_id").sub(/^fc_/, '')
+    #  function_name = tool_call.dig("function", "name")
+    #  function_arguments = tool_call.dig("function", "arguments")
+    #  function_arguments = JSON.parse(function_arguments, { symbolize_names: true }) if String === function_arguments
+    #  IndiferentHash.setup function_arguments
+    #  function_response = block.call function_name, function_arguments
+
+    #  content = case function_response
+    #            when nil
+    #              "success"
+    #            else
+    #              function_response
+    #            end
+    #  content = content.to_s if Numeric === content
+    #end
+
+    def self.tools_to_responses(messages)
+      messages.collect do |message|
+        if message[:role] == 'function_call'
+          info = JSON.parse(message[:content])
+          IndiferentHash.setup info
+          name = info[:name] || IndiferentHash.dig(info,:function, :name)
+          IndiferentHash.setup info
+          id = info[:id].sub(/^fc_/, '')
+          IndiferentHash.setup({
+            "type" => "function_call",
+            "status" => "completed",
+            "name" => name,
+            "arguments" => (info[:arguments] || {}).to_json,
+            "call_id"=>id,
+          })
+        elsif message[:role] == 'function_call_output'
+          info = JSON.parse(message[:content])
+          IndiferentHash.setup info
+          id = info[:id].sub(/^fc_/, '')
+          { # append result message
+            "type" => "function_call_output",
+            "output" => info[:content],
+            "call_id"=>id,
+          }
+        else
+          message
+        end
+      end.flatten
+    end
+
+    def self.process_response(response, tools, &block)
+      Log.debug "Respose: #{Log.fingerprint response}"
+
+      response['output'].collect do |output|
+        case output['type']
+        when 'message'
+          output['content'].collect do |content|
+            case content['type']
+            when 'output_text'
+              IndiferentHash.setup({role: 'assistant', content: content['text']})
+            end
+          end
+        when 'reasoning'
+          next
+        when 'function_call'
+          LLM.process_calls(tools, [output], &block)
+        when 'web_search_call'
+          next
+        else
+          eee response
+          eee output
+          raise
+        end
+      end.compact.flatten
+    end
+
+    def self.process_input(messages)
+      messages = self.tools_to_responses messages
+
+      messages.collect do |message|
+        IndiferentHash.setup(message)
+        if message[:role] == 'image'
+          path = message[:content]
+          path = LLM.find_file path
+          if Open.remote?(path)
+            {role: :user, content: {type: :input_image, image_url: path }}
+          elsif Open.exists?(path)
+            path = self.encode_image(path)
+            {role: :user, content: [{type: :input_image, image_url: path }]}
+          else
+            raise
+          end
+        elsif message[:role] == 'pdf'
+          path = original_path = message[:content]
+          if Open.remote?(path)
+            {role: :user, content: {type: :input_file, file_url: path }}
+          elsif Open.exists?(path)
+            data = self.encode_pdf(path)
+            {role: :user, content: [{type: :input_file, file_data: data, filename: File.basename(path) }]}
+          else
+            raise
+          end
+        elsif message[:role] == 'websearch'
+          {role: :tool, content: {type: "web_search_preview"} }
+        else
+          message
+        end
+      end.flatten
+    end
+
+    def self.process_format(format)
+      case format
+      when :json, :json_object, "json", "json_object"
+        {format: {type: 'json_object'}}
+      when String, Symbol
+        {format: {type: format}}
+      when Hash
+        IndiferentHash.setup format
+
+        if format.include?('format')
+          format
+        elsif format['type'] == 'json_schema'
+          {format: format}
+        else
+
+          if ! format.include?('properties')
+            format = IndiferentHash.setup({properties: format})
+          end
+
+          properties = format['properties']
+          new_properties = {}
+          properties.each do |name,info|
+            case info
+            when Symbol, String
+              new_properties[name] = {type: info}
+            when Array
+              new_properties[name] = {type: info[0], description: info[1], default: info[2]}
+            else
+              new_properties[name] = info
+            end
+          end
+          format['properties'] = new_properties
+
+          required = format['properties'].reject{|p,i| i[:default] }.collect{|p,i| p }
+
+          name = format.include?('name') ? format.delete('name') : 'response'
+
+          format['type'] ||= 'object'
+          format[:additionalProperties] = required.empty? ? {type: :string} : false
+          format[:required] = required
+          {format: {name: name,
+                    type: "json_schema",
+                    schema: format,
+          }}
+        end
+      end
+    end
+
+    def self.ask(question, options = {}, &block)
+      original_options = options.dup
+
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+
+      client, url, key, model, log_errors, return_messages, format, websearch, previous_response_id, tools, = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format, :websearch, :previous_response_id, :tools,
+        log_errors: true
+
+      reasoning_options = IndiferentHash.pull_keys options, :reasoning
+      options[:reasoning] = reasoning_options if reasoning_options.any?
+
+      text_options = IndiferentHash.pull_keys options, :text
+      options[:text] = text_options if text_options.any?
+
+      if websearch
+        messages << {role: 'websearch', content: true}
+      end
+
+      if client.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+        client = LLM::OpenAI.client url, key, log_errors
+      end
+
+      if model.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-4.1")
+      end
+
+      options['text'] = self.process_format format if format
+
+      parameters = options.merge(model: model)
+
+      case tools
+      when Array
+        tools = tools.inject({}) do |acc,definition|
+          IndiferentHash.setup definition
+          name = definition.dig('name') || definition.dig('function', 'name')
+          acc.merge(name => definition)
+        end
+      when nil
+        tools = {}
+      end
+
+      tools.merge!(LLM.tools messages)
+      tools.merge!(LLM.associations messages)
+
+      if tools.any?
+        parameters[:tools] = tools.values.collect{|obj,definition| Hash === obj ? obj : definition}
+      end
+
+      parameters['previous_response_id'] = previous_response_id if String === previous_response_id
+      Log.low "Calling client with parameters #{Log.fingerprint parameters}\n#{LLM.print messages}"
+
+      messages = self.process_input messages
+      input = []
+      messages.each do |message|
+        parameters[:tools] ||= []
+        if message[:role].to_s == 'tool'
+          parameters[:tools] << message[:content]
+        else
+          input << message
+        end
+      end
+
+      parameters[:input] = LLM.tools_to_openai input
+
+      response = client.responses.create(parameters: parameters)
+
+      Thread.current["previous_response_id"] = previous_response_id = response['id']
+      previous_response_message = {role: :previous_response_id, content: previous_response_id}
+
+      response = self.process_response response, tools, &block
+
+      res = if response.last[:role] == 'function_call_output'
+        case previous_response_id
+        when String
+          response + self.ask(response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools, previous_response_id: previous_response_id), &block)
+        else
+          response + self.ask(messages + response, original_options.except(:tool_choice).merge(return_messages: true, tools: tools), &block)
+        end
+      else
+        response
+      end
+
+      if return_messages
+        if res.last[:role] == :previous_response_id
+          res
+        else
+          res + [previous_response_message]
+        end
+      else
+        LLM.purge(res).last['content']
+      end
+    end
+
+
+    def self.image(question, options = {}, &block)
+      original_options = options.dup
+
+      messages = LLM.chat(question)
+      options = options.merge LLM.options messages
+      tools = LLM.tools messages
+      associations = LLM.associations messages
+
+      client, url, key, model, log_errors, return_messages, format = IndiferentHash.process_options options,
+        :client, :url, :key, :model, :log_errors, :return_messages, :format,
+        log_errors: true
+
+      if client.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        key ||= LLM.get_url_config(:key, url, :openai_ask, :ask, :openai, env: 'OPENAI_KEY')
+        client = LLM::OpenAI.client url, key, log_errors
+      end
+
+      if model.nil?
+        url ||= Scout::Config.get(:url, :openai_ask, :ask, :openai, env: 'OPENAI_URL')
+        model ||= LLM.get_url_config(:model, url, :openai_ask, :ask, :openai, env: 'OPENAI_MODEL', default: "gpt-image-1")
+      end
+
+      messages = self.process_input messages
+      input = []
+      parameters = {}
+      messages.each do |message|
+        input << message
+      end
+      parameters[:prompt] = LLM.print(input)
+
+      response = client.images.generate(parameters: parameters)
+
+      response
+    end
+  end
+end