scout-ai 1.0.0 → 1.0.1
This diff compares publicly available package versions as published to their respective registries. It is provided for informational purposes only and reflects the packages exactly as they appear in those public registries.
- checksums.yaml +4 -4
- data/.vimproject +80 -15
- data/README.md +296 -0
- data/Rakefile +2 -0
- data/VERSION +1 -1
- data/doc/Agent.md +279 -0
- data/doc/Chat.md +258 -0
- data/doc/LLM.md +446 -0
- data/doc/Model.md +513 -0
- data/doc/RAG.md +129 -0
- data/lib/scout/llm/agent/chat.rb +51 -1
- data/lib/scout/llm/agent/delegate.rb +39 -0
- data/lib/scout/llm/agent/iterate.rb +44 -0
- data/lib/scout/llm/agent.rb +42 -21
- data/lib/scout/llm/ask.rb +38 -6
- data/lib/scout/llm/backends/anthropic.rb +147 -0
- data/lib/scout/llm/backends/bedrock.rb +1 -1
- data/lib/scout/llm/backends/ollama.rb +23 -29
- data/lib/scout/llm/backends/openai.rb +34 -40
- data/lib/scout/llm/backends/responses.rb +158 -110
- data/lib/scout/llm/chat.rb +250 -94
- data/lib/scout/llm/embed.rb +4 -4
- data/lib/scout/llm/mcp.rb +28 -0
- data/lib/scout/llm/parse.rb +1 -0
- data/lib/scout/llm/rag.rb +9 -0
- data/lib/scout/llm/tools/call.rb +66 -0
- data/lib/scout/llm/tools/knowledge_base.rb +158 -0
- data/lib/scout/llm/tools/mcp.rb +59 -0
- data/lib/scout/llm/tools/workflow.rb +69 -0
- data/lib/scout/llm/tools.rb +58 -143
- data/lib/scout-ai.rb +1 -0
- data/scout-ai.gemspec +31 -18
- data/scout_commands/agent/ask +28 -71
- data/scout_commands/documenter +148 -0
- data/scout_commands/llm/ask +2 -2
- data/scout_commands/llm/server +319 -0
- data/share/server/chat.html +138 -0
- data/share/server/chat.js +468 -0
- data/test/scout/llm/backends/test_anthropic.rb +134 -0
- data/test/scout/llm/backends/test_openai.rb +45 -6
- data/test/scout/llm/backends/test_responses.rb +124 -0
- data/test/scout/llm/test_agent.rb +0 -70
- data/test/scout/llm/test_ask.rb +3 -1
- data/test/scout/llm/test_chat.rb +43 -1
- data/test/scout/llm/test_mcp.rb +29 -0
- data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
- data/test/scout/llm/tools/test_mcp.rb +11 -0
- data/test/scout/llm/tools/test_workflow.rb +39 -0
- metadata +56 -17
- data/README.rdoc +0 -18
- data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
- data/python/scout_ai/atcold/plot_lib.py +0 -141
- data/python/scout_ai/atcold/spiral.py +0 -27
- data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
- data/python/scout_ai/language_model.py +0 -70
- /data/{python/scout_ai/atcold/__init__.py → test/scout/llm/tools/test_call.rb} +0 -0
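
Most of the new code in this release centers on tool calling: `LLM::OpenAI.ask` and `LLM::Responses.ask` accept an array of tool definitions plus a block that is invoked with the tool name and arguments and whose return value is used as the tool output. The sketch below is condensed from the tests further down in this diff; the prompt, tool schema and model name are lifted from those tests rather than from library documentation, so treat it as illustrative.

    require 'scout-ai'

    tools = [{
      "type": "function",
      "function": {
        "name": "get_current_temperature",
        "description": "Get the current temperature and raining conditions for a specific location",
        "parameters": {
          "type": "object",
          "properties": { "location": { "type": "string" } },
          "required": ["location"]
        }
      }
    }]

    # The block plays the role of the tool: it receives each tool call and its
    # return value is fed back to the model before the final answer is produced.
    answer = LLM::OpenAI.ask "user: What is the weather in London. Should I take my umbrella?",
                             tools: tools, model: "gpt-5" do |name, arguments|
      "It's 15 degrees and raining."
    end
    puts answer

The Responses backend tests below use the same tools array but without the `function` wrapper, matching the flat Responses-style tool format.
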
data/test/scout/llm/backends/test_openai.rb
CHANGED
@@ -2,7 +2,7 @@ require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
 require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
 
 class TestLLMOpenAI < Test::Unit::TestCase
-  def
+  def test_ask
     prompt =<<-EOF
 system: you are a coding helper that only write code and comments without formatting so that it can work directly, avoid the initial and end commas ```.
 user: write a script that sorts files in a directory
@@ -21,7 +21,7 @@ Some text
     assert(Float === emb.first)
   end
 
-  def
+  def test_tool_call_output
     Log.severity = 0
     prompt =<<-EOF
 function_call:
@@ -41,7 +41,7 @@ know if you didn't get it.
     ppp LLM::OpenAI.ask prompt, model: 'gpt-4.1-nano'
   end
 
-  def
+  def test_tool_call_output_2
     Log.severity = 0
     prompt =<<-EOF
 function_call:
@@ -59,7 +59,7 @@ should i take an umbrella?
     ppp LLM::OpenAI.ask prompt, model: 'gpt-4.1-nano'
   end
 
-  def
+  def test_tool_call_output_features
     Log.severity = 0
     prompt =<<-EOF
 function_call:
@@ -79,7 +79,7 @@ know if you didn't get it.
     ppp LLM::OpenAI.ask prompt, model: 'gpt-4.1-nano'
   end
 
-  def
+  def test_tool_call_output_weather
     Log.severity = 0
     prompt =<<-EOF
 function_call:
@@ -97,6 +97,45 @@ should i take an umbrella?
     ppp LLM::OpenAI.ask prompt, model: 'gpt-4.1-nano'
   end
 
+
+  def test_tool_gpt5
+    prompt =<<-EOF
+user:
+What is the weather in London. Should I take my umbrella?
+    EOF
+
+    tools = [
+      {
+        "type": "function",
+        "function": {
+          "name": "get_current_temperature",
+          "description": "Get the current temperature and raining conditions for a specific location",
+          "parameters": {
+            "type": "object",
+            "properties": {
+              "location": {
+                "type": "string",
+                "description": "The city and state, e.g., San Francisco, CA"
+              },
+              "unit": {
+                "type": "string",
+                "enum": ["Celsius", "Fahrenheit"],
+                "description": "The temperature unit to use. Infer this from the user's location."
+              }
+            },
+            "required": ["location", "unit"]
+          }
+        }
+      },
+    ]
+
+    respose = LLM::OpenAI.ask prompt, tool_choice: 'required', tools: tools, model: "gpt-5", log_errors: true do |name,arguments|
+      "It's 15 degrees and raining."
+    end
+
+    ppp respose
+  end
+
   def test_tool
     prompt =<<-EOF
 user:
@@ -136,7 +175,7 @@ What is the weather in London. Should I take my umbrella?
     ppp respose
   end
 
-  def
+  def test_json_output
     prompt =<<-EOF
 system:
 
data/test/scout/llm/backends/test_responses.rb
CHANGED
@@ -111,5 +111,129 @@ What other movies have the protagonists of the original gost busters played on,
     sss 0
     ppp LLM::Responses.ask prompt, format: :json
   end
+
+  def test_json_format
+    prompt =<<-EOF
+user:
+
+What other movies have the protagonists of the original gost busters played on.
+Name each actor and the top movie they took part of
+    EOF
+    sss 0
+
+    format = {
+      name: 'actors_and_top_movies',
+      type: 'object',
+      properties: {},
+      additionalProperties: {type: :string}
+    }
+    ppp LLM::Responses.ask prompt, format: format
+  end
+
+  def test_json_format_list
+    prompt =<<-EOF
+user:
+
+What other movies have the protagonists of the original gost busters played on.
+Name each actor as keys and the top 3 movies they took part of as values
+    EOF
+    sss 0
+
+    format = {
+      name: 'actors_and_top_movies',
+      type: 'object',
+      properties: {},
+      additionalProperties: {type: :array, items: {type: :string}}
+    }
+    ppp LLM::Responses.ask prompt, format: format
+  end
+
+  def test_json_format_actor_list
+    prompt =<<-EOF
+user:
+
+What other movies have the protagonists of the original gost busters played on.
+Name each actor as keys and the top 3 movies they took part of as values
+    EOF
+    sss 0
+
+    format = {
+      name: 'actors_and_top_movies',
+      type: 'object',
+      properties: {},
+      additionalProperties: false,
+      items: {
+        type: 'object',
+        properties: {
+          name: {type: :string, description: 'actor name'},
+          movies: {type: :array, description: 'list of top 3 movies', items: {type: :string, description: 'movie title plus year in parenthesis'} },
+          additionalProperties: false
+        }
+      }
+    }
+
+    schema = {
+      "type": "object",
+      "properties": {
+        "people": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "properties": {
+              "name": { "type": "string" },
+              "movies": {
+                "type": "array",
+                "items": { "type": "string" },
+                "minItems": 3,
+                "maxItems": 3
+              }
+            },
+            "required": ["name", "movies"],
+            additionalProperties: false
+          }
+        }
+      },
+      additionalProperties: false,
+      "required": ["people"]
+    }
+    ppp LLM::Responses.ask prompt, format: schema
+  end
+
+  def test_tool_gpt5
+    prompt =<<-EOF
+user:
+What is the weather in London. Should I take my umbrella?
+    EOF
+
+    tools = [
+      {
+        "type": "function",
+        "name": "get_current_temperature",
+        "description": "Get the current temperature and raining conditions for a specific location",
+        "parameters": {
+          "type": "object",
+          "properties": {
+            "location": {
+              "type": "string",
+              "description": "The city and state, e.g., San Francisco, CA"
+            },
+            "unit": {
+              "type": "string",
+              "enum": ["Celsius", "Fahrenheit"],
+              "description": "The temperature unit to use. Infer this from the user's location."
+            }
+          },
+          "required": ["location", "unit"]
+        }
+      },
+    ]
+
+    sss 0
+    respose = LLM::Responses.ask prompt, tool_choice: 'required', tools: tools, model: "gpt-5", log_errors: true do |name,arguments|
+      "It's 15 degrees and raining."
+    end
+
+    ppp respose
+  end
 end
 
data/test/scout/llm/test_agent.rb
CHANGED
@@ -39,76 +39,6 @@ class TestLLMAgent < Test::Unit::TestCase
     end
   end
 
-  def test_workflow
-    m = Module.new do
-      extend Workflow
-      self.name = "Registration"
-
-      desc "Register a person"
-      input :name, :string, "Last, first name"
-      input :age, :integer, "Age"
-      input :gender, :select, "Gender", nil, :select_options => %w(male female)
-      task :person => :yaml do
-        iii inputs.to_hash
-        inputs.to_hash
-      end
-    end
-
-    sss 0
-    #ppp LLM.workflow_ask(m, "Register Eduard Smith, a 25 yo male", model: "Meta-Llama-3.3-70B-Instruct")
-    ppp LLM.workflow_ask(m, "Register Eduard Smith, a 25 yo male, using a tool call to the tool provided", backend: 'ollama', model: "llama3")
-  end
-
-  def _test_openai
-    TmpFile.with_dir do |dir|
-      kb = KnowledgeBase.new dir
-      kb.format = {"Person" => "Alias"}
-      kb.register :brothers, datafile_test(:person).brothers, undirected: true
-      kb.register :marriages, datafile_test(:person).marriages, undirected: true, source: "=>Alias", target: "=>Alias"
-      kb.register :parents, datafile_test(:person).parents
-
-      sss 3
-      agent = LLM::Agent.new knowledge_base: kb, model: 'gpt-4o'
-
-      agent.system = ""
-
-      ppp agent.ask "Who is Miguel's brother-in-law"
-    end
-  end
-
-  def _test_argonne
-    TmpFile.with_dir do |dir|
-      kb = KnowledgeBase.new dir
-      kb.format = {"Person" => "Alias"}
-      kb.register :brothers, datafile_test(:person).brothers, undirected: true
-      kb.register :marriages, datafile_test(:person).marriages, undirected: true, source: "=>Alias", target: "=>Alias"
-      kb.register :parents, datafile_test(:person).parents
-
-      agent.system = ""
-
-      ppp agent.ask "Who is Miguel's brother-in-law"
-    end
-  end
-
-  def _test_nvidia
-    TmpFile.with_dir do |dir|
-      kb = KnowledgeBase.new dir
-      kb.format = {"Person" => "Alias"}
-      kb.register :brothers, datafile_test(:person).brothers, undirected: true
-      kb.register :marriages, datafile_test(:person).marriages, undirected: true, source: "=>Alias", target: "=>Alias"
-      kb.register :parents, datafile_test(:person).parents
-
-      sss 0
-
-      ppp LLM::OpenAI.ask "Say Hi", url: "https://integrate.api.nvidia.com/v1", model: "deepseek-ai/deepseek-r1"
-      exit
-
-
-      agent.system = ""
-
-      ppp agent.ask "Who is Miguel's brother-in-law. Make use of the tools using tool_calls"
-    end
-  end
 
 end
 
data/test/scout/llm/test_ask.rb
CHANGED
@@ -38,9 +38,11 @@ user: write a script that sorts files in a directory
           "1 minute"
         end
       end
+      export :recipe_steps, :step_time
     end
 
-
+    sss 0
+    ppp LLM.workflow_ask(m, "How much time does it take to prepare a 'vanilla' cake recipe, use the tools provided to find out")
   end
 
   def test_knowledbase
data/test/scout/llm/test_chat.rb
CHANGED
@@ -173,7 +173,25 @@ What other movies have the protagonists of the original gost busters played on,
     end
   end
 
-  def
+  def test_tool
+    require 'scout/llm/ask'
+
+    sss 0
+    question =<<-EOF
+user:
+
+Use the provided tool to learn the instructions of baking a tray of muffins. Don't
+give me your own recipe, return the one provided by the tool
+
+tool: Baking
+    EOF
+
+    TmpFile.with_file question do |file|
+      ppp LLM.ask file, endpoint: :nano
+    end
+  end
+
+  def test_tools_with_task
     require 'scout/llm/ask'
 
     question =<<-EOF
@@ -210,5 +228,29 @@ association: marriages #{datafile_test(:person).marriages} undirected=true sourc
       ppp LLM.ask file
     end
   end
+
+  def test_previous_response
+    require 'scout/llm/ask'
+    sss 0
+    question =<<-EOF
+user:
+
+Say hi
+
+assistant:
+
+Hi
+
+previous_response_id: asdfasdfasdfasdf
+
+Bye
+
+    EOF
+
+    messages = LLM.messages question
+
+    iii messages
+
+  end
 end
 
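
The chat tests above exercise scout-ai's plain-text chat format, where `user:`, `assistant:` and `system:` introduce messages and directives such as `tool:`, `function_call:` and `previous_response_id:` can be interleaved. A minimal sketch of how such text is parsed and sent, using only calls that appear in the tests above (the conversation itself is illustrative):

    chat =<<-EOF
user:

Say hi

assistant:

Hi

user:

Bye
    EOF

    messages = LLM.messages chat    # parse the role-tagged blocks into messages
    TmpFile.with_file chat do |file|
      ppp LLM.ask file              # send the whole conversation to a backend
    end
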
data/test/scout/llm/test_mcp.rb
ADDED
@@ -0,0 +1,29 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+require "scout-ai"
+class TestMCP < Test::Unit::TestCase
+  def test_workflow_stdio
+    require "mcp/server/transports/stdio_transport"
+    wf = Module.new do
+      extend Workflow
+      self.name = "TestWorkflow"
+
+      desc "Just say hi to someone"
+      input :name, :string, "Name", nil, required: true
+      task :hi => :string do |name|
+        "Hi #{name}"
+      end
+
+      desc "Just say bye to someone"
+      input :name, :string, "Name", nil, required: true
+      task :bye => :string do |name|
+        "Bye #{name}"
+      end
+    end
+
+    transport = MCP::Server::Transports::StdioTransport.new(wf.mcp(:hi))
+    transport.open
+  end
+end
+
data/test/scout/llm/tools/test_knowledge_base.rb
ADDED
@@ -0,0 +1,22 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestLLMToolKB < Test::Unit::TestCase
+  def test_knowledbase_definition
+    TmpFile.with_dir do |dir|
+      kb = KnowledgeBase.new dir
+      kb.register :brothers, datafile_test(:person).brothers, undirected: true
+      kb.register :parents, datafile_test(:person).parents
+
+      assert_include kb.all_databases, :brothers
+
+      assert_equal Person, kb.target_type(:parents)
+
+      knowledge_base_definition = LLM.knowledge_base_tool_definition(kb)
+      ppp JSON.pretty_generate knowledge_base_definition
+
+      assert_equal ['Isa~Miki', 'Miki~Isa', 'Guille~Clei'], LLM.call_knowledge_base(kb, :brothers, entities: %w(Isa Miki Guille))
+    end
+  end
+end
+
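
Beyond the direct helpers tested above (`LLM.knowledge_base_tool_definition` and `LLM.call_knowledge_base`), the examples removed from test_agent.rb suggest the same knowledge base can be attached to an agent, which then presumably answers by issuing tool calls against the registered databases. A hedged sketch along those lines, with the setup, model name and question copied from the removed test:

    TmpFile.with_dir do |dir|
      kb = KnowledgeBase.new dir
      kb.format = {"Person" => "Alias"}
      kb.register :brothers, datafile_test(:person).brothers, undirected: true
      kb.register :marriages, datafile_test(:person).marriages, undirected: true, source: "=>Alias", target: "=>Alias"

      # The agent is expected to query the knowledge base via tool calls.
      agent = LLM::Agent.new knowledge_base: kb, model: 'gpt-4o'
      ppp agent.ask "Who is Miguel's brother-in-law"
    end
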
data/test/scout/llm/tools/test_mcp.rb
ADDED
@@ -0,0 +1,11 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestClass < Test::Unit::TestCase
+  def test_client
+    c = LLM.mcp_tools("https://api.githubcopilot.com/mcp/")
+    assert_include c.keys, "get_me"
+  end
+end
+
+
data/test/scout/llm/tools/test_workflow.rb
ADDED
@@ -0,0 +1,39 @@
+require File.expand_path(__FILE__).sub(%r(/test/.*), '/test/test_helper.rb')
+require File.expand_path(__FILE__).sub(%r(.*/test/), '').sub(/test_(.*)\.rb/,'\1')
+
+class TestLLMToolWorkflow < Test::Unit::TestCase
+  def test_workflow_definition
+    m = Module.new do
+      extend Workflow
+      self.name = "RecipeWorkflow"
+
+      desc "List the steps to cook a recipe"
+      input :recipe, :string, "Recipe for which to extract steps"
+      task :recipe_steps => :array do |recipe|
+        ["prepare batter", "bake"]
+      end
+
+      desc "Calculate time spent in each step of the recipe"
+      input :step, :string, "Cooking step"
+      task :step_time => :string do |step|
+        case step
+        when "prepare batter"
+          "2 hours"
+        when "bake"
+          "30 minutes"
+        else
+          "1 minute"
+        end
+      end
+    end
+
+    LLM.task_tool_definition(m, :recipe_steps)
+    LLM.task_tool_definition(m, :step_time)
+
+    tool_definitions = LLM.workflow_tools(m)
+    ppp JSON.pretty_generate tool_definitions
+
+    assert_equal ["prepare batter", "bake"], LLM.call_workflow(m, :recipe_steps)
+  end
+end
+
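
These workflow tool helpers pair with `LLM.workflow_ask`, which the updated test_ask.rb uses to let the model drive a workflow's exported tasks. A condensed sketch combining the two tests (the workflow definition and the call come from this diff; the question is trimmed to match the single exported task):

    m = Module.new do
      extend Workflow
      self.name = "RecipeWorkflow"

      desc "List the steps to cook a recipe"
      input :recipe, :string, "Recipe for which to extract steps"
      task :recipe_steps => :array do |recipe|
        ["prepare batter", "bake"]
      end

      export :recipe_steps
    end

    # Exported tasks are offered to the model as tools; tool calls execute the
    # tasks and their results are folded back into the conversation.
    ppp LLM.workflow_ask(m, "What are the steps of a 'vanilla' cake recipe? Use the tools provided to find out")
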
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: scout-ai
 version: !ruby/object:Gem::Version
-  version: 1.0.0
+  version: 1.0.1
 platform: ruby
 authors:
 - Miguel Vazquez
 bindir: bin
 cert_chain: []
-date:
+date: 1980-01-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: scout-rig
@@ -23,6 +23,34 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: ruby-openai
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: ruby-mcp-client
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 description: assorted functionalities to help scouts use AI
 email: mikisvaz@gmail.com
 executables:
@@ -31,20 +59,28 @@ extensions: []
 extra_rdoc_files:
 - LICENSE
 - LICENSE.txt
-- README.rdoc
+- README.md
 files:
 - ".document"
 - ".vimproject"
 - LICENSE
 - LICENSE.txt
-- README.rdoc
+- README.md
 - Rakefile
 - VERSION
 - bin/scout-ai
+- doc/Agent.md
+- doc/Chat.md
+- doc/LLM.md
+- doc/Model.md
+- doc/RAG.md
 - lib/scout-ai.rb
 - lib/scout/llm/agent.rb
 - lib/scout/llm/agent/chat.rb
+- lib/scout/llm/agent/delegate.rb
+- lib/scout/llm/agent/iterate.rb
 - lib/scout/llm/ask.rb
+- lib/scout/llm/backends/anthropic.rb
 - lib/scout/llm/backends/bedrock.rb
 - lib/scout/llm/backends/huggingface.rb
 - lib/scout/llm/backends/ollama.rb
@@ -54,9 +90,14 @@ files:
 - lib/scout/llm/backends/responses.rb
 - lib/scout/llm/chat.rb
 - lib/scout/llm/embed.rb
+- lib/scout/llm/mcp.rb
 - lib/scout/llm/parse.rb
 - lib/scout/llm/rag.rb
 - lib/scout/llm/tools.rb
+- lib/scout/llm/tools/call.rb
+- lib/scout/llm/tools/knowledge_base.rb
+- lib/scout/llm/tools/mcp.rb
+- lib/scout/llm/tools/workflow.rb
 - lib/scout/llm/utils.rb
 - lib/scout/model/base.rb
 - lib/scout/model/python/base.rb
@@ -73,37 +114,30 @@ files:
 - lib/scout/model/util/run.rb
 - lib/scout/model/util/save.rb
 - python/scout_ai/__init__.py
-- python/scout_ai/__pycache__/__init__.cpython-310.pyc
-- python/scout_ai/__pycache__/__init__.cpython-311.pyc
-- python/scout_ai/__pycache__/huggingface.cpython-310.pyc
-- python/scout_ai/__pycache__/huggingface.cpython-311.pyc
-- python/scout_ai/__pycache__/util.cpython-310.pyc
-- python/scout_ai/__pycache__/util.cpython-311.pyc
-- python/scout_ai/atcold/__init__.py
-- python/scout_ai/atcold/plot_lib.py
-- python/scout_ai/atcold/spiral.py
 - python/scout_ai/huggingface/data.py
 - python/scout_ai/huggingface/eval.py
 - python/scout_ai/huggingface/model.py
 - python/scout_ai/huggingface/rlhf.py
 - python/scout_ai/huggingface/train/__init__.py
-- python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc
-- python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc
 - python/scout_ai/huggingface/train/next_token.py
-- python/scout_ai/language_model.py
 - python/scout_ai/util.py
 - scout-ai.gemspec
 - scout_commands/agent/ask
 - scout_commands/agent/kb
+- scout_commands/documenter
 - scout_commands/llm/ask
 - scout_commands/llm/process
+- scout_commands/llm/server
 - scout_commands/llm/template
+- share/server/chat.html
+- share/server/chat.js
 - test/data/cat.jpg
 - test/data/person/brothers
 - test/data/person/identifiers
 - test/data/person/marriages
 - test/data/person/parents
 - test/scout/llm/agent/test_chat.rb
+- test/scout/llm/backends/test_anthropic.rb
 - test/scout/llm/backends/test_bedrock.rb
 - test/scout/llm/backends/test_huggingface.rb
 - test/scout/llm/backends/test_ollama.rb
@@ -115,10 +149,15 @@ files:
 - test/scout/llm/test_ask.rb
 - test/scout/llm/test_chat.rb
 - test/scout/llm/test_embed.rb
+- test/scout/llm/test_mcp.rb
 - test/scout/llm/test_parse.rb
 - test/scout/llm/test_rag.rb
 - test/scout/llm/test_tools.rb
 - test/scout/llm/test_utils.rb
+- test/scout/llm/tools/test_call.rb
+- test/scout/llm/tools/test_knowledge_base.rb
+- test/scout/llm/tools/test_mcp.rb
+- test/scout/llm/tools/test_workflow.rb
 - test/scout/model/python/huggingface/causal/test_next_token.rb
 - test/scout/model/python/huggingface/test_causal.rb
 - test/scout/model/python/huggingface/test_classification.rb
@@ -147,7 +186,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.7.0.dev
 specification_version: 4
 summary: AI gear for scouts
 test_files: []
data/README.rdoc
DELETED
@@ -1,18 +0,0 @@
-= scout-ai
-
-Description goes here.
-
-== Contributing to scout-ai
-
-* Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet.
-* Check out the issue tracker to make sure someone already hasn't requested it and/or contributed it.
-* Fork the project.
-* Start a feature/bugfix branch.
-* Commit and push until you are happy with your contribution.
-* Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
-* Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or is otherwise necessary, that is fine, but please isolate to its own commit so I can cherry-pick around it.
-
-== Copyright
-
-Copyright (c) 2025 Miguel Vazquez. See LICENSE.txt for
-further details.
Binary files (the six deleted __pycache__/*.pyc files listed above) are not shown.