scout-ai 1.0.0 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.vimproject +80 -15
- data/README.md +296 -0
- data/Rakefile +2 -0
- data/VERSION +1 -1
- data/doc/Agent.md +279 -0
- data/doc/Chat.md +258 -0
- data/doc/LLM.md +446 -0
- data/doc/Model.md +513 -0
- data/doc/RAG.md +129 -0
- data/lib/scout/llm/agent/chat.rb +51 -1
- data/lib/scout/llm/agent/delegate.rb +39 -0
- data/lib/scout/llm/agent/iterate.rb +44 -0
- data/lib/scout/llm/agent.rb +42 -21
- data/lib/scout/llm/ask.rb +38 -6
- data/lib/scout/llm/backends/anthropic.rb +147 -0
- data/lib/scout/llm/backends/bedrock.rb +1 -1
- data/lib/scout/llm/backends/ollama.rb +23 -29
- data/lib/scout/llm/backends/openai.rb +34 -40
- data/lib/scout/llm/backends/responses.rb +158 -110
- data/lib/scout/llm/chat.rb +250 -94
- data/lib/scout/llm/embed.rb +4 -4
- data/lib/scout/llm/mcp.rb +28 -0
- data/lib/scout/llm/parse.rb +1 -0
- data/lib/scout/llm/rag.rb +9 -0
- data/lib/scout/llm/tools/call.rb +66 -0
- data/lib/scout/llm/tools/knowledge_base.rb +158 -0
- data/lib/scout/llm/tools/mcp.rb +59 -0
- data/lib/scout/llm/tools/workflow.rb +69 -0
- data/lib/scout/llm/tools.rb +58 -143
- data/lib/scout-ai.rb +1 -0
- data/scout-ai.gemspec +31 -18
- data/scout_commands/agent/ask +28 -71
- data/scout_commands/documenter +148 -0
- data/scout_commands/llm/ask +2 -2
- data/scout_commands/llm/server +319 -0
- data/share/server/chat.html +138 -0
- data/share/server/chat.js +468 -0
- data/test/scout/llm/backends/test_anthropic.rb +134 -0
- data/test/scout/llm/backends/test_openai.rb +45 -6
- data/test/scout/llm/backends/test_responses.rb +124 -0
- data/test/scout/llm/test_agent.rb +0 -70
- data/test/scout/llm/test_ask.rb +3 -1
- data/test/scout/llm/test_chat.rb +43 -1
- data/test/scout/llm/test_mcp.rb +29 -0
- data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
- data/test/scout/llm/tools/test_mcp.rb +11 -0
- data/test/scout/llm/tools/test_workflow.rb +39 -0
- metadata +56 -17
- data/README.rdoc +0 -18
- data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
- data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
- data/python/scout_ai/atcold/plot_lib.py +0 -141
- data/python/scout_ai/atcold/spiral.py +0 -27
- data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
- data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
- data/python/scout_ai/language_model.py +0 -70
- /data/{python/scout_ai/atcold/__init__.py → test/scout/llm/tools/test_call.rb} +0 -0
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a13a09db3a006c0a8a2c5bc2487d5248c7b599ea9013924ab588d25540e8848d
+  data.tar.gz: 73f87320f05289b9818aef772b205b39745ee5553733664614988e3d45c66c64
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 72bea9d93dc2d374580c763688d9a2b5e7a576ca5806e5169f4f88d0640a7fe29e9a7013cf55c0d89763699c630c662e918ebb9b9033948be6a70d2491eb7575
+  data.tar.gz: dfe39bc0d5f624df7ecced479f379902492088555bd6afbe140e89b94dadaa6e8fed82b8c04d6369ca8f8dac5014b8c3cb0a0d64657f6e5d37b4306435f8bbf1
```
data/.vimproject
CHANGED
```diff
@@ -1,47 +1,90 @@
 scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
   Rakefile
+  README.md
+  LICENSE.txt
   bin=bin filter="*"{
     scout-ai
   }
-
-
-
-
+  chats=chats filter="*"{
+    test_github
+
+    test_stdio
+
+    test_claude
+
+    mcp_server
+
+    chat
+    text_options
+
+    multi_agent.rb
+
+    pdf
+    agent
+    AGS
+    documenter.rb
+    genome_editing
+    hello
+    high_level
+    test_chat
   system=system{
     scout-ai
   }
   doc=doc{
+    agent
     chat
-    model.analysis
     model
+    model.analysis
   }
   develop=develop{
+    causalLM
+    chat
+    digest
+    finetuning
+    json_format
+    model
+    rf
     training=training{
-      intro
       basics
+      data.tmp
+      intro
+      python
       data=data{
         main
         next_token
+        test.rb
       }
-      data.tmp
-      python
     }
-    chat
-    model
-    causalLM
-    rf
   }
   refactor=refactor{
     chat
   }
   help=help{
-    tools
-    from_pretrained
     fine-tunning
+    from_pretrained
+    previous_response
   }
   test=test{
+    agent_chat.rb
+    block
+    file
+    imports
+    imports.rb
+    prev
     rf.rb
+    tool
   }
+  web=web{
+    chat
+    cmd
+    web
+  }
+  }
+  doc=doc filter="*"{
+    Agent.md
+    Chat.md
+    LLM.md
+    Model.md
   }
   lib=lib {
     scout-ai.rb
@@ -50,12 +93,20 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
     utils.rb
     parse.rb
     tools.rb
+    tools=tools{
+      mcp.rb
+      workflow.rb
+      knowledge_base.rb
+      call.rb
+    }
     chat.rb
 
     backends=backends{
       openai.rb
+      anthropic.rb
       responses.rb
       ollama.rb
+      bedrock.rb
       openwebui.rb
       huggingface.rb
       relay.rb
@@ -67,7 +118,14 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
 
     rag.rb
 
+    mcp.rb
+
     agent.rb
+    agent=agent{
+      chat.rb
+      iterate.rb
+      delegate.rb
+    }
   }
   model=model{
     util=util{
@@ -114,7 +172,6 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
       }
     }
   }
-
   test=test {
     data=data filter="*"{
       person=person{
@@ -129,14 +186,22 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
     data=data filter="*"{
     }
   scout_commands=scout_commands filter="*"{
+    documenter
     llm=llm{
       ask
       template
       process
+      server
     }
     agent=agent{
       ask
       kb
     }
   }
+  share=share{
+    server=server files="*"{
+      chat.html
+      chat.js
+    }
+  }
 }
```
data/README.md
ADDED
@@ -0,0 +1,296 @@
# scout-ai

Agentic AI and machine learning for Scout: a compact layer to train/evaluate models (Ruby, Python/PyTorch, Hugging Face), talk to LLMs across multiple backends, wire Workflow tasks as tools, and build persistent, declarative conversations and agents.

This package sits on top of the Scout stack:

- scout-essentials — low-level functionality (Open, TSV, Persist, Path, ConcurrentStream, Log, etc.)
- scout-gear — core data modules (TSV, KnowledgeBase, Entity, Association, Workflow, WorkQueue, etc.)
- scout-rig — language bridges (notably Python via PyCall)
- scout-camp — remote servers, cloud deployments, web interfaces
- scout-ai — LLMs, agents and model wrappers (this repository)

All packages are available under github.com/mikisvaz:

- https://github.com/mikisvaz/scout-essentials
- https://github.com/mikisvaz/scout-gear
- https://github.com/mikisvaz/scout-rig
- https://github.com/mikisvaz/scout-camp
- https://github.com/mikisvaz/scout-ai

Scout originates from the Rbbt ecosystem (bioinformatics workflows). Numerous end-to-end examples live in the Rbbt-Workflows organization:

- https://github.com/Rbbt-Workflows

The sections below summarize the main components (LLM, Chat, Agent, Model), quick starts, and the command-line interface. For full APIs, see the doc/ directory.

- doc/LLM.md — multi-backend LLM orchestration, tool calling, embeddings
- doc/Chat.md — conversation builder/serializer
- doc/Agent.md — stateful agents wired to Workflows and KnowledgeBases
- doc/Model.md — model wrappers (ScoutModel, Python/Torch/Hugging Face)

## Installation and requirements

Scout is a Ruby framework. Add scout-ai (and the other packages you need) to your project and require as needed.

- Ruby 3.x recommended
- For Python-backed models (Torch/Hugging Face):
  - Python 3 (installed and visible in PATH)
  - pycall gem (Ruby ↔ Python bridge)
  - Python packages: torch, transformers, numpy, pandas (as needed)
- For OpenAI or similar backends: set API keys in environment or config (see LLM backend docs)

Typical Gemfile fragment:

```ruby
gem 'scout-essentials', git: 'https://github.com/mikisvaz/scout-essentials'
gem 'scout-gear', git: 'https://github.com/mikisvaz/scout-gear'
gem 'scout-rig', git: 'https://github.com/mikisvaz/scout-rig'
gem 'scout-ai', git: 'https://github.com/mikisvaz/scout-ai'
```

Backends and endpoints can be configured under Scout.etc.AI/<endpoint>.yaml (merged into asks), or via environment variables per backend (see doc/LLM.md).
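As a sketch of how presets and per-call options interact (the endpoint name here is hypothetical, not part of the package): keys from the preset YAML are merged into the ask, and explicit options take precedence.

```ruby
# Assumes a preset saved as etc/AI/my_endpoint.yaml (illustrative name);
# its keys are merged into any ask that names the endpoint, and the
# explicit model: below would override a model set in that file.
answer = LLM.ask "Say hi", endpoint: :my_endpoint, model: "gpt-4.1-mini"
puts answer
```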
## Quick starts

### Ask a model

```ruby
require 'scout-ai'
answer = LLM.ask "What is the capital of France?", backend: :openai, model: "gpt-4.1-mini"
puts answer
```

Chat builder:

```ruby
chat = Chat.setup []
chat.system "You are a terse assistant"
chat.user "List three colors"
puts chat.ask
```

### Tool calling with a Workflow

Export Workflow tasks as callable tools and let the model invoke them through function calls.

```ruby
require 'scout-gear' # defines Workflow

m = Module.new do
  extend Workflow
  self.name = "Registration"

  input :name, :string
  input :age, :integer
  input :gender, :select, nil, select_options: %w(male female)
  task :person => :yaml do inputs.to_hash end
end

puts LLM.workflow_ask(m, "Register Eduard Smith, a 25 yo male, using a tool call",
  backend: 'ollama', model: 'llama3')
```

### Stateful agent with a KnowledgeBase

```ruby
require 'scout-gear' # defines KnowledgeBase

TmpFile.with_dir do |dir|
  kb = KnowledgeBase.new dir
  kb.register :brothers, datafile_test(:person).brothers, undirected: true
  kb.register :marriages, datafile_test(:person).marriages,
    undirected: true, source: "=>Alias", target: "=>Alias"
  kb.register :parents, datafile_test(:person).parents

  agent = LLM::Agent.new knowledge_base: kb
  puts agent.ask "Who is Miki's brother in law?"
end
```

### Structured iteration

```ruby
agent = LLM::Agent.new
agent.iterate("List three steps to bake bread") { |step| puts "- #{step}" }

agent.iterate_dictionary("Give capital cities for FR, ES, IT") do |country, capital|
  puts "#{country}: #{capital}"
end
```

### Use a Hugging Face classifier inside a Workflow

From the ExTRI2 workflow (see below):

```ruby
model = HuggingfaceModel.new 'SequenceClassification', tri_model_dir, nil,
  tokenizer_args: { model_max_length: 512, truncation: true },
  return_logits: true

model.extract_features do |_, rows|
  rows.map do |text, tf, tg|
    text.sub("[TF]", "<TF>#{tf}</TF>").sub("[TG]", "<TG>#{tg}</TG>")
  end
end

model.init
preds = model.eval_list tsv.slice(%w(Text TF Gene)).values
tsv.add_field "Valid score" do
  non_valid, valid = preds.shift
  Misc.softmax([valid, non_valid]).first rescue 0
end
```

## Components overview

### LLM (doc/LLM.md)

A compact, multi-backend layer to ask LLMs, wire function-calling tools, parse/print chats, and compute embeddings.

- ask(question, options={}, &block) — normalize a question to messages (LLM.chat), merge endpoint/model/format, run backend, and return assistant output (or messages with return_messages: true); see the sketch after this list
- Backends: OpenAI-style, Responses (multimodal, JSON schema), Ollama, OpenWebUI, AWS Bedrock, and a simple Relay
- Tools: export Workflow tasks (LLM.workflow_tools) and KnowledgeBase lookups; tool calls are handled via a block
- Embeddings and a tiny RAG helper
- Chat/print pipeline: imports, clean, tasks/jobs as function calls, files/directories as tagged content
- Configuration: endpoint defaults in Scout.etc.AI/endpoint.yaml are merged into options automatically
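A minimal sketch of the two return modes named in the first bullet (backend and model values are illustrative):

```ruby
# Default: the assistant's text is returned.
text = LLM.ask "Name one prime number", backend: :openai, model: "gpt-4.1-mini"

# With return_messages: true the message array comes back instead,
# which is useful for continuing the conversation later.
messages = LLM.ask "Name one prime number", backend: :openai,
  model: "gpt-4.1-mini", return_messages: true
```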

### Chat (doc/Chat.md)

A lightweight builder over an Array of {role:, content:} messages with helpers:

- user/system/assistant, file/directory tagging, import/continue
- tool/workflow task declarations, jobs/inline jobs
- association declarations (KnowledgeBase)
- option, endpoint, model, format (including JSON schema requests)
- ask, chat, json/json_format, print/save/write/write_answer, branch/shed

Use Chat to author “chat files” on disk or build conversations programmatically.
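A short sketch combining a few of these helpers, under two assumptions worth checking against doc/Chat.md: that the option helpers take their value as an argument, and that branch returns an independent copy of the conversation (the model choice is illustrative):

```ruby
chat = Chat.setup []
chat.system "You are a terse assistant"
chat.model "gpt-4.1-mini"             # option helper from the list above
chat.user "Suggest a color"
puts chat.ask

follow_up = chat.branch               # assumed: independent copy
follow_up.user "And a matching font?"
puts follow_up.ask                    # original chat stays untouched
```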

### Agent (doc/Agent.md)

A thin orchestrator around Chat and LLM that keeps state and injects tools:

- Maintains a live conversation (start_chat, start, current_chat)
- Auto-exports Workflow tasks and a KnowledgeBase traversal tool
- ask/chat/json/iterate helpers; structured iteration over lists/dictionaries
- load_from_path(dir) — bootstrap from a directory containing workflow.rb, knowledge_base, start_chat (see the sketch below)
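For example (a sketch: the directory name is hypothetical, and load_from_path is assumed to be exposed on LLM::Agent as described in the bullet above):

```ruby
# agents/my_agent/ is assumed to contain workflow.rb, knowledge_base
# and start_chat, the layout described above.
agent = LLM::Agent.load_from_path "agents/my_agent"
puts agent.ask "What can you do with the tools you were given?"
```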

### Model (doc/Model.md)

A composable framework to wrap models with a consistent API:

- ScoutModel — base: define init/eval/eval_list/extract_features/post_process/train; persist behavior and state to a directory
- PythonModel — initialize and drive a Python class via ScoutPython
- TorchModel — helpers for PyTorch: training loop, tensors, save/load state, layer introspection
- HuggingfaceModel — Transformers convenience; specializations:
  - SequenceClassificationModel — text classification, logits→labels
  - CausalModel — chat/causal generation (supports apply_chat_template)
  - NextTokenModel — simple next-token fine-tuning loop

Pattern (sketched below):
- Keep feature extraction separate from evaluation
- Use eval_list to batch large tables
- Persist state and behavior to a directory for reuse
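A minimal sketch of that pattern with the base class; the two-argument block signature mirrors the extract_features example in the quick start, but the exact hook semantics are assumptions to verify against doc/Model.md:

```ruby
TmpFile.with_dir do |dir|
  model = ScoutModel.new dir        # behavior and state persist under dir

  # Feature extraction kept separate from evaluation
  model.extract_features do |sample, list|
    list ? list.map(&:downcase) : sample.downcase
  end

  model.eval do |features, list|
    list ? list.map(&:length) : features.length
  end

  puts model.eval("Short TEXT")     # => 10 (features: "short text")
end
```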

## Example: ExTRI2 workflow (models in practice)

The ExTRI2 Workflow (Rbbt-Workflows) uses HuggingfaceModel to score TRI sentences and determine Mode of Regulation (MoR):

- Feature extraction marks [TF]/[TG] spans as inline tags for the model
- Batch evaluation over a TSV (“Text”, “TF”, “Gene” columns)
- Adds fields “Valid score” and “Valid” to the TSV
- Runs a second SequenceClassification model to produce “MoR” and “MoR scores”

See workflows/ExTRI2/workflow.rb in that repository for the full implementation.

## Command-Line Interface

The bin/scout dispatcher locates scripts under scout_commands across installed packages and workflows using the Path subsystem, adding command-line terms until it reaches a file it can execute:

- If the fragment maps to a directory, a listing of available subcommands is shown
- Scripts can be nested arbitrarily (e.g., agent/kb)
- Other packages or workflows can define their own scripts under share/scout_commands, and bin/scout will find them

### scout llm …

Ask an LLM, manage chat files, run a minimal web UI, or process queued requests. Scripts live under scout_commands/llm.

- Ask
  - scout llm ask [options] [question]
  - -t|--template <file_or_key> — load a prompt template; substitutes “???” or appends
  - -c|--chat <chat_file> — load/extend a conversation (appends the reply)
  - -i|--inline <file> — answer “# ask: …” directives inline in a source file
  - -f|--file <file> — prepend file content or substitute where “...” appears
  - -m|--model, -e|--endpoint, -b|--backend — select backend/model; merged with Scout.etc.AI
  - -d|--dry_run — expand and print the conversation (no ask)

- Relay processor (for the Relay backend)
  - scout llm process [directory] — watches a queue directory and answers ask JSONs

- Web UI server
  - scout llm server — static chat UI over ./chats with a small JSON API

- Templates
  - scout llm template — list installed prompt templates (Scout.questions)

Run “scout llm” alone to print a help-like listing of the available subcommands.

### scout agent …

Stateful agents with Workflow and KnowledgeBase tools wired in. Scripts live under scout_commands/agent.

- Ask via an Agent
  - scout agent ask [options] [agent_name] [question]
  - -l|--log <level> — set log severity
  - -t|--template <file_or_key>
  - -c|--chat <chat_file>
  - -m|--model, -e|--endpoint
  - -f|--file <path>
  - -wt|--workflow_tasks <comma_list> — export only selected tasks
  - agent_name resolves via Scout.workflows[agent_name] (a workflow) or Scout.chats[agent_name] (an agent directory with workflow.rb/knowledge_base/start_chat)

- KnowledgeBase passthrough
  - scout agent kb <agent_name> <kb subcommand...>
  - Loads the agent’s knowledge base and forwards to “scout kb …” (see scout-gear doc/KnowledgeBase.md for the kb CLI)

As with other Scout CLIs, if you target a directory of commands (e.g., “scout agent”), bin/scout will show the subcommand listing.

Note: Workflows also have extensive CLI commands (scout workflow …) for job execution, provenance, orchestration, and queue processing. When you integrate models inside tasks, you drive them through the workflow CLI (see scout-gear doc/Workflow.md).

## Configuration, persistence and reproducibility

- Endpoint presets: place YAML under Scout.etc.AI/<endpoint>.yaml to preconfigure URLs, models, headers, etc.; CLI options and chat inline options override defaults
- Tool calling: Workflow tasks are exported as JSON schemas per backend; results are serialized back to the model as tool replies
- Caching: LLM.ask persists responses (by default) using Persist.persist; disable with persist: false (see the example after this list)
- Models: pass a directory to persist options/behavior/state (Torch/HF use state files or save_pretrained directories); save/restore to reuse
- Chats: save printable conversations with Chat#save; reuse with “scout llm ask -c <file>”
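For example, the caching bullet translates to (backend and model values are illustrative):

```ruby
# Cached: repeated identical asks are answered from the Persist store.
LLM.ask "Summarize the changelog", backend: :openai, model: "gpt-4.1-mini"

# Fresh call: bypass the persisted response for this ask only.
LLM.ask "Summarize the changelog", backend: :openai, model: "gpt-4.1-mini",
  persist: false
```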

For Python models, ensure scout-rig (ScoutPython) is installed and Python packages are present. See doc/Python.md in scout-rig for details.

## Where to go next

- Explore the API docs shipped in this repository:
  - doc/LLM.md — orchestration, backends, tools, CLI
  - doc/Chat.md — conversation DSL and file format
  - doc/Agent.md — stateful agents, Workflow/KB wiring, iterate helpers
  - doc/Model.md — model wrappers; ScoutModel, Python/Torch/Hugging Face

- Browse real-world workflows (including ExTRI2) in Rbbt-Workflows:
  - https://github.com/Rbbt-Workflows

- Learn core building blocks (TSV, KnowledgeBase, Workflow, etc.) in scout-gear and scout-essentials:
  - https://github.com/mikisvaz/scout-gear
  - https://github.com/mikisvaz/scout-essentials

- Integrate Python with scout-rig:
  - https://github.com/mikisvaz/scout-rig

## License and contributions

Issues and PRs are welcome across the Scout repositories. Please open tickets in the relevant package (e.g., scout-ai for LLM/Agent/Model topics).
data/Rakefile
CHANGED
```diff
@@ -17,6 +17,8 @@ Juwelier::Tasks.new do |gem|
 
   # dependencies defined in Gemfile
   gem.add_runtime_dependency 'scout-rig', '>= 0'
+  gem.add_runtime_dependency 'ruby-openai', '>= 0'
+  gem.add_runtime_dependency 'ruby-mcp-client', '>= 0'
 end
 Juwelier::RubygemsDotOrgTasks.new
 require 'rake/testtask'
```
data/VERSION
CHANGED
```diff
@@ -1 +1 @@
-1.0.0
+1.0.1
```