scout-ai 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. checksums.yaml +4 -4
  2. data/.vimproject +87 -15
  3. data/README.md +296 -0
  4. data/Rakefile +2 -0
  5. data/VERSION +1 -1
  6. data/doc/Agent.md +279 -0
  7. data/doc/Chat.md +258 -0
  8. data/doc/LLM.md +446 -0
  9. data/doc/Model.md +513 -0
  10. data/doc/RAG.md +129 -0
  11. data/lib/scout/llm/agent/chat.rb +48 -1
  12. data/lib/scout/llm/agent/delegate.rb +51 -0
  13. data/lib/scout/llm/agent/iterate.rb +44 -0
  14. data/lib/scout/llm/agent.rb +43 -22
  15. data/lib/scout/llm/ask.rb +47 -7
  16. data/lib/scout/llm/backends/anthropic.rb +147 -0
  17. data/lib/scout/llm/backends/bedrock.rb +1 -1
  18. data/lib/scout/llm/backends/ollama.rb +27 -30
  19. data/lib/scout/llm/backends/openai.rb +36 -41
  20. data/lib/scout/llm/backends/responses.rb +166 -113
  21. data/lib/scout/llm/chat.rb +270 -102
  22. data/lib/scout/llm/embed.rb +4 -4
  23. data/lib/scout/llm/mcp.rb +28 -0
  24. data/lib/scout/llm/parse.rb +1 -0
  25. data/lib/scout/llm/rag.rb +9 -0
  26. data/lib/scout/llm/tools/call.rb +76 -0
  27. data/lib/scout/llm/tools/knowledge_base.rb +159 -0
  28. data/lib/scout/llm/tools/mcp.rb +59 -0
  29. data/lib/scout/llm/tools/workflow.rb +106 -0
  30. data/lib/scout/llm/tools.rb +98 -141
  31. data/lib/scout-ai.rb +1 -0
  32. data/scout-ai.gemspec +31 -18
  33. data/scout_commands/agent/ask +59 -78
  34. data/scout_commands/documenter +148 -0
  35. data/scout_commands/llm/ask +3 -2
  36. data/scout_commands/llm/server +319 -0
  37. data/share/server/chat.html +138 -0
  38. data/share/server/chat.js +468 -0
  39. data/test/scout/llm/backends/test_anthropic.rb +134 -0
  40. data/test/scout/llm/backends/test_ollama.rb +1 -1
  41. data/test/scout/llm/backends/test_openai.rb +45 -6
  42. data/test/scout/llm/backends/test_responses.rb +124 -0
  43. data/test/scout/llm/test_agent.rb +1 -93
  44. data/test/scout/llm/test_ask.rb +3 -1
  45. data/test/scout/llm/test_chat.rb +43 -1
  46. data/test/scout/llm/test_mcp.rb +29 -0
  47. data/test/scout/llm/tools/test_knowledge_base.rb +22 -0
  48. data/test/scout/llm/tools/test_mcp.rb +11 -0
  49. data/test/scout/llm/tools/test_workflow.rb +39 -0
  50. metadata +56 -17
  51. data/README.rdoc +0 -18
  52. data/python/scout_ai/__pycache__/__init__.cpython-310.pyc +0 -0
  53. data/python/scout_ai/__pycache__/__init__.cpython-311.pyc +0 -0
  54. data/python/scout_ai/__pycache__/huggingface.cpython-310.pyc +0 -0
  55. data/python/scout_ai/__pycache__/huggingface.cpython-311.pyc +0 -0
  56. data/python/scout_ai/__pycache__/util.cpython-310.pyc +0 -0
  57. data/python/scout_ai/__pycache__/util.cpython-311.pyc +0 -0
  58. data/python/scout_ai/atcold/plot_lib.py +0 -141
  59. data/python/scout_ai/atcold/spiral.py +0 -27
  60. data/python/scout_ai/huggingface/train/__pycache__/__init__.cpython-310.pyc +0 -0
  61. data/python/scout_ai/huggingface/train/__pycache__/next_token.cpython-310.pyc +0 -0
  62. data/python/scout_ai/language_model.py +0 -70
  63. /data/{python/scout_ai/atcold/__init__.py → test/scout/llm/tools/test_call.rb} +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 1c66f7d74bdf6583e77cb7dd132bfc7d677399cc5ddeb42d59cacdef2c432d72
4
- data.tar.gz: 9dcdc99b701696dd724394124a02bac3bde5706d94d9efad81c0ab11641d4e47
3
+ metadata.gz: cc083461a140b5149d6965f5aa635e651015786cfe312f220a33967fa10c52cb
4
+ data.tar.gz: 694ac0ff0b626d99ee993c77a1dba12022f9e7a545bbab67b94627a4698ade09
5
5
  SHA512:
6
- metadata.gz: a72b0575c3c92eb4dfa413bada73c4448f85434ee99fe446b204b29e789142ac7cdd0fab3f422b994c3eb663c254dbbb097ae2fe0bd27aa7c56815690ee79289
7
- data.tar.gz: 15616e4363983823e17ace7c866f49772e954e75eeae5b6677fb32bda322d9608be508594393cd0e032b0bc1666fe2a561b444a0b2af27360577c86f51f6c028
6
+ metadata.gz: ef373e303af56478ecde91faf87093226936e3537b9cb3346192e230af43cc8350091e589080b7c4e32993bf0cbcc43ef380582a4d38b82ca5da33bc60754dfd
7
+ data.tar.gz: cccb6e93d1b827a02f5b9159a11eb7839c11c48b0cdd5a0096644b19479e0609d9d641efff286a545625fc17863e2f106faa2ea775e7cde121f313260fdef779
data/.vimproject CHANGED
@@ -1,47 +1,97 @@
1
1
  scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.txt *.conf" {
2
2
  Rakefile
3
+ README.md
4
+ LICENSE.txt
3
5
  bin=bin filter="*"{
4
6
  scout-ai
5
7
  }
6
- agents=agents{
7
- analyst.rb
8
- }
9
- chats=chats{
8
+ chats=chats filter="*"{
9
+
10
+ test_tool
11
+
12
+ ask_agent
13
+
14
+ test_ollama_tool
15
+
16
+ test_github
17
+
18
+ test_stdio
19
+
20
+ test_claude
21
+
22
+ mcp_server
23
+
24
+ chat
25
+ text_options
26
+
27
+ multi_agent.rb
28
+
29
+ pdf
30
+ agent
31
+ AGS
32
+ documenter.rb
33
+ genome_editing
34
+ hello
35
+ high_level
36
+ test_chat
10
37
  system=system{
11
38
  scout-ai
12
39
  }
13
40
  doc=doc{
41
+ agent
14
42
  chat
15
- model.analysis
16
43
  model
44
+ model.analysis
17
45
  }
18
46
  develop=develop{
47
+ causalLM
48
+ chat
49
+ digest
50
+ finetuning
51
+ json_format
52
+ model
53
+ rf
19
54
  training=training{
20
- intro
21
55
  basics
56
+ data.tmp
57
+ intro
58
+ python
22
59
  data=data{
23
60
  main
24
61
  next_token
62
+ test.rb
25
63
  }
26
- data.tmp
27
- python
28
64
  }
29
- chat
30
- model
31
- causalLM
32
- rf
33
65
  }
34
66
  refactor=refactor{
35
67
  chat
36
68
  }
37
69
  help=help{
38
- tools
39
- from_pretrained
40
70
  fine-tunning
71
+ from_pretrained
72
+ previous_response
41
73
  }
42
74
  test=test{
75
+ agent_chat.rb
76
+ block
77
+ file
78
+ imports
79
+ imports.rb
80
+ prev
43
81
  rf.rb
82
+ tool
44
83
  }
84
+ web=web{
85
+ chat
86
+ cmd
87
+ web
88
+ }
89
+ }
90
+ doc=doc filter="*"{
91
+ Agent.md
92
+ Chat.md
93
+ LLM.md
94
+ Model.md
45
95
  }
46
96
  lib=lib {
47
97
  scout-ai.rb
@@ -50,12 +100,20 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
50
100
  utils.rb
51
101
  parse.rb
52
102
  tools.rb
103
+ tools=tools{
104
+ mcp.rb
105
+ workflow.rb
106
+ knowledge_base.rb
107
+ call.rb
108
+ }
53
109
  chat.rb
54
110
 
55
111
  backends=backends{
56
112
  openai.rb
57
113
  responses.rb
114
+ anthropic.rb
58
115
  ollama.rb
116
+ bedrock.rb
59
117
  openwebui.rb
60
118
  huggingface.rb
61
119
  relay.rb
@@ -67,7 +125,14 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
67
125
 
68
126
  rag.rb
69
127
 
128
+ mcp.rb
129
+
70
130
  agent.rb
131
+ agent=agent{
132
+ chat.rb
133
+ iterate.rb
134
+ delegate.rb
135
+ }
71
136
  }
72
137
  model=model{
73
138
  util=util{
@@ -114,7 +179,6 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
114
179
  }
115
180
  }
116
181
  }
117
-
118
182
  test=test {
119
183
  data=data filter="*"{
120
184
  person=person{
@@ -129,14 +193,22 @@ scout-ai=$PWD filter="*.rb *.rake Rakefile *.rdoc *.R *.sh *.js *.haml *.sass *.
129
193
  data=data filter="*"{
130
194
  }
131
195
  scout_commands=scout_commands filter="*"{
196
+ documenter
132
197
  llm=llm{
133
198
  ask
134
199
  template
135
200
  process
201
+ server
136
202
  }
137
203
  agent=agent{
138
204
  ask
139
205
  kb
140
206
  }
141
207
  }
208
+ share=share{
209
+ server=server files="*"{
210
+ chat.html
211
+ chat.js
212
+ }
213
+ }
142
214
  }
data/README.md ADDED
@@ -0,0 +1,296 @@
1
+ # scout-ai
2
+
3
+ Agentic AI and machine‑learning for Scout: a compact layer to train/evaluate models (Ruby, Python/PyTorch, Hugging Face), talk to LLMs across multiple backends, wire Workflow tasks as tools, and build persistent, declarative conversations and agents.
4
+
5
+ This package sits on top of the Scout stack:
6
+
7
+ - scout-essentials — low level functionality (Open, TSV, Persist, Path, ConcurrentStream, Log, etc.)
8
+ - scout-gear — core data modules (TSV, KnowledgeBase, Entity, Association, Workflow, WorkQueue, etc.)
9
+ - scout-rig — language bridges (notably Python via PyCall)
10
+ - scout-camp — remote servers, cloud deployments, web interfaces
11
+ - scout-ai — LLMs, agents and model wrappers (this repository)
12
+
13
+ All packages are available under github.com/mikisvaz:
14
+ - https://github.com/mikisvaz/scout-essentials
15
+ - https://github.com/mikisvaz/scout-gear
16
+ - https://github.com/mikisvaz/scout-rig
17
+ - https://github.com/mikisvaz/scout-camp
18
+ - https://github.com/mikisvaz/scout-ai
19
+
20
+ Scout originates from the Rbbt ecosystem (bioinformatics workflows). Numerous end‑to‑end examples live in the Rbbt‑Workflows organization:
21
+ - https://github.com/Rbbt-Workflows
22
+
23
+ The sections below summarize the main components (LLM, Chat, Agent, Model), quick starts, and the command‑line interface. For full APIs, see the doc/ directory.
24
+
25
+ - doc/LLM.md — multi‑backend LLM orchestration, tool calling, embeddings
26
+ - doc/Chat.md — conversation builder/serializer
27
+ - doc/Agent.md — stateful agents wired to Workflows and KnowledgeBases
28
+ - doc/Model.md — model wrappers (ScoutModel, Python/Torch/Hugging Face)
29
+
30
+
31
+ ## Installation and requirements
32
+
33
+ Scout is a Ruby framework. Add scout-ai (and the other packages you need) to your project and require as needed.
34
+
35
+ - Ruby 3.x recommended
36
+ - For Python‑backed models (Torch/Hugging Face):
37
+ - Python 3 (installed and visible in PATH)
38
+ - pycall gem (Ruby ↔ Python bridge)
39
+ - Python packages: torch, transformers, numpy, pandas (as needed)
40
+ - For OpenAI or similar backends: set API keys in environment or config (see LLM backend docs)
41
+
42
+ Typical Gemfile fragment:
43
+ ```ruby
44
+ gem 'scout-essentials', git: 'https://github.com/mikisvaz/scout-essentials'
45
+ gem 'scout-gear', git: 'https://github.com/mikisvaz/scout-gear'
46
+ gem 'scout-rig', git: 'https://github.com/mikisvaz/scout-rig'
47
+ gem 'scout-ai', git: 'https://github.com/mikisvaz/scout-ai'
48
+ ```
49
+
50
+ Backends and endpoints can be configured under Scout.etc.AI/<endpoint>.yaml (merged into asks), or via environment variables per backend (see doc/LLM.md).
51
+
52
+
53
+ ## Quick starts
54
+
55
+ ### Ask a model
56
+
57
+ ```ruby
58
+ require 'scout-ai'
59
+ answer = LLM.ask "What is the capital of France?", backend: :openai, model: "gpt-4.1-mini"
60
+ puts answer
61
+ ```
62
+
63
+ Chat builder:
64
+
65
+ ```ruby
66
+ chat = Chat.setup []
67
+ chat.system "You are a terse assistant"
68
+ chat.user "List three colors"
69
+ puts chat.ask
70
+ ```
71
+
72
+ ### Tool calling with a Workflow
73
+
74
+ Export Workflow tasks as callable tools—let the model call them functionally.
75
+
76
+ ```ruby
77
+ require 'scout-gear' # defines Workflow
78
+
79
+ m = Module.new do
80
+ extend Workflow
81
+ self.name = "Registration"
82
+
83
+ input :name, :string
84
+ input :age, :integer
85
+ input :gender, :select, nil, select_options: %w(male female)
86
+ task :person => :yaml do inputs.to_hash end
87
+ end
88
+
89
+ puts LLM.workflow_ask(m, "Register Eduard Smith, a 25 yo male, using a tool call",
90
+ backend: 'ollama', model: 'llama3')
91
+ ```
92
+
93
+ ### Stateful agent with a KnowledgeBase
94
+
95
+ ```ruby
96
+ require 'scout-gear' # defines KnowledgeBase
97
+
98
+ TmpFile.with_dir do |dir|
99
+ kb = KnowledgeBase.new dir
100
+ kb.register :brothers, datafile_test(:person).brothers, undirected: true
101
+ kb.register :marriages, datafile_test(:person).marriages,
102
+ undirected: true, source: "=>Alias", target: "=>Alias"
103
+ kb.register :parents, datafile_test(:person).parents
104
+
105
+ agent = LLM::Agent.new knowledge_base: kb
106
+ puts agent.ask "Who is Miki's brother in law?"
107
+ end
108
+ ```
109
+
110
+ ### Structured iteration
111
+
112
+ ```ruby
113
+ agent = LLM::Agent.new
114
+ agent.iterate("List three steps to bake bread") { |step| puts "- #{step}" }
115
+
116
+ agent.iterate_dictionary("Give capital cities for FR, ES, IT") do |country, capital|
117
+ puts "#{country}: #{capital}"
118
+ end
119
+ ```
120
+
121
+ ### Use a Hugging Face classifier inside a Workflow
122
+
123
+ From the ExTRI2 workflow (see below):
124
+
125
+ ```ruby
126
+ model = HuggingfaceModel.new 'SequenceClassification', tri_model_dir, nil,
127
+ tokenizer_args: { model_max_length: 512, truncation: true },
128
+ return_logits: true
129
+
130
+ model.extract_features do |_, rows|
131
+ rows.map do |text, tf, tg|
132
+ text.sub("[TF]", "<TF>#{tf}</TF>").sub("[TG]", "<TG>#{tg}</TG>")
133
+ end
134
+ end
135
+
136
+ model.init
137
+ preds = model.eval_list tsv.slice(%w(Text TF Gene)).values
138
+ tsv.add_field "Valid score" do
139
+ non_valid, valid = preds.shift
140
+ Misc.softmax([valid, non_valid]).first rescue 0
141
+ end
142
+ ```
143
+
144
+
145
+ ## Components overview
146
+
147
+ ### LLM (doc/LLM.md)
148
+
149
+ A compact, multi‑backend layer to ask LLMs, wire function‑calling tools, parse/print chats, and compute embeddings.
150
+
151
+ - ask(question, options={}, &block) — normalize a question to messages (LLM.chat), merge endpoint/model/format, run backend, and return assistant output (or messages with return_messages: true)
152
+ - Backends: OpenAI‑style, Responses (multimodal, JSON schema), Ollama, OpenWebUI, AWS Bedrock, and a simple Relay
153
+ - Tools: export Workflow tasks (LLM.workflow_tools) and KnowledgeBase lookups; tool calls are handled via a block
154
+ - Embeddings and a tiny RAG helper
155
+ - Chat/print pipeline: imports, clean, tasks/jobs as function calls, files/directories as tagged content
156
+ - Configuration: endpoint defaults in Scout.etc.AI/endpoint.yaml are merged into options automatically
157
+
158
+ ### Chat (doc/Chat.md)
159
+
160
+ A lightweight builder over an Array of {role:, content:} messages with helpers:
161
+
162
+ - user/system/assistant, file/directory tagging, import/continue
163
+ - tool/workflow task declarations, jobs/inline jobs
164
+ - association declarations (KnowledgeBase)
165
+ - option, endpoint, model, format (including JSON schema requests)
166
+ - ask, chat, json/json_format, print/save/write/write_answer, branch/shed
167
+
168
+ Use Chat to author “chat files” on disk or build conversations programmatically.
169
+
170
+ ### Agent (doc/Agent.md)
171
+
172
+ A thin orchestrator around Chat and LLM that keeps state and injects tools:
173
+
174
+ - Maintains a live conversation (start_chat, start, current_chat)
175
+ - Auto‑exports Workflow tasks and a KnowledgeBase traversal tool
176
+ - ask/chat/json/iterate helpers; structured iteration over lists/dictionaries
177
+ - load_from_path(dir) — bootstrap from a directory containing workflow.rb, knowledge_base, start_chat
178
+
179
+ ### Model (doc/Model.md)
180
+
181
+ A composable framework to wrap models with a consistent API:
182
+
183
+ - ScoutModel — base: define init/eval/eval_list/extract_features/post_process/train; persist behavior and state to a directory
184
+ - PythonModel — initialize and drive a Python class via ScoutPython
185
+ - TorchModel — helpers for PyTorch: training loop, tensors, save/load state, layer introspection
186
+ - HuggingfaceModel — Transformers convenience; specializations:
187
+ - SequenceClassificationModel — text classification, logits→labels
188
+ - CausalModel — chat/causal generation (supports apply_chat_template)
189
+ - NextTokenModel — simple next‑token fine‑tuning loop
190
+
191
+ Pattern:
192
+ - Keep feature extraction separate from evaluation
193
+ - Use eval_list to batch large tables
194
+ - Persist directory state and behavior to reuse
195
+
196
+
197
+ ## Example: ExTRI2 workflow (models in practice)
198
+
199
+ The ExTRI2 Workflow (Rbbt‑Workflows) uses HuggingfaceModel to score TRI sentences and determine Mode of Regulation (MoR):
200
+
201
+ - Feature extraction marks [TF]/[TG] spans as inline tags for the model
202
+ - Batch evaluation over a TSV (“Text”, “TF”, “Gene” columns)
203
+ - Adds fields “Valid score” and “Valid” to the TSV
204
+ - Runs a second SequenceClassification model to produce “MoR” and “MoR scores”
205
+
206
+ See workflows/ExTRI2/workflow.rb in that repository for the full implementation.
207
+
208
+
209
+ ## Command‑Line Interface
210
+
211
+ The bin/scout dispatcher locates scripts under scout_commands across installed packages and workflows using the Path subsystem. Resolution works by adding terms until a file is found to execute:
212
+
213
+ - If the fragment maps to a directory, a listing of available subcommands is shown
214
+ - Scripts can be nested arbitrarily (e.g., agent/kb)
215
+ - Other packages or workflows can define their own scripts under share/scout_commands, and bin/scout will find them
216
+
217
+ ### scout llm …
218
+
219
+ Ask an LLM, manage chat files, run a minimal web UI, or process queued requests. Scripts live under scout_commands/llm.
220
+
221
+ - Ask
222
+ - scout llm ask [options] [question]
223
+ - -t|--template <file_or_key> — load a prompt template; substitutes “???” or appends
224
+ - -c|--chat <chat_file> — load/extend a conversation (appends the reply)
225
+ - -i|--inline <file> — answer “# ask: …” directives inline in a source file
226
+ - -f|--file <file> — prepend file content or substitute where “...” appears
227
+ - -m|--model, -e|--endpoint, -b|--backend — select backend/model; merged with Scout.etc.AI
228
+ - -d|--dry_run — expand and print the conversation (no ask)
229
+
230
+ - Relay processor (for the Relay backend)
231
+ - scout llm process [directory] — watches a queue directory and answers ask JSONs
232
+
233
+ - Web UI server
234
+ - scout llm server — static chat UI over ./chats with a small JSON API
235
+
236
+ - Templates
237
+ - scout llm template — list installed prompt templates (Scout.questions)
238
+
239
+ Run “scout llm” alone to see available subcommands. If you target a directory (e.g., “scout llm”), a help‑like listing is printed.
240
+
241
+ ### scout agent …
242
+
243
+ Stateful agents with Workflow and KnowledgeBase tooled up. Scripts live under scout_commands/agent.
244
+
245
+ - Ask via an Agent
246
+ - scout agent ask [options] [agent_name] [question]
247
+ - -l|--log <level> — set log severity
248
+ - -t|--template <file_or_key>
249
+ - -c|--chat <chat_file>
250
+ - -m|--model, -e|--endpoint
251
+ - -f|--file <path>
252
+ - -wt|--workflow_tasks <comma_list> — export only selected tasks
253
+ - agent_name resolves via Scout.workflows[agent_name] (a workflow) or Scout.chats[agent_name] (an agent directory with workflow.rb/knowledge_base/start_chat)
254
+
255
+ - KnowledgeBase passthrough
256
+ - scout agent kb <agent_name> <kb subcommand...>
257
+ - Loads the agent’s knowledge base and forwards to “scout kb …” (see scout-gear doc/KnowledgeBase.md for kb CLI)
258
+
259
+ As with other Scout CLIs, if you target a directory of commands (e.g., “scout agent”), bin/scout will show the subcommand listing.
260
+
261
+ Note: Workflows also have extensive CLI commands (scout workflow …) for job execution, provenance, orchestration, and queue processing. When you integrate models inside tasks, you drive them through the workflow CLI (see scout-gear doc/Workflow.md).
262
+
263
+
264
+ ## Configuration, persistence and reproducibility
265
+
266
+ - Endpoint presets: place YAML under Scout.etc.AI/<endpoint>.yaml to preconfigure URLs, models, headers, etc.; CLI options and chat inline options override defaults
267
+ - Tool calling: Workflow tasks are exported as JSON schemas per backend; results are serialized back to the model as tool replies
268
+ - Caching: LLM.ask persists responses (by default) using Persist.persist; disable with persist: false
269
+ - Models: pass a directory to persist options/behavior/state (Torch/HF use state files or save_pretrained directories); save/restore to reuse
270
+ - Chats: save printable conversations with Chat#save; reuse with “scout llm ask -c <file>”
271
+
272
+ For Python models, ensure scout-rig (ScoutPython) is installed and Python packages are present. See doc/Python.md in scout-rig for details.
273
+
274
+
275
+ ## Where to go next
276
+
277
+ - Explore the API docs shipped in this repository:
278
+ - doc/LLM.md — orchestration, backends, tools, CLI
279
+ - doc/Chat.md — conversation DSL and file format
280
+ - doc/Agent.md — stateful agents, Workflow/KB wiring, iterate helpers
281
+ - doc/Model.md — model wrappers; ScoutModel, Python/Torch/Hugging Face
282
+
283
+ - Browse real‑world workflows (including ExTRI2) in Rbbt‑Workflows:
284
+ - https://github.com/Rbbt-Workflows
285
+
286
+ - Learn core building blocks (TSV, KnowledgeBase, Workflow, etc.) in scout-gear and scout-essentials:
287
+ - https://github.com/mikisvaz/scout-gear
288
+ - https://github.com/mikisvaz/scout-essentials
289
+
290
+ - Integrate Python with scout-rig:
291
+ - https://github.com/mikisvaz/scout-rig
292
+
293
+
294
+ ## License and contributions
295
+
296
+ Issues and PRs are welcome across the Scout repositories. Please open tickets in the relevant package (e.g., scout-ai for LLM/Agent/Model topics).
data/Rakefile CHANGED
@@ -17,6 +17,8 @@ Juwelier::Tasks.new do |gem|
17
17
 
18
18
  # dependencies defined in Gemfile
19
19
  gem.add_runtime_dependency 'scout-rig', '>= 0'
20
+ gem.add_runtime_dependency 'ruby-openai', '>= 0'
21
+ gem.add_runtime_dependency 'ruby-mcp-client', '>= 0'
20
22
  end
21
23
  Juwelier::RubygemsDotOrgTasks.new
22
24
  require 'rake/testtask'
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.0.0
1
+ 1.1.0