robot_lab 0.0.1 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/deploy-github-pages.yml +9 -9
- data/.irbrc +6 -0
- data/CHANGELOG.md +140 -0
- data/README.md +263 -48
- data/Rakefile +71 -1
- data/docs/api/core/index.md +53 -46
- data/docs/api/core/memory.md +200 -154
- data/docs/api/core/network.md +13 -3
- data/docs/api/core/robot.md +490 -130
- data/docs/api/core/state.md +55 -73
- data/docs/api/core/tool.md +205 -209
- data/docs/api/index.md +7 -28
- data/docs/api/mcp/client.md +119 -48
- data/docs/api/mcp/index.md +75 -60
- data/docs/api/mcp/server.md +120 -136
- data/docs/api/mcp/transports.md +172 -184
- data/docs/api/messages/index.md +35 -20
- data/docs/api/messages/text-message.md +67 -21
- data/docs/api/messages/tool-call-message.md +80 -41
- data/docs/api/messages/tool-result-message.md +119 -50
- data/docs/api/messages/user-message.md +48 -24
- data/docs/api/streaming/context.md +157 -74
- data/docs/api/streaming/events.md +114 -166
- data/docs/api/streaming/index.md +74 -72
- data/docs/architecture/core-concepts.md +360 -116
- data/docs/architecture/index.md +97 -59
- data/docs/architecture/message-flow.md +138 -129
- data/docs/architecture/network-orchestration.md +197 -50
- data/docs/architecture/robot-execution.md +199 -146
- data/docs/architecture/state-management.md +255 -187
- data/docs/concepts.md +311 -49
- data/docs/examples/basic-chat.md +89 -77
- data/docs/examples/index.md +222 -47
- data/docs/examples/mcp-server.md +207 -203
- data/docs/examples/multi-robot-network.md +129 -35
- data/docs/examples/rails-application.md +159 -160
- data/docs/examples/tool-usage.md +295 -204
- data/docs/getting-started/configuration.md +347 -154
- data/docs/getting-started/index.md +1 -1
- data/docs/getting-started/installation.md +22 -13
- data/docs/getting-started/quick-start.md +166 -121
- data/docs/guides/building-robots.md +418 -212
- data/docs/guides/creating-networks.md +143 -24
- data/docs/guides/index.md +0 -5
- data/docs/guides/mcp-integration.md +152 -113
- data/docs/guides/memory.md +220 -164
- data/docs/guides/rails-integration.md +244 -162
- data/docs/guides/streaming.md +137 -187
- data/docs/guides/using-tools.md +259 -212
- data/docs/index.md +46 -41
- data/examples/01_simple_robot.rb +6 -9
- data/examples/02_tools.rb +6 -9
- data/examples/03_network.rb +19 -17
- data/examples/04_mcp.rb +5 -8
- data/examples/05_streaming.rb +5 -8
- data/examples/06_prompt_templates.rb +42 -37
- data/examples/07_network_memory.rb +13 -14
- data/examples/08_llm_config.rb +169 -0
- data/examples/09_chaining.rb +262 -0
- data/examples/10_memory.rb +331 -0
- data/examples/11_network_introspection.rb +253 -0
- data/examples/12_message_bus.rb +74 -0
- data/examples/13_spawn.rb +90 -0
- data/examples/14_rusty_circuit/comic.rb +143 -0
- data/examples/14_rusty_circuit/display.rb +203 -0
- data/examples/14_rusty_circuit/heckler.rb +63 -0
- data/examples/14_rusty_circuit/open_mic.rb +123 -0
- data/examples/14_rusty_circuit/prompts/open_mic_comic.md +20 -0
- data/examples/14_rusty_circuit/prompts/open_mic_heckler.md +23 -0
- data/examples/14_rusty_circuit/prompts/open_mic_scout.md +20 -0
- data/examples/14_rusty_circuit/scout.rb +156 -0
- data/examples/14_rusty_circuit/scout_notes.md +89 -0
- data/examples/14_rusty_circuit/show.log +234 -0
- data/examples/15_memory_network_and_bus/editor_in_chief.rb +24 -0
- data/examples/15_memory_network_and_bus/editorial_pipeline.rb +206 -0
- data/examples/15_memory_network_and_bus/linux_writer.rb +80 -0
- data/examples/15_memory_network_and_bus/os_editor.rb +46 -0
- data/examples/15_memory_network_and_bus/os_writer.rb +46 -0
- data/examples/15_memory_network_and_bus/output/combined_article.md +13 -0
- data/examples/15_memory_network_and_bus/output/final_article.md +15 -0
- data/examples/15_memory_network_and_bus/output/linux_draft.md +5 -0
- data/examples/15_memory_network_and_bus/output/mac_draft.md +7 -0
- data/examples/15_memory_network_and_bus/output/memory.json +13 -0
- data/examples/15_memory_network_and_bus/output/revision_1.md +19 -0
- data/examples/15_memory_network_and_bus/output/revision_2.md +15 -0
- data/examples/15_memory_network_and_bus/output/windows_draft.md +7 -0
- data/examples/15_memory_network_and_bus/prompts/os_advocate.md +13 -0
- data/examples/15_memory_network_and_bus/prompts/os_chief.md +13 -0
- data/examples/15_memory_network_and_bus/prompts/os_editor.md +13 -0
- data/examples/16_writers_room/display.rb +158 -0
- data/examples/16_writers_room/output/.gitignore +2 -0
- data/examples/16_writers_room/output/opus_001.md +263 -0
- data/examples/16_writers_room/output/opus_001_notes.log +470 -0
- data/examples/16_writers_room/prompts/writer.md +37 -0
- data/examples/16_writers_room/room.rb +150 -0
- data/examples/16_writers_room/tools.rb +162 -0
- data/examples/16_writers_room/writer.rb +121 -0
- data/examples/16_writers_room/writers_room.rb +162 -0
- data/examples/README.md +197 -0
- data/examples/prompts/{assistant/system.txt.erb → assistant.md} +3 -0
- data/examples/prompts/{billing/system.txt.erb → billing.md} +3 -0
- data/examples/prompts/{classifier/system.txt.erb → classifier.md} +3 -0
- data/examples/prompts/comedian.md +6 -0
- data/examples/prompts/comedy_critic.md +10 -0
- data/examples/prompts/configurable.md +9 -0
- data/examples/prompts/dispatcher.md +12 -0
- data/examples/prompts/{entity_extractor/system.txt.erb → entity_extractor.md} +3 -0
- data/examples/prompts/{escalation/system.txt.erb → escalation.md} +7 -0
- data/examples/prompts/frontmatter_mcp_test.md +9 -0
- data/examples/prompts/frontmatter_named_test.md +5 -0
- data/examples/prompts/frontmatter_tools_test.md +6 -0
- data/examples/prompts/{general/system.txt.erb → general.md} +3 -0
- data/examples/prompts/{github_assistant/system.txt.erb → github_assistant.md} +8 -0
- data/examples/prompts/{helper/system.txt.erb → helper.md} +3 -0
- data/examples/prompts/{keyword_extractor/system.txt.erb → keyword_extractor.md} +3 -0
- data/examples/prompts/llm_config_demo.md +20 -0
- data/examples/prompts/{order_support/system.txt.erb → order_support.md} +8 -0
- data/examples/prompts/os_advocate.md +13 -0
- data/examples/prompts/os_chief.md +13 -0
- data/examples/prompts/os_editor.md +13 -0
- data/examples/prompts/{product_support/system.txt.erb → product_support.md} +7 -0
- data/examples/prompts/{sentiment_analyzer/system.txt.erb → sentiment_analyzer.md} +3 -0
- data/examples/prompts/{synthesizer/system.txt.erb → synthesizer.md} +3 -0
- data/examples/prompts/{technical/system.txt.erb → technical.md} +3 -0
- data/examples/prompts/{triage/system.txt.erb → triage.md} +6 -0
- data/lib/generators/robot_lab/templates/initializer.rb.tt +0 -13
- data/lib/robot_lab/ask_user.rb +75 -0
- data/lib/robot_lab/config/defaults.yml +121 -0
- data/lib/robot_lab/config.rb +183 -0
- data/lib/robot_lab/error.rb +6 -0
- data/lib/robot_lab/mcp/client.rb +1 -1
- data/lib/robot_lab/memory.rb +10 -34
- data/lib/robot_lab/network.rb +13 -20
- data/lib/robot_lab/robot/bus_messaging.rb +239 -0
- data/lib/robot_lab/robot/mcp_management.rb +88 -0
- data/lib/robot_lab/robot/template_rendering.rb +130 -0
- data/lib/robot_lab/robot.rb +240 -330
- data/lib/robot_lab/robot_message.rb +44 -0
- data/lib/robot_lab/robot_result.rb +1 -0
- data/lib/robot_lab/run_config.rb +184 -0
- data/lib/robot_lab/state_proxy.rb +2 -12
- data/lib/robot_lab/streaming/context.rb +1 -1
- data/lib/robot_lab/task.rb +8 -1
- data/lib/robot_lab/tool.rb +108 -172
- data/lib/robot_lab/tool_config.rb +1 -1
- data/lib/robot_lab/tool_manifest.rb +2 -18
- data/lib/robot_lab/utils.rb +39 -0
- data/lib/robot_lab/version.rb +1 -1
- data/lib/robot_lab.rb +89 -57
- data/mkdocs.yml +0 -11
- metadata +121 -135
- data/docs/api/adapters/anthropic.md +0 -121
- data/docs/api/adapters/gemini.md +0 -133
- data/docs/api/adapters/index.md +0 -104
- data/docs/api/adapters/openai.md +0 -134
- data/docs/api/history/active-record-adapter.md +0 -195
- data/docs/api/history/config.md +0 -191
- data/docs/api/history/index.md +0 -132
- data/docs/api/history/thread-manager.md +0 -144
- data/docs/guides/history.md +0 -359
- data/examples/prompts/assistant/user.txt.erb +0 -1
- data/examples/prompts/billing/user.txt.erb +0 -1
- data/examples/prompts/classifier/user.txt.erb +0 -1
- data/examples/prompts/entity_extractor/user.txt.erb +0 -3
- data/examples/prompts/escalation/user.txt.erb +0 -34
- data/examples/prompts/general/user.txt.erb +0 -1
- data/examples/prompts/github_assistant/user.txt.erb +0 -1
- data/examples/prompts/helper/user.txt.erb +0 -1
- data/examples/prompts/keyword_extractor/user.txt.erb +0 -3
- data/examples/prompts/order_support/user.txt.erb +0 -22
- data/examples/prompts/product_support/user.txt.erb +0 -32
- data/examples/prompts/sentiment_analyzer/user.txt.erb +0 -3
- data/examples/prompts/synthesizer/user.txt.erb +0 -15
- data/examples/prompts/technical/user.txt.erb +0 -1
- data/examples/prompts/triage/user.txt.erb +0 -17
- data/lib/robot_lab/adapters/anthropic.rb +0 -163
- data/lib/robot_lab/adapters/base.rb +0 -85
- data/lib/robot_lab/adapters/gemini.rb +0 -193
- data/lib/robot_lab/adapters/openai.rb +0 -159
- data/lib/robot_lab/adapters/registry.rb +0 -81
- data/lib/robot_lab/configuration.rb +0 -143
- data/lib/robot_lab/errors.rb +0 -70
- data/lib/robot_lab/history/active_record_adapter.rb +0 -146
- data/lib/robot_lab/history/config.rb +0 -115
- data/lib/robot_lab/history/thread_manager.rb +0 -93
- data/lib/robot_lab/robotic_model.rb +0 -324
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Dispatcher that decides which specialist to spawn
|
|
3
|
+
temperature: 0.2
|
|
4
|
+
---
|
|
5
|
+
You are a dispatcher. Given a user question, decide which ONE specialist
|
|
6
|
+
should handle it. Reply with EXACTLY two lines:
|
|
7
|
+
|
|
8
|
+
Line 1: the specialist role as a single lowercase_snake_case word
|
|
9
|
+
(e.g. historian, scientist, poet, philosopher, mathematician)
|
|
10
|
+
Line 2: a one-sentence system prompt that tells that specialist how to behave
|
|
11
|
+
|
|
12
|
+
Nothing else. No preamble, no explanation.
|
|
@@ -1,3 +1,10 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Senior specialist for escalated cases
|
|
3
|
+
parameters:
|
|
4
|
+
company_name: null
|
|
5
|
+
authorities: null
|
|
6
|
+
customer: null
|
|
7
|
+
---
|
|
1
8
|
You are a senior customer experience specialist for <%= company_name %>, handling escalated and complex cases.
|
|
2
9
|
|
|
3
10
|
## Your Role
|
|
@@ -1,3 +1,11 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: GitHub assistant with MCP tool access
|
|
3
|
+
mcp:
|
|
4
|
+
- name: github
|
|
5
|
+
transport: stdio
|
|
6
|
+
command: npx
|
|
7
|
+
args: ["-y", "@modelcontextprotocol/server-github"]
|
|
8
|
+
---
|
|
1
9
|
You are a helpful GitHub assistant with access to GitHub tools via MCP.
|
|
2
10
|
|
|
3
11
|
You can search repositories, view issues, read file contents, and more.
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: LLM configuration demo assistant
|
|
3
|
+
parameters:
|
|
4
|
+
environment: "development"
|
|
5
|
+
model: null
|
|
6
|
+
provider: null
|
|
7
|
+
---
|
|
8
|
+
You are a helpful coding assistant that demonstrates the MywayConfig configuration system.
|
|
9
|
+
|
|
10
|
+
Current environment: <%= environment %>
|
|
11
|
+
<% if model %>
|
|
12
|
+
Configured model: <%= model %>
|
|
13
|
+
<% end %>
|
|
14
|
+
<% if provider %>
|
|
15
|
+
Provider: <%= provider %>
|
|
16
|
+
<% end %>
|
|
17
|
+
|
|
18
|
+
Be concise and informative in your responses. When asked about configuration,
|
|
19
|
+
explain how MywayConfig works with YAML defaults, environment-specific overrides,
|
|
20
|
+
XDG config files, and environment variables.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: OS advocate for home AI research labs
|
|
3
|
+
parameters:
|
|
4
|
+
os_name: null
|
|
5
|
+
strengths: null
|
|
6
|
+
---
|
|
7
|
+
You are a passionate advocate for <%= os_name %> as the ideal operating system for a home AI research lab.
|
|
8
|
+
|
|
9
|
+
Key strengths to emphasize: <%= strengths %>
|
|
10
|
+
|
|
11
|
+
Write a 2-3 paragraph advocacy piece arguing why <%= os_name %> is the best choice for someone building a home AI research lab. Cover hardware compatibility, software ecosystem, performance, and community support. Be specific about AI/ML frameworks, GPU support, and developer tooling.
|
|
12
|
+
|
|
13
|
+
Be persuasive but factual. Use concrete examples of tools and workflows.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Editor-in-chief who gives final approval on articles
|
|
3
|
+
---
|
|
4
|
+
You are the editor-in-chief of a technology publication. Review the submitted article for:
|
|
5
|
+
|
|
6
|
+
1. Balance — does it fairly represent all platforms?
|
|
7
|
+
2. Accuracy — are technical claims correct?
|
|
8
|
+
3. Recommendation quality — is the conclusion well-supported?
|
|
9
|
+
4. Readability — is it clear and well-structured?
|
|
10
|
+
|
|
11
|
+
If the article meets your standards, respond starting with APPROVED followed by a brief note on what makes it strong.
|
|
12
|
+
|
|
13
|
+
If it needs work, respond starting with REVISE: followed by specific, actionable feedback on what to improve. Be concise.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Editor who synthesizes OS advocacy drafts into a balanced article
|
|
3
|
+
---
|
|
4
|
+
You are a senior technology editor specializing in AI infrastructure.
|
|
5
|
+
|
|
6
|
+
You will receive three advocacy drafts arguing for different operating systems (macOS, Windows, Linux/BSD) for home AI research labs. Synthesize them into a balanced 4-6 paragraph article that:
|
|
7
|
+
|
|
8
|
+
1. Acknowledges the strengths of each platform
|
|
9
|
+
2. Compares them fairly across key dimensions (cost, GPU support, software ecosystem, ease of use)
|
|
10
|
+
3. Identifies which scenarios favor which OS
|
|
11
|
+
4. Ends with a clear, nuanced recommendation
|
|
12
|
+
|
|
13
|
+
Write in a professional editorial voice. Be objective even though your sources are advocates.
|
|
@@ -1,3 +1,9 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Customer support triage classifier
|
|
3
|
+
parameters:
|
|
4
|
+
company_name: null
|
|
5
|
+
categories: null
|
|
6
|
+
---
|
|
1
7
|
You are an intelligent customer support triage specialist for <%= company_name %>.
|
|
2
8
|
|
|
3
9
|
Your job is to analyze incoming customer requests and classify them into the appropriate category so they can be routed to the right specialist.
|
|
@@ -26,16 +26,3 @@ end
|
|
|
26
26
|
# config.openai_api_key = ENV["OPENAI_API_KEY"]
|
|
27
27
|
# config.gemini_api_key = ENV["GEMINI_API_KEY"]
|
|
28
28
|
# end
|
|
29
|
-
|
|
30
|
-
# History Persistence (optional)
|
|
31
|
-
#
|
|
32
|
-
# Uncomment to enable conversation history storage:
|
|
33
|
-
#
|
|
34
|
-
# Rails.application.config.after_initialize do
|
|
35
|
-
# adapter = RobotLab::History::ActiveRecordAdapter.new(
|
|
36
|
-
# thread_model: RobotLabThread,
|
|
37
|
-
# result_model: RobotLabResult
|
|
38
|
-
# )
|
|
39
|
-
#
|
|
40
|
-
# RobotLab.configuration.history = adapter.to_config
|
|
41
|
-
# end
|
|
# frozen_string_literal: true

module RobotLab
  # Terminal-based human-in-the-loop tool: the LLM invokes it when it
  # needs input from the user, and the tool blocks until a line is typed.
  #
  # Supports three interaction shapes:
  # - open-ended text (just a question),
  # - numbered multiple choice (a numeric reply is mapped back to the choice),
  # - confirmation with a default (empty reply returns the default).
  #
  # IO streams come from the owning robot's +input+ / +output+ accessors
  # when available, otherwise +$stdin+ / +$stdout+.
  class AskUser < Tool
    description "Ask the user a question and wait for their typed response"
    param :question, type: "string", desc: "The question to ask the user"
    param :choices, type: "array", desc: "Optional list of choices to present", required: false
    param :default, type: "string", desc: "Default value if user presses Enter", required: false

    # Print the question (and optional numbered choices), read one line,
    # and return the user's answer as a String.
    #
    # @param question [String] text shown to the user
    # @param choices [Array, nil] optional list rendered as a numbered menu
    # @param default [String, nil] value returned when the user just presses Enter
    # @return [String] the typed answer, the default, or the selected choice
    def execute(question:, choices: nil, default: nil)
      out = output_io
      out.puts "\n[#{robot&.name || "Robot"}] #{question}"

      # Only treat +choices+ as a menu when it is a non-empty Array.
      menu = choices.is_a?(Array) && choices.any? ? choices : nil
      menu&.each_with_index { |choice, idx| out.puts "  #{idx + 1}. #{choice}" }

      out.print(default ? "> [#{default}] " : "> ")
      out.flush

      answer = input_io.gets&.chomp || ""
      answer = default if default && answer.empty?

      # A purely numeric reply against a menu selects the Nth choice (1-based).
      if menu && answer.match?(/\A\d+\z/)
        position = answer.to_i - 1
        answer = menu[position] if position.between?(0, menu.size - 1)
      end

      answer
    end

    private

    # Robot-provided input stream when present, otherwise $stdin.
    def input_io
      return robot.input if robot&.respond_to?(:input) && robot.input

      $stdin
    end

    # Robot-provided output stream when present, otherwise $stdout.
    def output_io
      return robot.output if robot&.respond_to?(:output) && robot.output

      $stdout
    end
  end
end
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
# frozen_string_literal: false
|
|
2
|
+
#
|
|
3
|
+
# RobotLab Configuration Defaults
|
|
4
|
+
#
|
|
5
|
+
# This file defines the default configuration values for RobotLab.
|
|
6
|
+
# Values are loaded in the following priority order (lowest to highest):
|
|
7
|
+
# 1. Bundled defaults (this file, 'defaults' section)
|
|
8
|
+
# 2. Environment-specific overrides (development, test, production sections)
|
|
9
|
+
# 3. XDG user config (~/.config/robot_lab/config.yml)
|
|
10
|
+
# 4. Project config (./config/robot_lab.yml)
|
|
11
|
+
# 5. Environment variables (ROBOT_LAB_*)
|
|
12
|
+
# 6. Constructor parameters
|
|
13
|
+
#
|
|
14
|
+
# Environment variables use double underscores for nested values:
|
|
15
|
+
# ROBOT_LAB_RUBY_LLM__MODEL=gpt-4
|
|
16
|
+
# ROBOT_LAB_RUBY_LLM__ANTHROPIC_API_KEY=sk-ant-...
|
|
17
|
+
# ROBOT_LAB_RUBY_LLM__REQUEST_TIMEOUT=180
|
|
18
|
+
|
|
19
|
+
defaults:
|
|
20
|
+
# RobotLab Core Settings
|
|
21
|
+
max_iterations: 10
|
|
22
|
+
max_tool_iterations: 10
|
|
23
|
+
streaming_enabled: true
|
|
24
|
+
template_path: null
|
|
25
|
+
mcp: :none
|
|
26
|
+
tools: :none
|
|
27
|
+
|
|
28
|
+
# Chat Configuration (passed to RubyLLM.chat)
|
|
29
|
+
# These are global defaults that can be overridden per-robot or per-run
|
|
30
|
+
chat:
|
|
31
|
+
with_model:
|
|
32
|
+
provider: null # chat-specific provider
|
|
33
|
+
model: null # chat-specific model
|
|
34
|
+
assume_exists: null          # skip model-existence validation; mainly for local providers (e.g. Ollama)
|
|
35
|
+
with_temperature: 0.7 # Controls randomness (0.0-2.0, null = model default)
|
|
36
|
+
with_tools: null             # TODO: semantics undecided; reserved for future per-chat tool defaults
|
|
37
|
+
with_params:
|
|
38
|
+
top_p: null # Nucleus sampling threshold (0.0-1.0)
|
|
39
|
+
top_k: null # Top-k sampling (integer, provider-specific)
|
|
40
|
+
max_tokens: null # Maximum tokens in response
|
|
41
|
+
presence_penalty: null # Penalize new tokens based on presence (-2.0 to 2.0)
|
|
42
|
+
frequency_penalty: null # Penalize new tokens based on frequency (-2.0 to 2.0)
|
|
43
|
+
stop: null # Stop sequences (string or array of strings)
|
|
44
|
+
|
|
45
|
+
# RubyLLM Configuration Section
|
|
46
|
+
ruby_llm:
|
|
47
|
+
provider: :anthropic
|
|
48
|
+
model: claude-sonnet-4
|
|
49
|
+
assume_model_exists: false # superseded by assume_exists in the chat section; set true for Ollama and other local LLM providers
|
|
50
|
+
# Provider API Keys (null = use env vars directly)
|
|
51
|
+
anthropic_api_key: null
|
|
52
|
+
openai_api_key: null
|
|
53
|
+
gemini_api_key: null
|
|
54
|
+
deepseek_api_key: null
|
|
55
|
+
mistral_api_key: null
|
|
56
|
+
perplexity_api_key: null
|
|
57
|
+
openrouter_api_key: null
|
|
58
|
+
gpustack_api_key: null
|
|
59
|
+
xai_api_key: null
|
|
60
|
+
|
|
61
|
+
# AWS Bedrock
|
|
62
|
+
bedrock_api_key: null
|
|
63
|
+
bedrock_secret_key: null
|
|
64
|
+
bedrock_region: null
|
|
65
|
+
bedrock_session_token: null
|
|
66
|
+
|
|
67
|
+
# Google Vertex AI
|
|
68
|
+
vertexai_project_id: null
|
|
69
|
+
vertexai_location: null
|
|
70
|
+
|
|
71
|
+
# Provider Endpoints (for self-hosted models)
|
|
72
|
+
openai_api_base: null
|
|
73
|
+
gemini_api_base: null
|
|
74
|
+
ollama_api_base: null
|
|
75
|
+
gpustack_api_base: null
|
|
76
|
+
xai_api_base: null
|
|
77
|
+
|
|
78
|
+
# OpenAI-Specific Options
|
|
79
|
+
openai_organization_id: null
|
|
80
|
+
openai_project_id: null
|
|
81
|
+
openai_use_system_role: true
|
|
82
|
+
|
|
83
|
+
# Default Models
|
|
84
|
+
default_model: null
|
|
85
|
+
default_embedding_model: null
|
|
86
|
+
default_image_model: null
|
|
87
|
+
default_moderation_model: null
|
|
88
|
+
|
|
89
|
+
# Connection Settings
|
|
90
|
+
request_timeout: 120
|
|
91
|
+
max_retries: 3
|
|
92
|
+
retry_interval: 1
|
|
93
|
+
retry_backoff_factor: 2
|
|
94
|
+
retry_interval_randomness: 0.5
|
|
95
|
+
http_proxy: null
|
|
96
|
+
|
|
97
|
+
# Logging Options
|
|
98
|
+
log_file: null
|
|
99
|
+
log_level: :info
|
|
100
|
+
log_stream_debug: false
|
|
101
|
+
|
|
102
|
+
development:
|
|
103
|
+
ruby_llm:
|
|
104
|
+
log_level: :debug
|
|
105
|
+
|
|
106
|
+
test:
|
|
107
|
+
max_iterations: 3
|
|
108
|
+
streaming_enabled: false
|
|
109
|
+
ruby_llm:
|
|
110
|
+
model: claude-3-haiku-20240307
|
|
111
|
+
request_timeout: 30
|
|
112
|
+
max_retries: 1
|
|
113
|
+
log_level: :warn
|
|
114
|
+
|
|
115
|
+
production:
|
|
116
|
+
streaming_enabled: false
|
|
117
|
+
max_iterations: 20
|
|
118
|
+
ruby_llm:
|
|
119
|
+
request_timeout: 180
|
|
120
|
+
max_retries: 5
|
|
121
|
+
log_level: :warn
|
|
# frozen_string_literal: true

require 'myway_config'

module RobotLab
  # Modern configuration class using MywayConfig for RobotLab.
  #
  # Provides:
  # - Nested configuration with a dedicated `ruby_llm:` section
  # - Environment-specific settings (development, test, production)
  # - XDG config file loading (~/.config/robot_lab/config.yml)
  # - Environment variable overrides (ROBOT_LAB_*)
  # - Automatic RubyLLM configuration application
  #
  # @example Access configuration values
  #   RobotLab.config.ruby_llm.model           #=> "claude-sonnet-4"
  #   RobotLab.config.ruby_llm.request_timeout #=> 120
  #   RobotLab.config.development?             #=> true
  #
  # @example Override via environment variables
  #   # ROBOT_LAB_RUBY_LLM__MODEL=gpt-4
  #   # ROBOT_LAB_RUBY_LLM__ANTHROPIC_API_KEY=sk-ant-...
  #
  # @example User config file (~/.config/robot_lab/config.yml)
  #   defaults:
  #     ruby_llm:
  #       anthropic_api_key: <%= ENV['ANTHROPIC_API_KEY'] %>
  #
  class Config < MywayConfig::Base
    config_name :robot_lab
    env_prefix :robot_lab
    defaults_path File.expand_path('config/defaults.yml', __dir__)
    auto_configure!

    # @!attribute [rw] logger
    #   @return [Logger] the logger instance (runtime-only, not from config file)
    attr_writer :logger

    # Returns the logger instance, lazily building a default when none was set.
    #
    # @return [Logger] the configured logger or a sensible default
    def logger
      @logger ||= default_logger
    end

    # Apply RubyLLM configuration after loading.
    #
    # Called after initialization to configure the RubyLLM gem with the
    # values from the ruby_llm section, and to set up the template library.
    #
    # @return [void]
    def after_load
      apply_ruby_llm_config!
      apply_prompt_manager!
    end

    # Apply all RubyLLM settings from the ruby_llm configuration section.
    # No-op when the section is absent.
    #
    # @return [void]
    def apply_ruby_llm_config!
      return unless ruby_llm

      RubyLLM.configure do |c|
        apply_provider_api_keys(c)
        apply_provider_endpoints(c)
        apply_openai_options(c)
        apply_default_models(c)
        apply_connection_settings(c)
        apply_logging_options(c)
      end
    end

    private

    # Copy provider API keys into the RubyLLM config, falling back to the
    # provider's conventional environment variable when the config value is
    # unset. This lets users set ANTHROPIC_API_KEY (etc.) directly without
    # needing the ROBOT_LAB_RUBY_LLM__ANTHROPIC_API_KEY prefix.
    def apply_provider_api_keys(c)
      set_if_present(c, :anthropic_api_key, :anthropic_api_key, 'ANTHROPIC_API_KEY')
      set_if_present(c, :openai_api_key, :openai_api_key, 'OPENAI_API_KEY')
      set_if_present(c, :gemini_api_key, :gemini_api_key, 'GEMINI_API_KEY')
      set_if_present(c, :deepseek_api_key, :deepseek_api_key, 'DEEPSEEK_API_KEY')
      set_if_present(c, :mistral_api_key, :mistral_api_key, 'MISTRAL_API_KEY')
      set_if_present(c, :perplexity_api_key, :perplexity_api_key, 'PERPLEXITY_API_KEY')
      set_if_present(c, :openrouter_api_key, :openrouter_api_key, 'OPENROUTER_API_KEY')
      set_if_present(c, :gpustack_api_key, :gpustack_api_key, 'GPUSTACK_API_KEY')
      set_if_present(c, :xai_api_key, :xai_api_key, 'XAI_API_KEY')

      # AWS Bedrock
      set_if_present(c, :bedrock_api_key, :bedrock_api_key, 'AWS_ACCESS_KEY_ID')
      set_if_present(c, :bedrock_secret_key, :bedrock_secret_key, 'AWS_SECRET_ACCESS_KEY')
      set_if_present(c, :bedrock_region, :bedrock_region, 'AWS_REGION')
      set_if_present(c, :bedrock_session_token, :bedrock_session_token, 'AWS_SESSION_TOKEN')

      # Google Vertex AI
      set_if_present(c, :vertexai_project_id, :vertexai_project_id, 'GOOGLE_CLOUD_PROJECT')
      set_if_present(c, :vertexai_location, :vertexai_location, 'GOOGLE_CLOUD_LOCATION')
    end

    # Copy self-hosted / alternate endpoint URLs when configured.
    def apply_provider_endpoints(c)
      c.openai_api_base = ruby_llm.openai_api_base if ruby_llm.openai_api_base
      c.gemini_api_base = ruby_llm.gemini_api_base if ruby_llm.gemini_api_base
      c.ollama_api_base = ruby_llm.ollama_api_base if ruby_llm.ollama_api_base
      c.gpustack_api_base = ruby_llm.gpustack_api_base if ruby_llm.gpustack_api_base
      c.xai_api_base = ruby_llm.xai_api_base if ruby_llm.xai_api_base
    end

    # OpenAI-specific options; openai_use_system_role is a boolean, so it is
    # guarded with nil? (false is a meaningful value and must be applied).
    def apply_openai_options(c)
      c.openai_organization_id = ruby_llm.openai_organization_id if ruby_llm.openai_organization_id
      c.openai_project_id = ruby_llm.openai_project_id if ruby_llm.openai_project_id
      c.openai_use_system_role = ruby_llm.openai_use_system_role unless ruby_llm.openai_use_system_role.nil?
    end

    # Default model selections for chat, embedding, image, and moderation.
    def apply_default_models(c)
      c.default_model = ruby_llm.default_model if ruby_llm.default_model
      c.default_embedding_model = ruby_llm.default_embedding_model if ruby_llm.default_embedding_model
      c.default_image_model = ruby_llm.default_image_model if ruby_llm.default_image_model
      c.default_moderation_model = ruby_llm.default_moderation_model if ruby_llm.default_moderation_model
    end

    # Timeouts, retry policy, and proxy settings.
    def apply_connection_settings(c)
      c.request_timeout = ruby_llm.request_timeout if ruby_llm.request_timeout
      c.max_retries = ruby_llm.max_retries if ruby_llm.max_retries
      c.retry_interval = ruby_llm.retry_interval if ruby_llm.retry_interval
      c.retry_backoff_factor = ruby_llm.retry_backoff_factor if ruby_llm.retry_backoff_factor
      c.retry_interval_randomness = ruby_llm.retry_interval_randomness if ruby_llm.retry_interval_randomness
      c.http_proxy = ruby_llm.http_proxy if ruby_llm.http_proxy
    end

    # Logging destination and verbosity; log_stream_debug is boolean-guarded.
    def apply_logging_options(c)
      c.log_file = ruby_llm.log_file if ruby_llm.log_file
      c.log_level = ruby_llm.log_level if ruby_llm.log_level
      c.log_stream_debug = ruby_llm.log_stream_debug unless ruby_llm.log_stream_debug.nil?
    end

    # Point the prompt-template manager at the resolved template directory.
    def apply_prompt_manager!
      path = resolved_template_path
      return unless path

      PM.configure do |c|
        c.prompts_dir = path
      end
    end

    # Set a RubyLLM config attribute from config value or standard env var.
    # Only sets when a non-nil value is found, to avoid overwriting defaults.
    def set_if_present(c, setter, config_key, env_var)
      value = ruby_llm.public_send(config_key) || ENV[env_var]
      c.public_send(:"#{setter}=", value) if value
    end

    # Explicit template_path wins; otherwise Rails apps use app/prompts and
    # plain Ruby projects use ./prompts.
    def resolved_template_path
      return template_path if template_path

      if defined?(Rails) && Rails.root
        Rails.root.join('app', 'prompts').to_s
      else
        'prompts'
      end
    end

    # Build the fallback logger.
    #
    # Prefers Rails.logger when available, but guards against it being nil
    # (Rails.logger is nil during early boot / before initializers run);
    # returning nil here would make callers like
    # `RobotLab.config.logger.warn(...)` raise NoMethodError.
    def default_logger
      rails_logger = defined?(Rails) && Rails.respond_to?(:logger) ? Rails.logger : nil
      return rails_logger if rails_logger

      require 'logger'
      Logger.new($stdout, level: Logger::INFO)
    end
  end
end
|
data/lib/robot_lab/error.rb
CHANGED
|
@@ -29,4 +29,10 @@ module RobotLab
|
|
|
29
29
|
# @example
|
|
30
30
|
# raise MCPError, "Connection to MCP server refused"
|
|
31
31
|
class MCPError < Error; end
|
|
32
|
+
|
|
33
|
+
# Raised when message bus communication fails.
|
|
34
|
+
#
|
|
35
|
+
# @example
|
|
36
|
+
# raise BusError, "No bus configured on this robot"
|
|
37
|
+
class BusError < Error; end
|
|
32
38
|
end
|
data/lib/robot_lab/mcp/client.rb
CHANGED
|
@@ -51,7 +51,7 @@ module RobotLab
|
|
|
51
51
|
|
|
52
52
|
self
|
|
53
53
|
rescue StandardError => e
|
|
54
|
-
RobotLab.
|
|
54
|
+
RobotLab.config.logger.warn("MCP connection failed for #{@server.name}: #{e.message}")
|
|
55
55
|
@connected = false
|
|
56
56
|
self
|
|
57
57
|
end
|