robot_lab 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.envrc +1 -0
- data/.github/workflows/deploy-github-pages.yml +52 -0
- data/.github/workflows/deploy-yard-docs.yml +52 -0
- data/CHANGELOG.md +55 -0
- data/COMMITS.md +196 -0
- data/LICENSE.txt +21 -0
- data/README.md +332 -0
- data/Rakefile +67 -0
- data/docs/api/adapters/anthropic.md +121 -0
- data/docs/api/adapters/gemini.md +133 -0
- data/docs/api/adapters/index.md +104 -0
- data/docs/api/adapters/openai.md +134 -0
- data/docs/api/core/index.md +113 -0
- data/docs/api/core/memory.md +314 -0
- data/docs/api/core/network.md +291 -0
- data/docs/api/core/robot.md +273 -0
- data/docs/api/core/state.md +273 -0
- data/docs/api/core/tool.md +353 -0
- data/docs/api/history/active-record-adapter.md +195 -0
- data/docs/api/history/config.md +191 -0
- data/docs/api/history/index.md +132 -0
- data/docs/api/history/thread-manager.md +144 -0
- data/docs/api/index.md +82 -0
- data/docs/api/mcp/client.md +221 -0
- data/docs/api/mcp/index.md +111 -0
- data/docs/api/mcp/server.md +225 -0
- data/docs/api/mcp/transports.md +264 -0
- data/docs/api/messages/index.md +67 -0
- data/docs/api/messages/text-message.md +102 -0
- data/docs/api/messages/tool-call-message.md +144 -0
- data/docs/api/messages/tool-result-message.md +154 -0
- data/docs/api/messages/user-message.md +171 -0
- data/docs/api/streaming/context.md +174 -0
- data/docs/api/streaming/events.md +237 -0
- data/docs/api/streaming/index.md +108 -0
- data/docs/architecture/core-concepts.md +243 -0
- data/docs/architecture/index.md +138 -0
- data/docs/architecture/message-flow.md +320 -0
- data/docs/architecture/network-orchestration.md +216 -0
- data/docs/architecture/robot-execution.md +243 -0
- data/docs/architecture/state-management.md +323 -0
- data/docs/assets/css/custom.css +56 -0
- data/docs/assets/images/robot_lab.jpg +0 -0
- data/docs/concepts.md +216 -0
- data/docs/examples/basic-chat.md +193 -0
- data/docs/examples/index.md +129 -0
- data/docs/examples/mcp-server.md +290 -0
- data/docs/examples/multi-robot-network.md +312 -0
- data/docs/examples/rails-application.md +420 -0
- data/docs/examples/tool-usage.md +310 -0
- data/docs/getting-started/configuration.md +230 -0
- data/docs/getting-started/index.md +56 -0
- data/docs/getting-started/installation.md +179 -0
- data/docs/getting-started/quick-start.md +203 -0
- data/docs/guides/building-robots.md +376 -0
- data/docs/guides/creating-networks.md +366 -0
- data/docs/guides/history.md +359 -0
- data/docs/guides/index.md +68 -0
- data/docs/guides/mcp-integration.md +356 -0
- data/docs/guides/memory.md +309 -0
- data/docs/guides/rails-integration.md +432 -0
- data/docs/guides/streaming.md +314 -0
- data/docs/guides/using-tools.md +394 -0
- data/docs/index.md +160 -0
- data/examples/01_simple_robot.rb +38 -0
- data/examples/02_tools.rb +106 -0
- data/examples/03_network.rb +103 -0
- data/examples/04_mcp.rb +219 -0
- data/examples/05_streaming.rb +124 -0
- data/examples/06_prompt_templates.rb +324 -0
- data/examples/07_network_memory.rb +329 -0
- data/examples/prompts/assistant/system.txt.erb +2 -0
- data/examples/prompts/assistant/user.txt.erb +1 -0
- data/examples/prompts/billing/system.txt.erb +7 -0
- data/examples/prompts/billing/user.txt.erb +1 -0
- data/examples/prompts/classifier/system.txt.erb +4 -0
- data/examples/prompts/classifier/user.txt.erb +1 -0
- data/examples/prompts/entity_extractor/system.txt.erb +11 -0
- data/examples/prompts/entity_extractor/user.txt.erb +3 -0
- data/examples/prompts/escalation/system.txt.erb +35 -0
- data/examples/prompts/escalation/user.txt.erb +34 -0
- data/examples/prompts/general/system.txt.erb +4 -0
- data/examples/prompts/general/user.txt.erb +1 -0
- data/examples/prompts/github_assistant/system.txt.erb +6 -0
- data/examples/prompts/github_assistant/user.txt.erb +1 -0
- data/examples/prompts/helper/system.txt.erb +1 -0
- data/examples/prompts/helper/user.txt.erb +1 -0
- data/examples/prompts/keyword_extractor/system.txt.erb +8 -0
- data/examples/prompts/keyword_extractor/user.txt.erb +3 -0
- data/examples/prompts/order_support/system.txt.erb +27 -0
- data/examples/prompts/order_support/user.txt.erb +22 -0
- data/examples/prompts/product_support/system.txt.erb +30 -0
- data/examples/prompts/product_support/user.txt.erb +32 -0
- data/examples/prompts/sentiment_analyzer/system.txt.erb +9 -0
- data/examples/prompts/sentiment_analyzer/user.txt.erb +3 -0
- data/examples/prompts/synthesizer/system.txt.erb +14 -0
- data/examples/prompts/synthesizer/user.txt.erb +15 -0
- data/examples/prompts/technical/system.txt.erb +7 -0
- data/examples/prompts/technical/user.txt.erb +1 -0
- data/examples/prompts/triage/system.txt.erb +16 -0
- data/examples/prompts/triage/user.txt.erb +17 -0
- data/lib/generators/robot_lab/install_generator.rb +78 -0
- data/lib/generators/robot_lab/robot_generator.rb +55 -0
- data/lib/generators/robot_lab/templates/initializer.rb.tt +41 -0
- data/lib/generators/robot_lab/templates/migration.rb.tt +32 -0
- data/lib/generators/robot_lab/templates/result_model.rb.tt +52 -0
- data/lib/generators/robot_lab/templates/robot.rb.tt +46 -0
- data/lib/generators/robot_lab/templates/robot_test.rb.tt +32 -0
- data/lib/generators/robot_lab/templates/routing_robot.rb.tt +53 -0
- data/lib/generators/robot_lab/templates/thread_model.rb.tt +40 -0
- data/lib/robot_lab/adapters/anthropic.rb +163 -0
- data/lib/robot_lab/adapters/base.rb +85 -0
- data/lib/robot_lab/adapters/gemini.rb +193 -0
- data/lib/robot_lab/adapters/openai.rb +159 -0
- data/lib/robot_lab/adapters/registry.rb +81 -0
- data/lib/robot_lab/configuration.rb +143 -0
- data/lib/robot_lab/error.rb +32 -0
- data/lib/robot_lab/errors.rb +70 -0
- data/lib/robot_lab/history/active_record_adapter.rb +146 -0
- data/lib/robot_lab/history/config.rb +115 -0
- data/lib/robot_lab/history/thread_manager.rb +93 -0
- data/lib/robot_lab/mcp/client.rb +210 -0
- data/lib/robot_lab/mcp/server.rb +84 -0
- data/lib/robot_lab/mcp/transports/base.rb +56 -0
- data/lib/robot_lab/mcp/transports/sse.rb +117 -0
- data/lib/robot_lab/mcp/transports/stdio.rb +133 -0
- data/lib/robot_lab/mcp/transports/streamable_http.rb +139 -0
- data/lib/robot_lab/mcp/transports/websocket.rb +108 -0
- data/lib/robot_lab/memory.rb +882 -0
- data/lib/robot_lab/memory_change.rb +123 -0
- data/lib/robot_lab/message.rb +357 -0
- data/lib/robot_lab/network.rb +350 -0
- data/lib/robot_lab/rails/engine.rb +29 -0
- data/lib/robot_lab/rails/railtie.rb +42 -0
- data/lib/robot_lab/robot.rb +560 -0
- data/lib/robot_lab/robot_result.rb +205 -0
- data/lib/robot_lab/robotic_model.rb +324 -0
- data/lib/robot_lab/state_proxy.rb +188 -0
- data/lib/robot_lab/streaming/context.rb +144 -0
- data/lib/robot_lab/streaming/events.rb +95 -0
- data/lib/robot_lab/streaming/sequence_counter.rb +48 -0
- data/lib/robot_lab/task.rb +117 -0
- data/lib/robot_lab/tool.rb +223 -0
- data/lib/robot_lab/tool_config.rb +112 -0
- data/lib/robot_lab/tool_manifest.rb +234 -0
- data/lib/robot_lab/user_message.rb +118 -0
- data/lib/robot_lab/version.rb +5 -0
- data/lib/robot_lab/waiter.rb +73 -0
- data/lib/robot_lab.rb +195 -0
- data/mkdocs.yml +214 -0
- data/sig/robot_lab.rbs +4 -0
- metadata +442 -0
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
# Network Orchestration
|
|
2
|
+
|
|
3
|
+
Networks coordinate multiple robots using [SimpleFlow](https://github.com/MadBomber/simple_flow) pipelines for DAG-based execution.
|
|
4
|
+
|
|
5
|
+
## Network Structure
|
|
6
|
+
|
|
7
|
+
A network is a thin wrapper around `SimpleFlow::Pipeline`:
|
|
8
|
+
|
|
9
|
+
- **Pipeline**: DAG-based execution engine
|
|
10
|
+
- **Robots**: Named collection of task handlers
|
|
11
|
+
- **Tasks**: Define dependencies and execution order
|
|
12
|
+
|
|
13
|
+
```ruby
|
|
14
|
+
network = RobotLab.create_network(name: "customer_service") do
|
|
15
|
+
task :classifier, classifier_robot, depends_on: :none
|
|
16
|
+
task :billing, billing_robot, depends_on: :optional
|
|
17
|
+
task :technical, technical_robot, depends_on: :optional
|
|
18
|
+
end
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Task Configuration
|
|
22
|
+
|
|
23
|
+
Tasks can have per-task configuration that is deep-merged with the network's run params:
|
|
24
|
+
|
|
25
|
+
```ruby
|
|
26
|
+
network = RobotLab.create_network(name: "support") do
|
|
27
|
+
task :classifier, classifier_robot, depends_on: :none
|
|
28
|
+
task :billing, billing_robot,
|
|
29
|
+
context: { department: "billing", escalation_level: 2 },
|
|
30
|
+
tools: [RefundTool],
|
|
31
|
+
depends_on: :optional
|
|
32
|
+
task :technical, technical_robot,
|
|
33
|
+
context: { department: "technical" },
|
|
34
|
+
mcp: [FilesystemServer],
|
|
35
|
+
depends_on: :optional
|
|
36
|
+
end
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## Execution Model
|
|
40
|
+
|
|
41
|
+
```mermaid
|
|
42
|
+
stateDiagram-v2
|
|
43
|
+
[*] --> Start
|
|
44
|
+
Start --> ExecuteTask: next ready task
|
|
45
|
+
ExecuteTask --> CheckDependents: task complete
|
|
46
|
+
CheckDependents --> ExecuteTask: more tasks ready
|
|
47
|
+
CheckDependents --> Complete: all tasks done
|
|
48
|
+
ExecuteTask --> Halted: task halts
|
|
49
|
+
Complete --> [*]
|
|
50
|
+
Halted --> [*]
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
### Task Dependency Types
|
|
54
|
+
|
|
55
|
+
| Type | Description |
|
|
56
|
+
|------|-------------|
|
|
57
|
+
| `:none` | No dependencies, runs first |
|
|
58
|
+
| `[:task1, :task2]` | Waits for listed tasks |
|
|
59
|
+
| `:optional` | Only runs when activated |
|
|
60
|
+
|
|
61
|
+
## Robot#call Interface
|
|
62
|
+
|
|
63
|
+
Each robot implements the SimpleFlow step interface:
|
|
64
|
+
|
|
65
|
+
```ruby
|
|
66
|
+
class Robot
|
|
67
|
+
def call(result)
|
|
68
|
+
# Run the LLM
|
|
69
|
+
robot_result = run(**extract_run_context(result))
|
|
70
|
+
|
|
71
|
+
# Return new result with context
|
|
72
|
+
result
|
|
73
|
+
.with_context(@name.to_sym, robot_result)
|
|
74
|
+
.continue(robot_result)
|
|
75
|
+
end
|
|
76
|
+
end
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### Result Methods
|
|
80
|
+
|
|
81
|
+
| Method | Description |
|
|
82
|
+
|--------|-------------|
|
|
83
|
+
| `continue(value)` | Continue to next tasks |
|
|
84
|
+
| `halt(value)` | Stop pipeline execution |
|
|
85
|
+
| `with_context(key, val)` | Add data to context |
|
|
86
|
+
| `activate(task_name)` | Enable optional task |
|
|
87
|
+
|
|
88
|
+
## SimpleFlow::Result
|
|
89
|
+
|
|
90
|
+
The result object flows through the pipeline:
|
|
91
|
+
|
|
92
|
+
```ruby
|
|
93
|
+
result.value # Current task's output
|
|
94
|
+
result.context # Accumulated context from all tasks
|
|
95
|
+
result.halted? # Whether execution stopped early
|
|
96
|
+
result.continued? # Whether execution continues
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Context Structure
|
|
100
|
+
|
|
101
|
+
```ruby
|
|
102
|
+
{
|
|
103
|
+
run_params: { message: "...", customer_id: 123 },
|
|
104
|
+
classifier: RobotResult,
|
|
105
|
+
billing: RobotResult,
|
|
106
|
+
# ... other task results
|
|
107
|
+
}
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
## Optional Task Activation
|
|
111
|
+
|
|
112
|
+
Optional tasks don't run automatically. They must be activated:
|
|
113
|
+
|
|
114
|
+
```ruby
|
|
115
|
+
class ClassifierRobot < RobotLab::Robot
|
|
116
|
+
def call(result)
|
|
117
|
+
robot_result = run(**extract_run_context(result))
|
|
118
|
+
|
|
119
|
+
new_result = result
|
|
120
|
+
.with_context(@name.to_sym, robot_result)
|
|
121
|
+
.continue(robot_result)
|
|
122
|
+
|
|
123
|
+
# Analyze output and activate appropriate task
|
|
124
|
+
category = robot_result.last_text_content.to_s.downcase
|
|
125
|
+
|
|
126
|
+
case category
|
|
127
|
+
when /billing/
|
|
128
|
+
new_result.activate(:billing)
|
|
129
|
+
when /technical/
|
|
130
|
+
new_result.activate(:technical)
|
|
131
|
+
else
|
|
132
|
+
new_result.activate(:general)
|
|
133
|
+
end
|
|
134
|
+
end
|
|
135
|
+
end
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
## Parallel Execution
|
|
139
|
+
|
|
140
|
+
Tasks with the same dependencies can run in parallel:
|
|
141
|
+
|
|
142
|
+
```ruby
|
|
143
|
+
network = RobotLab.create_network(name: "analysis", concurrency: :threads) do
|
|
144
|
+
task :fetch, fetcher, depends_on: :none
|
|
145
|
+
|
|
146
|
+
# These three run in parallel
|
|
147
|
+
task :sentiment, sentiment_bot, depends_on: [:fetch]
|
|
148
|
+
task :entities, entity_bot, depends_on: [:fetch]
|
|
149
|
+
task :keywords, keyword_bot, depends_on: [:fetch]
|
|
150
|
+
|
|
151
|
+
# Waits for all three
|
|
152
|
+
task :merge, merger, depends_on: [:sentiment, :entities, :keywords]
|
|
153
|
+
end
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
### Concurrency Modes
|
|
157
|
+
|
|
158
|
+
| Mode | Description |
|
|
159
|
+
|------|-------------|
|
|
160
|
+
| `:auto` | SimpleFlow chooses best mode |
|
|
161
|
+
| `:threads` | Use Ruby threads |
|
|
162
|
+
| `:async` | Use async/fiber |
|
|
163
|
+
|
|
164
|
+
## Data Flow
|
|
165
|
+
|
|
166
|
+
1. **Initial Value**: `network.run(**params)` creates initial result
|
|
167
|
+
2. **Run Params**: Stored in `result.context[:run_params]`
|
|
168
|
+
3. **Task Results**: Each task adds to context
|
|
169
|
+
4. **Final Value**: Last task's output becomes `result.value`
|
|
170
|
+
|
|
171
|
+
```ruby
|
|
172
|
+
# Run with context
|
|
173
|
+
result = network.run(
|
|
174
|
+
message: "Help with billing",
|
|
175
|
+
customer_id: 123
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
# Access the flow
|
|
179
|
+
result.context[:run_params] # { message: "...", customer_id: 123 }
|
|
180
|
+
result.context[:classifier] # First robot's RobotResult
|
|
181
|
+
result.context[:billing] # Billing robot's RobotResult
|
|
182
|
+
result.value # Final RobotResult
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
## Visualization
|
|
186
|
+
|
|
187
|
+
Networks provide visualization methods:
|
|
188
|
+
|
|
189
|
+
```ruby
|
|
190
|
+
# ASCII representation
|
|
191
|
+
puts network.visualize
|
|
192
|
+
|
|
193
|
+
# Mermaid diagram
|
|
194
|
+
puts network.to_mermaid
|
|
195
|
+
|
|
196
|
+
# Execution plan description
|
|
197
|
+
puts network.execution_plan
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
## Network Configuration
|
|
201
|
+
|
|
202
|
+
```ruby
|
|
203
|
+
network = RobotLab.create_network(
|
|
204
|
+
name: "support",
|
|
205
|
+
concurrency: :threads # :auto, :threads, or :async
|
|
206
|
+
) do
|
|
207
|
+
task :classifier, classifier, depends_on: :none
|
|
208
|
+
task :handler, handler, depends_on: [:classifier]
|
|
209
|
+
end
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
## Next Steps
|
|
213
|
+
|
|
214
|
+
- [Creating Networks](../guides/creating-networks.md) - Practical patterns
|
|
215
|
+
- [Robot Execution](robot-execution.md) - How robots process messages
|
|
216
|
+
- [API Reference: Network](../api/core/network.md) - Complete API
|
|
@@ -0,0 +1,243 @@
|
|
|
1
|
+
# Robot Execution
|
|
2
|
+
|
|
3
|
+
This page details how a robot processes messages and generates responses.
|
|
4
|
+
|
|
5
|
+
## Execution Overview
|
|
6
|
+
|
|
7
|
+
When you call `robot.run(state:, network:)`, several steps occur:
|
|
8
|
+
|
|
9
|
+
```mermaid
|
|
10
|
+
sequenceDiagram
|
|
11
|
+
participant App as Application
|
|
12
|
+
participant Robot
|
|
13
|
+
participant Model as RoboticModel
|
|
14
|
+
participant Adapter
|
|
15
|
+
participant LLM
|
|
16
|
+
|
|
17
|
+
App->>Robot: run(state, network)
|
|
18
|
+
Robot->>Robot: resolve_tools()
|
|
19
|
+
Robot->>Model: infer(messages, tools)
|
|
20
|
+
Model->>Adapter: format_messages()
|
|
21
|
+
Model->>LLM: API Request
|
|
22
|
+
|
|
23
|
+
loop Tool Calls
|
|
24
|
+
LLM-->>Model: tool_call response
|
|
25
|
+
Model->>Robot: execute_tool()
|
|
26
|
+
Robot->>Model: tool_result
|
|
27
|
+
Model->>LLM: continue
|
|
28
|
+
end
|
|
29
|
+
|
|
30
|
+
LLM-->>Model: final response
|
|
31
|
+
Model->>Adapter: parse_response()
|
|
32
|
+
Model-->>Robot: InferenceResponse
|
|
33
|
+
Robot-->>App: RobotResult
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
## Step-by-Step Flow
|
|
37
|
+
|
|
38
|
+
### 1. Tool Resolution
|
|
39
|
+
|
|
40
|
+
Before making any LLM call, the robot resolves available tools:
|
|
41
|
+
|
|
42
|
+
```ruby
|
|
43
|
+
# Internal process
|
|
44
|
+
tools = []
|
|
45
|
+
tools += local_tools # Tools defined on robot
|
|
46
|
+
tools += mcp_tools # Tools from MCP servers
|
|
47
|
+
tools = apply_whitelist(tools) # Filter by allowed tools
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
### 2. Message Preparation
|
|
51
|
+
|
|
52
|
+
The robot prepares messages from state:
|
|
53
|
+
|
|
54
|
+
```ruby
|
|
55
|
+
messages = []
|
|
56
|
+
messages << system_message # From template
|
|
57
|
+
messages += state.messages # Conversation history
|
|
58
|
+
messages << user_message # Current input
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### 3. LLM Inference
|
|
62
|
+
|
|
63
|
+
Messages are sent to the LLM via `RoboticModel`:
|
|
64
|
+
|
|
65
|
+
```ruby
|
|
66
|
+
response = model.infer(
|
|
67
|
+
messages,
|
|
68
|
+
tools,
|
|
69
|
+
tool_choice: "auto",
|
|
70
|
+
streaming: streaming_callback
|
|
71
|
+
)
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### 4. Tool Execution Loop
|
|
75
|
+
|
|
76
|
+
If the LLM requests tool calls:
|
|
77
|
+
|
|
78
|
+
```ruby
|
|
79
|
+
loop do
|
|
80
|
+
if response.wants_tools?
|
|
81
|
+
response.tool_calls.each do |tool_call|
|
|
82
|
+
result = execute_tool(tool_call)
|
|
83
|
+
# Result sent back to LLM
|
|
84
|
+
end
|
|
85
|
+
response = model.continue_with_results(results)
|
|
86
|
+
else
|
|
87
|
+
break
|
|
88
|
+
end
|
|
89
|
+
end
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### 5. Result Construction
|
|
93
|
+
|
|
94
|
+
Finally, a `RobotResult` is created:
|
|
95
|
+
|
|
96
|
+
```ruby
|
|
97
|
+
RobotResult.new(
|
|
98
|
+
robot_name: name,
|
|
99
|
+
output: response.output,
|
|
100
|
+
tool_calls: executed_tools,
|
|
101
|
+
stop_reason: response.stop_reason
|
|
102
|
+
)
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
## Tool Execution
|
|
106
|
+
|
|
107
|
+
### Tool Call Processing
|
|
108
|
+
|
|
109
|
+
When the LLM requests a tool:
|
|
110
|
+
|
|
111
|
+
1. **Identify Tool**: Match tool name to registered tools
|
|
112
|
+
2. **Validate Input**: Check parameters against schema
|
|
113
|
+
3. **Execute Handler**: Call the tool's handler function
|
|
114
|
+
4. **Capture Result**: Wrap response in ToolResultMessage
|
|
115
|
+
5. **Return to LLM**: Send result for continued processing
|
|
116
|
+
|
|
117
|
+
### Execution Context
|
|
118
|
+
|
|
119
|
+
Tools receive context about their execution environment:
|
|
120
|
+
|
|
121
|
+
```ruby
|
|
122
|
+
tool.handler.call(
|
|
123
|
+
**tool_call.input, # User-provided arguments
|
|
124
|
+
robot: self, # The executing robot
|
|
125
|
+
network: network, # Network context
|
|
126
|
+
state: state # Current state
|
|
127
|
+
)
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
### Error Handling
|
|
131
|
+
|
|
132
|
+
Tool errors are captured and returned to the LLM:
|
|
133
|
+
|
|
134
|
+
```ruby
|
|
135
|
+
begin
|
|
136
|
+
result = tool.handler.call(**args)
|
|
137
|
+
ToolResultMessage.new(tool: tool_call, content: { data: result })
|
|
138
|
+
rescue StandardError => e
|
|
139
|
+
ToolResultMessage.new(tool: tool_call, content: { error: e.message })
|
|
140
|
+
end
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
## Iteration Limits
|
|
144
|
+
|
|
145
|
+
Robot execution has safeguards:
|
|
146
|
+
|
|
147
|
+
| Limit | Default | Purpose |
|
|
148
|
+
|-------|---------|---------|
|
|
149
|
+
| `max_tool_iterations` | 10 | Max tool calls per robot run |
|
|
150
|
+
|
|
151
|
+
When limits are reached, execution stops with the current state.
|
|
152
|
+
|
|
153
|
+
## Streaming
|
|
154
|
+
|
|
155
|
+
Robots support streaming responses:
|
|
156
|
+
|
|
157
|
+
```ruby
|
|
158
|
+
robot.run(
|
|
159
|
+
state: state,
|
|
160
|
+
network: network,
|
|
161
|
+
streaming: ->(event) {
|
|
162
|
+
case event.type
|
|
163
|
+
when :delta then print event.content
|
|
164
|
+
when :tool_call then puts "Calling: #{event.tool_name}"
|
|
165
|
+
end
|
|
166
|
+
}
|
|
167
|
+
)
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
### Streaming Events
|
|
171
|
+
|
|
172
|
+
| Event Type | Description |
|
|
173
|
+
|------------|-------------|
|
|
174
|
+
| `run.started` | Robot execution began |
|
|
175
|
+
| `delta` | Text content chunk |
|
|
176
|
+
| `tool_call` | Tool execution starting |
|
|
177
|
+
| `tool_result` | Tool execution complete |
|
|
178
|
+
| `run.completed` | Robot execution finished |
|
|
179
|
+
| `run.failed` | Error occurred |
|
|
180
|
+
|
|
181
|
+
## Model Selection
|
|
182
|
+
|
|
183
|
+
The model is determined by:
|
|
184
|
+
|
|
185
|
+
1. Robot's explicit `model` setting
|
|
186
|
+
2. Network's `default_model`
|
|
187
|
+
3. Global `RobotLab.configuration.default_model`
|
|
188
|
+
|
|
189
|
+
```ruby
|
|
190
|
+
robot = RobotLab.build do
|
|
191
|
+
model "claude-sonnet-4" # Takes precedence
|
|
192
|
+
end
|
|
193
|
+
|
|
194
|
+
network = RobotLab.create_network do
|
|
195
|
+
default_model "gpt-4" # Fallback for robots without model
|
|
196
|
+
end
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
## Provider Detection
|
|
200
|
+
|
|
201
|
+
If no provider is specified, it is detected from the model name:
|
|
202
|
+
|
|
203
|
+
| Model Pattern | Provider |
|
|
204
|
+
|--------------|----------|
|
|
205
|
+
| `claude-*`, `anthropic-*` | `:anthropic` |
|
|
206
|
+
| `gpt-*`, `o1-*`, `chatgpt-*` | `:openai` |
|
|
207
|
+
| `gemini-*` | `:gemini` |
|
|
208
|
+
| `llama-*`, `mistral-*` | `:ollama` |
|
|
209
|
+
|
|
210
|
+
## RoboticModel
|
|
211
|
+
|
|
212
|
+
The `RoboticModel` class handles LLM communication:
|
|
213
|
+
|
|
214
|
+
```ruby
|
|
215
|
+
model = RoboticModel.new("claude-sonnet-4", provider: :anthropic)
|
|
216
|
+
|
|
217
|
+
# Full inference
|
|
218
|
+
response = model.infer(messages, tools)
|
|
219
|
+
|
|
220
|
+
# Quick ask
|
|
221
|
+
response = model.ask("What is 2+2?", system: "You are a math tutor")
|
|
222
|
+
```
|
|
223
|
+
|
|
224
|
+
### InferenceResponse
|
|
225
|
+
|
|
226
|
+
The response object provides:
|
|
227
|
+
|
|
228
|
+
```ruby
|
|
229
|
+
response.output # Array<Message> - parsed output
|
|
230
|
+
response.raw # Original LLM response
|
|
231
|
+
response.stop_reason # "stop", "tool", etc.
|
|
232
|
+
response.stopped? # true if naturally completed
|
|
233
|
+
response.wants_tools? # true if tool calls pending
|
|
234
|
+
response.tool_calls # Array<ToolMessage>
|
|
235
|
+
response.text_content # Combined text from output
|
|
236
|
+
response.captured_tool_results # Auto-executed tool results
|
|
237
|
+
```
|
|
238
|
+
|
|
239
|
+
## Next Steps
|
|
240
|
+
|
|
241
|
+
- [Network Orchestration](network-orchestration.md) - Multi-robot coordination
|
|
242
|
+
- [State Management](state-management.md) - Managing state across robots
|
|
243
|
+
- [Using Tools](../guides/using-tools.md) - Creating and using tools
|