langgraph_rb 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +9 -0
- data/README.md +350 -0
- data/SUMMARY.md +170 -0
- data/examples/advanced_example.rb +388 -0
- data/examples/basic_example.rb +211 -0
- data/examples/simple_test.rb +266 -0
- data/langgraph_rb.gemspec +43 -0
- data/lib/langgraph_rb/command.rb +132 -0
- data/lib/langgraph_rb/edge.rb +141 -0
- data/lib/langgraph_rb/graph.rb +268 -0
- data/lib/langgraph_rb/node.rb +112 -0
- data/lib/langgraph_rb/runner.rb +360 -0
- data/lib/langgraph_rb/state.rb +70 -0
- data/lib/langgraph_rb/stores/memory.rb +265 -0
- data/lib/langgraph_rb/version.rb +3 -0
- data/lib/langgraph_rb.rb +15 -0
- data/test_runner.rb +160 -0
- metadata +151 -0
@@ -0,0 +1,388 @@
|
|
1
|
+
#!/usr/bin/env ruby

require 'securerandom'
require_relative '../lib/langgraph_rb'
|
4
|
+
|
5
|
+
# Mock research tools
|
6
|
+
# Stub search backend for the examples. Returns three canned result
# strings for any query after a short delay that imitates a network call.
class MockSearchTool
  # query - String search term.
  # Returns a Hash with :results (Array of Strings) and :source.
  def self.call(query)
    puts " š Searching for: #{query}"
    sleep(0.3) # Simulate API call

    hits = [
      "Result 1 for #{query}: Important finding about #{query}",
      "Result 2 for #{query}: Another insight on #{query}",
      "Result 3 for #{query}: #{query} research conclusion"
    ]

    { results: hits, source: "MockSearch" }
  end
end
|
20
|
+
|
21
|
+
# Stub summarizer for the examples. Accepts a hash shaped like
# MockSearchTool's output ({ results: [String, ...] }) and returns a
# { summary:, word_count: } hash after a short simulated delay.
class MockSummarizer
  # content - Hash with an optional :results array of strings.
  # Returns a Hash with a truncated :summary and the total :word_count.
  def self.call(content)
    puts " š Summarizing content..."
    sleep(0.2) # simulate processing latency

    results = content[:results]

    {
      # First result truncated to 50 chars; nil-safe when :results is absent
      summary: "Summary: #{results&.first&.slice(0, 50)}...",
      # FIX: previously returned the joined string's *character* count;
      # count whitespace-separated words instead, as the key name implies.
      word_count: results&.join(' ')&.split(/\s+/)&.length || 0
    }
  end
end
|
31
|
+
|
32
|
+
# Example: Research assistant with parallel processing
|
33
|
+
# Demo: a research assistant graph that fans work out in parallel with
# Send/MultiSend, synthesizes the results, and simulates a
# human-in-the-loop review before producing a final report.
# Requires 'securerandom' (loaded at the top of this file).
def research_assistant_example
  puts "=== Advanced Research Assistant Example ==="

  # Create graph with parallel processing
  graph = LangGraphRB::Graph.new do
    # Entry point - split the request into research topics
    node :process_request do |state|
      query = state[:query]
      puts "š Processing research request: #{query}"

      # Topics separated by "and", commas, or "&" are researched in parallel
      topics = query.downcase.split(/\s+and\s+|\s*,\s*|\s*&\s*/)

      puts " Topics identified: #{topics.inspect}"

      {
        original_query: query,
        topics: topics,
        research_tasks: topics.map.with_index { |topic, i| { id: i, topic: topic } }
      }
    end

    # Fan out to parallel research tasks
    node :distribute_research do |state|
      tasks = state[:research_tasks] || []

      puts "š Distributing #{tasks.length} research tasks in parallel"

      # One Send per task; MultiSend dispatches them all at once
      sends = tasks.map do |task|
        LangGraphRB::Send.new(
          to: :research_topic,
          payload: {
            task_id: task[:id],
            topic: task[:topic],
            parent_query: state[:original_query]
          }
        )
      end

      LangGraphRB::MultiSend.new(sends)
    end

    # Parallel research node (executed once per Send payload)
    node :research_topic do |state|
      topic = state[:topic]
      task_id = state[:task_id]

      puts "š¬ [Task #{task_id}] Researching: #{topic}"

      # Simulate research: search, then summarize the results
      search_results = MockSearchTool.call(topic)
      summary = MockSummarizer.call(search_results)

      {
        task_id: task_id,
        topic: topic,
        research_complete: true,
        findings: {
          topic: topic,
          results: search_results[:results],
          summary: summary[:summary],
          word_count: summary[:word_count]
        }
      }
    end

    # Collect and synthesize results
    node :synthesize_results do |state|
      puts "š§© Synthesizing research results..."

      # In a real implementation, this would collect results from all
      # parallel tasks; here we use whatever findings are in state.
      findings = state[:findings] || {}

      synthesis = {
        total_topics_researched: 1, # would be dynamic in a real implementation
        key_findings: findings[:summary] || "Research completed",
        confidence_score: 0.85,
        synthesis_complete: true
      }

      puts " ā Synthesis complete - Confidence: #{synthesis[:confidence_score]}"

      {
        synthesis: synthesis,
        ready_for_review: true
      }
    end

    # Human review checkpoint (simplified for demo)
    node :request_human_review do |state|
      puts "š¤ Requesting human review of research..."

      review_data = {
        findings: state[:synthesis],
        original_query: state[:original_query],
        topics_covered: state[:topics] || [],
        timestamp: Time.now
      }

      puts " š Review data prepared"
      puts " š¤ Auto-approving for demo (in real app, this would wait for human input)"

      # Instead of interrupting, automatically approve for demo
      {
        review_data: review_data,
        human_feedback: 'approve' # Simulate human approval
      }
    end

    # Process human feedback and decide the next status
    node :process_feedback do |state|
      feedback = state[:human_feedback] || 'approve'

      puts "š Processing human feedback: #{feedback}"

      case feedback.downcase
      when 'approve'
        { status: 'approved', final_report_ready: true }
      when 'revise'
        # Jump back to the start with a revision flag set
        LangGraphRB::Commands.update_and_goto(
          { status: 'needs_revision', revision_requested: true },
          :process_request
        )
      else
        { status: 'unclear_feedback', needs_clarification: true }
      end
    end

    # Generate final report
    node :generate_final_report do |state|
      puts "š Generating final research report..."

      report = {
        title: "Research Report: #{state[:original_query]}",
        executive_summary: state[:synthesis][:key_findings],
        confidence: state[:synthesis][:confidence_score],
        status: state[:status],
        generated_at: Time.now,
        report_id: SecureRandom.hex(6)
      }

      puts " š Report generated: #{report[:report_id]}"

      {
        final_report: report,
        completed: true
      }
    end

    # Define the flow
    set_entry_point :process_request
    edge :process_request, :distribute_research
    edge :distribute_research, :research_topic
    edge :research_topic, :synthesize_results
    edge :synthesize_results, :request_human_review
    edge :request_human_review, :process_feedback

    # Conditional routing based on feedback
    conditional_edge :process_feedback, ->(state) {
      case state[:status]
      when 'approved'       then :generate_final_report
      when 'needs_revision' then :process_request
      else                       :request_human_review
      end
    }

    set_finish_point :generate_final_report
  end

  graph.compile!

  puts "\nš Research Assistant Graph Structure:"
  puts graph.to_mermaid
  puts

  # Persistence store and a stable thread id so the run can be resumed
  store = LangGraphRB::Stores::InMemoryStore.new
  thread_id = "research_#{SecureRandom.hex(4)}"

  # Interrupt handler for human-in-the-loop pauses
  interrupt_handler = proc do |interrupt|
    puts "\nāøļø EXECUTION PAUSED"
    puts " Message: #{interrupt.message}"
    puts " Data: #{interrupt.data.keys.inspect}"

    # Simulate human input (in real app, this would be user input)
    puts "\nš¤ Simulating human approval..."
    sleep(1)

    { human_feedback: 'approve' } # Return the human input
  end

  # Run the research assistant using the graph's stream method
  puts "š Starting research assistant..."

  result = graph.stream({
    query: "artificial intelligence and machine learning trends"
  }, store: store, thread_id: thread_id) do |step_result|
    puts " š Step #{step_result[:step]}: #{step_result[:active_nodes].inspect}"
    puts " Completed: #{step_result[:completed]}"

    # Handle interrupts; resuming with the returned input would be
    # handled differently in a real implementation.
    interrupt_handler.call(step_result[:interrupt]) if step_result[:interrupted]
  end

  puts "\nā Research completed!"
  puts "Final report ID: #{result[:state][:final_report][:report_id]}" if result[:state][:final_report]
  puts "Thread ID: #{thread_id} (can be used to resume if needed)"

  # Show checkpoints
  puts "\nš Execution checkpoints:"
  store.list_steps(thread_id).each do |step|
    checkpoint = store.load(thread_id, step)
    puts " Step #{step}: #{checkpoint[:timestamp]} - #{checkpoint[:state].keys.inspect}"
  end
end
|
264
|
+
|
265
|
+
# Example of map-reduce pattern with Send commands
|
266
|
+
# Demo: map-reduce over a numeric dataset. Chunks are dispatched in
# parallel via Send commands, processed (squared), then folded back in.
def map_reduce_example
  puts "\n=== Map-Reduce Processing Example ==="

  # Simulate processing a large dataset
  graph = LangGraphRB::Graph.new do
    # Split the input into fixed-size chunks of 3
    node :prepare_data do |state|
      data = state[:input_data] || (1..10).to_a
      chunks = data.each_slice(3).to_a

      puts "š¦ Preparing data for processing: #{chunks.length} chunks"

      { original_data: data, chunks: chunks, total_items: data.length }
    end

    # Map: dispatch one Send per chunk
    node :map_phase do |state|
      chunks = state[:chunks] || []

      puts "šŗļø Starting map phase with #{chunks.length} chunks"

      sends = chunks.map.with_index do |chunk, index|
        LangGraphRB::Send.new(
          to: :process_chunk,
          payload: { chunk_id: index, chunk_data: chunk, chunk_size: chunk.length }
        )
      end

      LangGraphRB::MultiSend.new(sends)
    end

    # Worker: square every number in the chunk
    node :process_chunk do |state|
      chunk_id = state[:chunk_id]
      chunk_data = state[:chunk_data]

      puts " āļø Processing chunk #{chunk_id}: #{chunk_data.inspect}"

      squared = chunk_data.map { |x| x * x }

      sleep(0.1) # Simulate work

      {
        chunk_id: chunk_id,
        processed_data: squared,
        chunk_sum: squared.sum,
        processing_complete: true
      }
    end

    # Reduce: gather per-chunk results
    node :reduce_phase do |state|
      puts "š Reduce phase - collecting results..."

      # In a real implementation, this would collect from all chunks
      processed_data = state[:processed_data] || []
      chunk_sum = state[:chunk_sum] || 0

      puts " š Chunk sum: #{chunk_sum}"

      {
        partial_results: { data: processed_data, sum: chunk_sum },
        reduce_step_complete: true
      }
    end

    # Final aggregation across all partial results
    node :final_aggregation do |state|
      puts "šÆ Final aggregation of all results..."

      final_result = {
        total_processed_items: state[:total_items] || 0,
        sample_result: state[:partial_results],
        processing_time: Time.now,
        status: 'completed'
      }

      puts " ā Processing complete: #{final_result[:total_processed_items]} items"

      { final_result: final_result, completed: true }
    end

    # Define flow
    set_entry_point :prepare_data
    edge :prepare_data, :map_phase
    edge :map_phase, :process_chunk
    edge :process_chunk, :reduce_phase
    edge :reduce_phase, :final_aggregation
    set_finish_point :final_aggregation
  end

  graph.compile!

  # Execute with timing
  started_at = Time.now
  result = graph.invoke({ input_data: (1..12).to_a })
  elapsed = Time.now - started_at

  puts "\nā±ļø Processing time: #{elapsed.round(2)} seconds"
  puts "š Final result: #{result[:final_result][:status]}"
  puts "ā Map-reduce example completed!"
end
|
383
|
+
|
384
|
+
# Run examples
|
385
|
+
# Run both demos only when this file is executed directly
if __FILE__ == $PROGRAM_NAME
  research_assistant_example
  map_reduce_example
end
|
@@ -0,0 +1,211 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
require_relative '../lib/langgraph_rb'
|
4
|
+
|
5
|
+
# Example: Simple chatbot with conditional routing
|
6
|
+
# Demo: a small chatbot graph with regex-based intent detection and
# conditional routing; farewell ends the run via an explicit goto FINISH.
def basic_example
  puts "=== Basic LangGraphRB Example ==="

  # State with a message-history reducer.
  # NOTE(review): initial_state is constructed but never passed to the
  # graph below — confirm whether it was meant to seed graph.invoke.
  initial_state = LangGraphRB::State.new(
    { messages: [], step_count: 0 },
    { messages: LangGraphRB::State.add_messages }
  )

  # Create the graph
  graph = LangGraphRB::Graph.new(state_class: LangGraphRB::State) do
    # Record the user's message and bump the step counter
    node :receive_input do |state|
      puts "š¤ User input received: #{state[:input]}"
      {
        messages: [{ role: 'user', content: state[:input] }],
        last_user_message: state[:input],
        step_count: (state[:step_count] || 0) + 1
      }
    end

    # Classify the message into one of five intents
    node :analyze_intent do |state|
      text = state[:last_user_message].to_s.downcase

      intent =
        case text
        when /hello|hi|hey/     then 'greeting'
        when /bye|goodbye|exit/ then 'farewell'
        when /help|assist/      then 'help_request'
        when /weather/          then 'weather_query'
        else                         'general_chat'
        end

      puts "š§ Detected intent: #{intent}"

      {
        intent: intent,
        messages: [{ role: 'system', content: "Intent detected: #{intent}" }]
      }
    end

    node :handle_greeting do |state|
      response = "Hello! How can I help you today?"
      puts "š¤ Bot: #{response}"

      { messages: [{ role: 'assistant', content: response }], last_response: response }
    end

    node :handle_farewell do |state|
      response = "Goodbye! Have a great day!"
      puts "š¤ Bot: #{response}"

      # End the conversation instead of looping back to input
      LangGraphRB::Commands.update_and_goto(
        {
          messages: [{ role: 'assistant', content: response }],
          last_response: response,
          should_end: true
        },
        LangGraphRB::Graph::FINISH
      )
    end

    node :handle_help do |state|
      response = "I can help with greetings, weather queries, or general conversation. Just ask!"
      puts "š¤ Bot: #{response}"

      { messages: [{ role: 'assistant', content: response }], last_response: response }
    end

    node :handle_weather do |state|
      response = "I'm sorry, I don't have access to real weather data yet, but it's probably nice outside!"
      puts "š¤ Bot: #{response}"

      { messages: [{ role: 'assistant', content: response }], last_response: response }
    end

    node :general_response do |state|
      canned = [
        "That's interesting! Tell me more.",
        "I see what you mean. Can you elaborate?",
        "Thanks for sharing that with me!",
        "That's a good point. What do you think about it?"
      ]

      response = canned.sample
      puts "š¤ Bot: #{response}"

      { messages: [{ role: 'assistant', content: response }], last_response: response }
    end

    # Define edges
    set_entry_point :receive_input
    edge :receive_input, :analyze_intent

    # Conditional routing based on intent
    conditional_edge :analyze_intent, ->(state) { state[:intent] }, {
      'greeting' => :handle_greeting,
      'farewell' => :handle_farewell,
      'help_request' => :handle_help,
      'weather_query' => :handle_weather,
      'general_chat' => :general_response
    }

    # All responses go back to waiting for input (except farewell)
    edge :handle_greeting, :receive_input
    edge :handle_help, :receive_input
    edge :handle_weather, :receive_input
    edge :general_response, :receive_input
  end

  # Compile the graph
  graph.compile!

  # Show the graph structure
  puts "\nš Graph Structure (Mermaid):"
  puts graph.to_mermaid
  puts

  # Example conversations
  conversations = [
    "Hello there!",
    "What's the weather like?",
    "Can you help me?",
    "That's cool, thanks!",
    "Goodbye!"
  ]

  conversations.each_with_index do |input, i|
    puts "\n--- Conversation #{i + 1} ---"

    result = graph.invoke({ input: input })

    puts "Final state keys: #{result.keys}"
    puts "Message count: #{result[:messages]&.length || 0}"
    puts "Step count: #{result[:step_count]}"

    # Stop once the bot has signalled the end of the conversation
    break if result[:should_end]
  end

  puts "\nā Basic example completed!"
end
|
164
|
+
|
165
|
+
# Example with streaming execution
|
166
|
+
# Demo: observing each step of a three-stage pipeline via graph.stream.
def streaming_example
  puts "\n=== Streaming Execution Example ==="

  pipeline = LangGraphRB::Graph.new do
    node :step1 do |_state|
      puts " š Executing step 1..."
      sleep(0.5) # Simulate work
      { step: 1, data: "Processed in step 1" }
    end

    node :step2 do |state|
      puts " š Executing step 2..."
      sleep(0.5)
      { step: 2, data: state[:data] + " -> Processed in step 2" }
    end

    node :step3 do |state|
      puts " š Executing step 3..."
      sleep(0.5)
      { step: 3, data: state[:data] + " -> Final processing" }
    end

    set_entry_point :step1
    # Chain the three stages in order
    %i[step1 step2 step3].each_cons(2) { |from, to| edge from, to }
    set_finish_point :step3
  end

  pipeline.compile!

  # Stream the execution, printing progress after each step
  puts "š” Streaming execution:"
  pipeline.stream({ input: "Hello streaming!" }) do |step_result|
    puts " š Step #{step_result[:step]}: #{step_result[:active_nodes].inspect}"
    puts " State: #{step_result[:state][:data]}" if step_result[:state][:data]
    puts " Completed: #{step_result[:completed]}"
  end

  puts "ā Streaming example completed!"
end
|
206
|
+
|
207
|
+
# Run examples
|
208
|
+
# Run both demos only when this file is executed directly
if __FILE__ == $PROGRAM_NAME
  basic_example
  streaming_example
end
|