langgraph_rb 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: faa20cb53b8c5f9c1ea8d740d34ea63eadfe9e07a49683dd3c090f7bb042d3fd
-   data.tar.gz: 56bfc5f8fb39473af9252caf1573f1126e30405f37ee7ac45bfde6704df20575
+   metadata.gz: bd46b52a6c57bec611953702facad328eda63e0c5f66f85c403230d05548709b
+   data.tar.gz: 19e32e26b6c38044536b8498c7900bcb19386adf1853881cc3a9c3ca3861dae4
  SHA512:
-   metadata.gz: 0f24fbb2f677bb13e7a8091807ff6d3f3d835b295ee691420f30b576bbd74d05eae9040d6511319eaefbc6c04170dc501101f3ff07f5a3e08c066a3d398adc3f
-   data.tar.gz: 91bdd7cf66e2c73fb5103d2fe469381bce46e889cacd6710a76c012d96296d3f0625dbc3b4e992570ef67ba3c506f8031058b3e6384304d0596e2c6f10dd8a6b
+   metadata.gz: ed37faa9f9cc70f30a63ac31f96daafd2fa8f6362f78607ac3532ee396fdd04037265a428fc2e5492b4971100be836845e29228e59b5876abc8b50239c9b83a4
+   data.tar.gz: 33a4b49fa73f482c4a66299b0a58964be9a81f505e44d35c36b033334b73a24341fc3e78349ff0652f26ca0ae6884b5623fa07d8263570367a790b1324e278ec
@@ -1,8 +1,18 @@
  #!/usr/bin/env ruby
  require 'pry'
  require 'pry-byebug'
+ require 'langfuse'
  require_relative '../lib/langgraph_rb'

+ url = 'https://us.cloud.langfuse.com'
+
+ Langfuse.configure do |config|
+   config.public_key = ENV['LANGFUSE_PUBLIC_KEY'] # e.g., 'pk-lf-...'
+   config.secret_key = ENV['LANGFUSE_SECRET_KEY'] # e.g., 'sk-lf-...'
+   config.host = url
+   config.debug = true # Enable debug logging
+ end
+
  class MovieInfoTool < LangGraphRB::ToolBase
    define_function :search_movie, description: "MovieInfoTool: Search for a movie by title" do
      property :query, type: "string", description: "The movie title to search for", required: true
@@ -28,9 +38,11 @@ end
  def run_chat_openai_tools
    tools = [MovieInfoTool.new(api_key: ENV['TMDB_API_KEY'] || 'demo')]

-   chat = LangGraphRB::ChatOpenAI.new(model: ENV.fetch('OPENAI_MODEL', 'gpt-4o-mini'), temperature: 0)
+   chat = LangGraphRB::ChatOpenAI.new(model: ENV.fetch('OPENAI_MODEL', 'gpt-4o-min3i'), temperature: 0)
    chat = chat.bind_tools(tools)

+   observers = [LangGraphRB::Observers::LangfuseObserver.new(name: 'chat-openai-tools-example')]
+
    graph = LangGraphRB::Graph.new do
      node :receive_input do |state|
        user_msg = { role: 'user', content: state[:input].to_s }
@@ -38,42 +50,11 @@ def run_chat_openai_tools
        { messages: existing + [user_msg] }
      end

-     llm_node :chat, llm_client: chat, system_prompt: "You are a movie assistant. Use tools when helpful." do |state, context|
-       messages = state[:messages] || []
-       messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
-
-       response = context[:llm_client].call(messages)
-
-       if response.is_a?(Hash) && response[:tool_calls]
-         assistant_msg = { role: 'assistant', content: nil, tool_calls: response[:tool_calls] }
-         { messages: (state[:messages] || []) + [assistant_msg], tool_call: response[:tool_calls].first }
-       else
-         assistant_msg = { role: 'assistant', content: response.to_s }
-         { messages: (state[:messages] || []) + [assistant_msg], last_response: response.to_s }
-       end
-     end
-
-     # node :tool do |state|
-     #   tool_call = state[:tool_call]
-     #   tool_name = tool_call[:name]
-     #   tool_args = tool_call[:arguments]
-     #   tool_call_id = tool_call[:id]
-
-     #   puts "TOOL CALL #########################"
-     #   puts "tool_name: #{tool_name}"
-     #   puts "tool_args: #{tool_args}"
-     #   puts "tool_call_id: #{tool_call_id}"
-     #   puts "########################"
-     #   puts "########################"
-
-     #   tool_method_name = tool_name.to_s.split('__').last
-
-     #   # Dispatch via ToolBase API to keep consistent interface
-     #   tool_result = tools.call({ name: tool_method_name, arguments: tool_args })
+     # Optional: callback to observe each assistant message as it's produced
+     add_message_callback = ->(message) { puts "New message: #{message}" }
+     sys_prompt = "You are a movie assistant. Use tools when helpful."

-     #   { messages: (state[:messages] || []) + [{ role: 'tool', content: tool_result.to_json, tool_call_id: tool_call_id, name: tool_name.to_s }],
-     #     tool_call: nil }
-     # end
+     llm_node :chat, llm_client: chat, system_prompt: sys_prompt, add_message_callback: add_message_callback

      tool_node :tool, tools: tools

@@ -97,8 +78,10 @@ def run_chat_openai_tools

    graph.compile!

+   graph.draw_mermaid
+
    start = { messages: [], input: "Find details about 'The Matrix'" }
-   result = graph.invoke(start)
+   result = graph.invoke(start, observers: observers)
    puts "Messages:"
    (result[:messages] || []).each do |m|
      if m[:role] == 'assistant' && m[:tool_calls]
@@ -113,3 +96,39 @@ end
  run_chat_openai_tools


+ # llm_node :chat, llm_client: chat, system_prompt: "You are a movie assistant. Use tools when helpful." do |state, context|
+ #   messages = state[:messages] || []
+ #   messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
+
+ #   response = context[:llm_client].call(messages)
+
+ #   if response.is_a?(Hash) && response[:tool_calls]
+ #     assistant_msg = { role: 'assistant', content: nil, tool_calls: response[:tool_calls] }
+ #     { messages: (state[:messages] || []) + [assistant_msg], tool_call: response[:tool_calls].first }
+ #   else
+ #     assistant_msg = { role: 'assistant', content: response.to_s }
+ #     { messages: (state[:messages] || []) + [assistant_msg], last_response: response.to_s }
+ #   end
+ # end
+
+ # node :tool do |state|
+ #   tool_call = state[:tool_call]
+ #   tool_name = tool_call[:name]
+ #   tool_args = tool_call[:arguments]
+ #   tool_call_id = tool_call[:id]
+
+ #   puts "TOOL CALL #########################"
+ #   puts "tool_name: #{tool_name}"
+ #   puts "tool_args: #{tool_args}"
+ #   puts "tool_call_id: #{tool_call_id}"
+ #   puts "########################"
+ #   puts "########################"
+
+ #   tool_method_name = tool_name.to_s.split('__').last
+
+ #   # Dispatch via ToolBase API to keep consistent interface
+ #   tool_result = tools.call({ name: tool_method_name, arguments: tool_args })
+
+ #   { messages: (state[:messages] || []) + [{ role: 'tool', content: tool_result.to_json, tool_call_id: tool_call_id, name: tool_name.to_s }],
+ #     tool_call: nil }
+ # end
@@ -12,27 +12,6 @@ Langfuse.configure do |config|
  end


- class LangfuseObserver < LangGraphRB::Observers::BaseObserver
-
-   def on_graph_start(event)
-     @trace ||= Langfuse.trace(
-       name: "graph-start2",
-       thread_id: event.thread_id,
-       metadata: event.to_h
-     )
-   end
-
-   def on_node_end(event)
-     span = Langfuse.span(
-       name: "node-#{event.node_name}",
-       trace_id: @trace.id,
-       input: event.to_h,
-     )
-     Langfuse.update_span(span)
-   end
- end
-
-
  def langfuse_example
    puts "########################################################"
    puts "########################################################"
@@ -82,7 +61,10 @@ def langfuse_example


    graph.compile!
-   result = graph.invoke({ message: "Hello World", value: 31}, observers: [LangfuseObserver.new])
+   result = graph.invoke(
+     { message: "Hello World", value: 31},
+     observers: [LangGraphRB::Observers::LangfuseObserver.new(name: 'langfuse-example')]
+   )
    puts "Result: #{result}"
    puts "########################################################"
    puts "########################################################"
@@ -77,8 +77,14 @@ module LangGraphRB
        else
          text_content
        end
+     rescue => e
+       notify_llm_error({
+         error: e.message
+       })
+       raise e
      end

+
      private

      def normalize_messages(messages)
@@ -35,11 +35,11 @@ module LangGraphRB
        end
      end

-     def llm_node(name, llm_client:, system_prompt: nil, &block)
+     def llm_node(name, llm_client:, system_prompt: nil, add_message_callback: nil, &block)
        name = name.to_sym
        raise GraphError, "Node '#{name}' already exists" if @nodes.key?(name)

-       @nodes[name] = LLMNode.new(name, llm_client: llm_client, system_prompt: system_prompt, &block)
+       @nodes[name] = LLMNode.new(name, llm_client: llm_client, system_prompt: system_prompt, add_message_callback: add_message_callback, &block)
      end

      def tool_node(name, tools:, &block)
@@ -214,7 +214,10 @@ module LangGraphRB
          validate_node_exists!(edge.to)
        when ConditionalEdge
          validate_node_exists!(edge.from)
-         # Path map targets will be validated at runtime
+         # If a static path_map is provided, validate mapped destinations now
+         if edge.path_map && !edge.path_map.empty?
+           edge.path_map.values.each { |dest| validate_node_exists!(dest) }
+         end
        when FanOutEdge
          validate_node_exists!(edge.from)
          edge.destinations.each { |dest| validate_node_exists!(dest) }
@@ -226,6 +229,12 @@
        case edge
        when Edge
          [edge.to]
+       when ConditionalEdge
+         if edge.path_map && !edge.path_map.empty?
+           edge.path_map.values
+         else
+           []
+         end
        when FanOutEdge
          edge.destinations
        else
@@ -264,11 +273,17 @@ module LangGraphRB
          case edge
          when Edge
            reachable += find_reachable_nodes(edge.to, visited.dup)
+         when ConditionalEdge
+           # If a static path_map is provided, consider all mapped destinations reachable
+           if edge.path_map && !edge.path_map.empty?
+             edge.path_map.values.each do |dest|
+               reachable += find_reachable_nodes(dest, visited.dup)
+             end
+           end
          when FanOutEdge
            edge.destinations.each do |dest|
              reachable += find_reachable_nodes(dest, visited.dup)
-         end
-         # ConditionalEdge paths are dynamic, so we can't pre-validate them
+           end
          end
        end

@@ -54,6 +54,16 @@ module LangGraphRB
          end
        end
      end
+
+     def notify_llm_error(payload)
+       @observers.each do |observer|
+         begin
+           observer.on_llm_error(payload, @node_name)
+         rescue => _e
+           # Ignore observer errors
+         end
+       end
+     end
    end
  end

@@ -35,11 +35,12 @@ module LangGraphRB

    # Specialized node for LLM calls
    class LLMNode < Node
-     attr_reader :llm_client, :system_prompt
+     attr_reader :llm_client, :system_prompt, :add_message_callback

-     def initialize(name, llm_client:, system_prompt: nil, &block)
+     def initialize(name, llm_client:, system_prompt: nil, add_message_callback: nil, &block)
        @llm_client = llm_client
        @system_prompt = system_prompt
+       @add_message_callback = add_message_callback

        # Use default LLM behavior if no custom block provided
        super(name, &(block || method(:default_llm_call)))
@@ -49,7 +50,8 @@ module LangGraphRB
      # Auto-inject LLM config into the context for both default and custom blocks
      merged_context = (context || {}).merge(
        llm_client: @llm_client,
-       system_prompt: @system_prompt
+       system_prompt: @system_prompt,
+       add_message_callback: @add_message_callback
      )

      begin
@@ -87,12 +89,14 @@ module LangGraphRB
            content: nil,
            tool_calls: response[:tool_calls]
          }
+         @add_message_callback&.call(assistant_msg)
          {
            messages: (state[:messages] || []) + [assistant_msg],
            tool_call: response[:tool_calls].first
          }
        else
          assistant_msg = { role: 'assistant', content: response.to_s }
+         @add_message_callback&.call(assistant_msg)
          {
            messages: (state[:messages] || []) + [assistant_msg],
            last_response: response.to_s
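
Taken together, the Graph#llm_node and LLMNode changes above expose one new user-facing option: llm_node accepts add_message_callback, threads it into the node context, and the default LLM behavior invokes it for every assistant message (plain text or tool calls). The graph changes also validate static path_map destinations on conditional edges at compile time instead of deferring them to runtime. A minimal, illustrative fragment of a Graph.new block using both, modelled on the example scripts in this diff (the node names, the lambda, and the chat/tools variables are placeholders, not part of the gem):

    # Inside LangGraphRB::Graph.new do ... end (fragment; names are illustrative)

    # New in 0.1.7: the callback fires once per assistant message the node emits,
    # whether it is plain text or a tool-call message.
    llm_node :chat,
             llm_client: chat,
             system_prompt: "You are a movie assistant. Use tools when helpful.",
             add_message_callback: ->(message) { puts "New message: #{message}" }

    tool_node :tool, tools: tools

    # New in 0.1.7: destinations named in a static path map are checked during
    # graph validation rather than only at runtime.
    conditional_edge :chat, ->(state) { state[:tool_call] ? "use_tool" : "final_answer" },
                     { "use_tool" => :tool, "final_answer" => :final_answer }
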
@@ -70,6 +70,10 @@ module LangGraphRB
        # Override in subclasses
      end

+     def on_llm_error(event)
+       # Override in subclasses
+     end
+
      protected

      # Helper method to create standardized event structure
@@ -0,0 +1,223 @@
+ require 'langfuse'
+
+ module LangGraphRB
+   module Observers
+     # Langfuse observer that captures graph, node, and LLM events.
+     # - Creates a Langfuse trace for each graph run (thread_id)
+     # - Creates spans per node execution and links LLM generations to spans
+     # - Thread-safe and resilient to Langfuse client errors
+     class LangfuseObserver < BaseObserver
+       def initialize(name: 'langgraph-run')
+         @name = name
+         @trace = nil
+         @trace_mutex = Mutex.new
+
+         # Maintain a stack per node_name to safely handle parallel executions
+         # { Symbol(String) => [ { span: <Span>, generation: <Generation>|nil } ] }
+         @records_by_node = Hash.new { |h, k| h[k] = [] }
+         @records_mutex = Mutex.new
+       end
+
+       # Graph lifecycle
+       def on_graph_start(event)
+         ensure_trace!(event)
+       rescue => _e
+         # Swallow observer errors to avoid impacting execution
+       end
+
+       def on_graph_end(event)
+         return unless @trace
+         Langfuse.trace(id: @trace.id, output: safe_state(event.initial_state))
+       rescue => _e
+       end
+
+       # Node lifecycle
+       def on_node_start(event)
+         return if event.node_name == :__start__
+
+         trace = ensure_trace!(event)
+         return unless trace
+
+         span = Langfuse.span(
+           name: event.node_name.to_s,
+           trace_id: trace.id,
+           metadata: event.to_h
+         )
+
+         # Track record on a stack keyed by node_name
+         with_records_lock do
+           @records_by_node[event.node_name] << { span: span, generation: nil }
+         end
+
+         Langfuse.update_span(span)
+       rescue => _e
+       end
+
+       def on_node_end(event)
+         return if event.node_name == :__start__
+
+         record = with_records_lock do
+           @records_by_node[event.node_name].pop
+         end
+
+         span = record && record[:span]
+         return unless span
+
+         data = event.to_h
+         span.input = safe_state(data[:state_before])
+         span.output = safe_state(data[:state_after])
+         span.metadata = data
+         span.end_time = Time.now.utc
+         Langfuse.update_span(span)
+       rescue => _e
+       end
+
+       def on_node_error(event)
+         return if event.node_name == :__start__
+
+         record = with_records_lock do
+           @records_by_node[event.node_name].pop
+         end
+
+         span = record && record[:span]
+         return unless span
+
+         span.metadata = event.to_h
+         span.end_time = Time.now.utc
+         Langfuse.update_span(span)
+       rescue => _e
+       end
+
+       # LLM lifecycle (called directly by LLM clients)
+       def on_llm_request(data, node_name)
+         record = with_records_lock do
+           stack = @records_by_node[node_name]
+           stack.empty? ? nil : stack[-1]
+         end
+         return unless record && record[:span]
+
+         # Prefer normalized payload from LLMBase implementations (e.g., ChatOpenAI)
+         input_payload = if data.is_a?(Hash)
+           data[:input] || data[:messages] || (data[:request] && data[:request][:messages])
+         else
+           data
+         end
+
+         generation = Langfuse.generation(
+           name: "llm-request-#{node_name}",
+           trace_id: @trace&.id,
+           parent_observation_id: record[:span].id,
+           model: data[:model],
+           input: input_payload,
+           metadata: (data.respond_to?(:to_h) ? data.to_h : data)
+         )
+
+         with_records_lock do
+           record[:generation] = generation
+         end
+       rescue => _e
+       end
+
+       def on_llm_response(data, node_name)
+         record = with_records_lock do
+           stack = @records_by_node[node_name]
+           stack.empty? ? nil : stack[-1]
+         end
+         return unless record && record[:generation]
+
+         generation = record[:generation]
+
+         if data.is_a?(Hash)
+           # Prefer normalized payload keys first
+           if data.key?(:output)
+             generation.output = data[:output]
+           else
+             # Fallback to OpenAI-style response structure
+             generation.output = data.dig(:choices, 0, :message, :content)
+           end
+
+           # Usage: support both normalized top-level and OpenAI usage block
+           prompt_tokens = data[:prompt_tokens] || data.dig(:usage, :prompt_tokens)
+           completion_tokens = data[:completion_tokens] || data.dig(:usage, :completion_tokens)
+           total_tokens = data[:total_tokens] || data.dig(:usage, :total_tokens)
+
+           if prompt_tokens || completion_tokens || total_tokens
+             begin
+               generation.usage = Langfuse::Models::Usage.new(
+                 prompt_tokens: prompt_tokens,
+                 completion_tokens: completion_tokens,
+                 total_tokens: total_tokens
+               )
+             rescue => _e
+               # best-effort usage mapping
+             end
+           end
+         else
+           generation.output = data
+         end
+
+         generation.end_time = Time.now.utc
+         Langfuse.update_generation(generation)
+
+         with_records_lock do
+           record[:generation] = nil
+         end
+       rescue => _e
+       end
+
+       def on_llm_error(data, node_name)
+         record = with_records_lock do
+           stack = @records_by_node[node_name]
+           stack.empty? ? nil : stack[-1]
+         end
+         return unless record && record[:generation]
+
+         generation = record[:generation]
+         generation.output = data[:error]
+         generation.end_time = Time.now.utc
+         Langfuse.update_generation(generation)
+
+         with_records_lock do
+           record[:generation] = nil
+         end
+       rescue => _e
+       end
+
+       private
+
+       def ensure_trace!(event)
+         return @trace if @trace
+         @trace_mutex.synchronize do
+           return @trace if @trace
+           data = event.to_h
+           @trace = Langfuse.trace(
+             name: @name,
+             thread_id: data[:thread_id],
+             metadata: data,
+             input: safe_state(data[:initial_state])
+           )
+         end
+         @trace
+       end
+
+       def with_records_lock
+         @records_mutex.synchronize do
+           yield
+         end
+       end
+
+       def safe_state(state)
+         return nil if state.nil?
+         if state.respond_to?(:to_h)
+           state.to_h
+         else
+           state
+         end
+       rescue => _e
+         nil
+       end
+     end
+   end
+ end
+
+
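
The new observer plugs into a run through the existing observers: keyword on Graph#invoke, exactly as the updated example scripts in this diff do. A minimal usage sketch, assuming Langfuse credentials are available in the environment and that graph is an already compiled LangGraphRB::Graph (the variable names are illustrative):

    require 'langfuse'
    require 'langgraph_rb'

    Langfuse.configure do |config|
      config.public_key = ENV['LANGFUSE_PUBLIC_KEY']
      config.secret_key = ENV['LANGFUSE_SECRET_KEY']
      config.host = 'https://us.cloud.langfuse.com'
    end

    observer = LangGraphRB::Observers::LangfuseObserver.new(name: 'my-run')

    # One Langfuse trace per invocation; node spans and LLM generations are
    # attached to it by the observer callbacks defined above.
    result = graph.invoke({ input: "Find details about 'The Matrix'" }, observers: [observer])
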
@@ -1,3 +1,3 @@
  module LangGraphRB
-   VERSION = "0.1.5"
+   VERSION = "0.1.7"
  end
data/lib/langgraph_rb.rb CHANGED
@@ -9,6 +9,7 @@ require_relative 'langgraph_rb/stores/memory'
  require_relative 'langgraph_rb/observers/base'
  require_relative 'langgraph_rb/observers/logger'
  require_relative 'langgraph_rb/observers/structured'
+ require_relative 'langgraph_rb/observers/langfuse'
  require_relative 'langgraph_rb/llm_base'
  require_relative 'langgraph_rb/chat_openai'
  require_relative 'langgraph_rb/tool_definition'
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langgraph_rb
  version: !ruby/object:Gem::Version
-   version: 0.1.5
+   version: 0.1.7
  platform: ruby
  authors:
  - Julian Toro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2025-09-20 00:00:00.000000000 Z
+ date: 2025-10-08 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: json
@@ -133,7 +133,6 @@ files:
  - examples/observer_example.rb
  - examples/reducers_example.rb
  - examples/simple_test.rb
- - examples/tool_and_llm_example.rb
  - langgraph_rb.gemspec
  - lib/langgraph_rb.rb
  - lib/langgraph_rb/chat_openai.rb
@@ -143,6 +142,7 @@ files:
  - lib/langgraph_rb/llm_base.rb
  - lib/langgraph_rb/node.rb
  - lib/langgraph_rb/observers/base.rb
+ - lib/langgraph_rb/observers/langfuse.rb
  - lib/langgraph_rb/observers/logger.rb
  - lib/langgraph_rb/observers/structured.rb
  - lib/langgraph_rb/runner.rb
@@ -1,145 +0,0 @@
- #!/usr/bin/env ruby
-
- require_relative '../lib/langgraph_rb'
-
- # Mock LLM client that can incorporate tool outputs when present
- class MockLLMClient
-   def call(messages)
-     last_user = messages&.reverse&.find { |m| m[:role] == 'user' }&.dig(:content)
-     last_tool = messages&.reverse&.find { |m| m[:role] == 'tool' }&.dig(:content)
-
-     if last_tool
-       "(mock) Based on tool result: #{last_tool} | Answering user: #{last_user}"
-     else
-       "(mock) You said: #{last_user}"
-     end
-   end
- end
-
- # Simple search tool that returns a faux result string
- class SearchTool
-   def self.call(args)
-     query = args.is_a?(Hash) ? args[:query] || args['query'] : args
-     query ||= args.to_s
-     "Results for '#{query}': [Result A, Result B, Result C]"
-   end
- end
-
- def tool_and_llm_example
-   puts "=== Tool + LLM Example ==="
-
-   mock_llm = MockLLMClient.new
-
-   graph = LangGraphRB::Graph.new(state_class: LangGraphRB::State) do
-     # 1) Capture user input into the message history
-     node :receive_input do |state|
-       user_msg = { role: 'user', content: state[:input].to_s }
-       existing = state[:messages] || []
-       { messages: existing + [user_msg], last_user_message: state[:input].to_s }
-     end
-
-     # 2) Decide whether to call a tool based on the user's request
-     #    If the user says: "search <query>", produce a tool_call for SearchTool
-     llm_node :router, llm_client: mock_llm, system_prompt: "You are a helpful assistant that can decide to call tools when asked." do |state, context|
-       last_user = state[:last_user_message].to_s
-
-       if (match = last_user.match(/^\s*search\s+(.+)$/i))
-         query = match[1].strip
-         tool_call = {
-           id: "call_#{Time.now.to_i}",
-           name: 'search',
-           args: { query: query }
-         }
-
-         assistant_msg = {
-           role: 'assistant',
-           content: "Let me search for: #{query}",
-           tool_calls: [tool_call]
-         }
-
-         {
-           messages: (state[:messages] || []) + [assistant_msg],
-           tool_call: tool_call # also put it in state for convenience
-         }
-       else
-         # No tool needed; provide a direct assistant response using the LLM
-         messages = state[:messages] || []
-         messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
-         response = context[:llm_client].call(messages)
-
-         {
-           messages: (state[:messages] || []) + [{ role: 'assistant', content: response }],
-           last_response: response
-         }
-       end
-     end
-
-     # 3) Execute the tool if requested and append a tool message
-     #    Use a custom block to merge the tool message with existing history
-     tool_node :use_tool, tool: SearchTool do |state|
-       # Determine the tool call (from state or messages)
-       tool_call = state[:tool_call]
-       unless tool_call
-         # Fallback: look for a message containing tool_calls
-         (state[:messages] || []).reverse.each do |msg|
-           if msg[:tool_calls] && msg[:tool_calls].first
-             tool_call = msg[:tool_calls].first
-             break
-           end
-         end
-       end
-
-       return { error: 'No tool call found' } unless tool_call
-
-       result = SearchTool.call(tool_call[:args])
-
-       tool_msg = {
-         role: 'tool',
-         content: result.to_s,
-         tool_call_id: tool_call[:id]
-       }
-
-       {
-         messages: (state[:messages] || []) + [tool_msg],
-         tool_result: result
-       }
-     end
-
-     # 4) Produce the final answer with the LLM, using any tool results
-     llm_node :final_answer, llm_client: mock_llm, system_prompt: "Use tool results if available to answer the user."
-
-     # Flow
-     set_entry_point :receive_input
-     edge :receive_input, :router
-
-     # If there is a tool_call, go to :use_tool, otherwise go directly to :final_answer
-     conditional_edge :router, ->(state) {
-       state[:tool_call] ? "use_tool" : "final_answer"
-     }, {
-       "use_tool" => :use_tool,
-       "final_answer" => :final_answer
-     }
-
-     edge :use_tool, :router
-     set_finish_point :final_answer
-   end
-
-   graph.compile!
-
-   puts graph.to_mermaid
-
-   puts "\n— Example 1: No tool needed —"
-   result1 = graph.invoke({ messages: [], input: "Tell me a joke." })
-   puts "Assistant: #{result1[:last_response]}"
-
-   puts "\n— Example 2: Tool is used —"
-   result2 = graph.invoke({ messages: [], input: "search Ruby LangGraphRB" })
-   final_message = (result2[:messages] || []).reverse.find { |m| m[:role] == 'assistant' }&.dig(:content)
-   puts "Assistant: #{final_message}"
-   tool_message = (result2[:messages] || []).reverse.find { |m| m[:role] == 'tool' }&.dig(:content)
-   puts "(Tool) #{tool_message}"
- end
-
- tool_and_llm_example
-
-