langgraph_rb 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: faa20cb53b8c5f9c1ea8d740d34ea63eadfe9e07a49683dd3c090f7bb042d3fd
-  data.tar.gz: 56bfc5f8fb39473af9252caf1573f1126e30405f37ee7ac45bfde6704df20575
+  metadata.gz: 071bb22811337400b9569f90e07b50c0f7c6784b1025ce30b0502c5d321965b5
+  data.tar.gz: dad734ef3e60f76d8bf47de4c1f5f50a1b361dad8cfd7e36ed57e3cd4f998dca
 SHA512:
-  metadata.gz: 0f24fbb2f677bb13e7a8091807ff6d3f3d835b295ee691420f30b576bbd74d05eae9040d6511319eaefbc6c04170dc501101f3ff07f5a3e08c066a3d398adc3f
-  data.tar.gz: 91bdd7cf66e2c73fb5103d2fe469381bce46e889cacd6710a76c012d96296d3f0625dbc3b4e992570ef67ba3c506f8031058b3e6384304d0596e2c6f10dd8a6b
+  metadata.gz: 20c5c1c2ab980770d8ef0ca69159c1566c889c3ecd80d9185f6767b7954d92f5a15847ac50b1024dbd976670dc90d76b1bec9a95fa5d55b6123d6cdb26633bd2
+  data.tar.gz: a6edae4423708c7f807a050a9d4238d36ed188d0a0697e04eb2f21cebb3987ddf9d7f627cd6657d0b128470a58ca2a6895d02614da30341c73dbd5af1584337e
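For context, the checksums above can be checked against a locally downloaded copy of the gem's inner archives. A minimal sketch using Ruby's standard Digest library; the file paths are assumptions and should point at the unpacked gem's metadata.gz and data.tar.gz:

  require 'digest'

  # Compare local artifacts against the published checksums.yaml entries.
  %w[metadata.gz data.tar.gz].each do |artifact|
    puts "#{artifact} SHA256: #{Digest::SHA256.file(artifact).hexdigest}"
    puts "#{artifact} SHA512: #{Digest::SHA512.file(artifact).hexdigest}"
  end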
@@ -1,8 +1,18 @@
 #!/usr/bin/env ruby
 require 'pry'
 require 'pry-byebug'
+require 'langfuse'
 require_relative '../lib/langgraph_rb'
 
+url = 'https://us.cloud.langfuse.com'
+
+Langfuse.configure do |config|
+  config.public_key = ENV['LANGFUSE_PUBLIC_KEY'] # e.g., 'pk-lf-...'
+  config.secret_key = ENV['LANGFUSE_SECRET_KEY'] # e.g., 'sk-lf-...'
+  config.host = url
+  config.debug = true # Enable debug logging
+end
+
 class MovieInfoTool < LangGraphRB::ToolBase
   define_function :search_movie, description: "MovieInfoTool: Search for a movie by title" do
     property :query, type: "string", description: "The movie title to search for", required: true
@@ -31,6 +41,8 @@ def run_chat_openai_tools
   chat = LangGraphRB::ChatOpenAI.new(model: ENV.fetch('OPENAI_MODEL', 'gpt-4o-mini'), temperature: 0)
   chat = chat.bind_tools(tools)
 
+  observers = [LangGraphRB::Observers::LangfuseObserver.new(name: 'chat-openai-tools-example')]
+
   graph = LangGraphRB::Graph.new do
     node :receive_input do |state|
       user_msg = { role: 'user', content: state[:input].to_s }
@@ -38,42 +50,7 @@ def run_chat_openai_tools
       { messages: existing + [user_msg] }
     end
 
-    llm_node :chat, llm_client: chat, system_prompt: "You are a movie assistant. Use tools when helpful." do |state, context|
-      messages = state[:messages] || []
-      messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
-
-      response = context[:llm_client].call(messages)
-
-      if response.is_a?(Hash) && response[:tool_calls]
-        assistant_msg = { role: 'assistant', content: nil, tool_calls: response[:tool_calls] }
-        { messages: (state[:messages] || []) + [assistant_msg], tool_call: response[:tool_calls].first }
-      else
-        assistant_msg = { role: 'assistant', content: response.to_s }
-        { messages: (state[:messages] || []) + [assistant_msg], last_response: response.to_s }
-      end
-    end
-
-    # node :tool do |state|
-    #   tool_call = state[:tool_call]
-    #   tool_name = tool_call[:name]
-    #   tool_args = tool_call[:arguments]
-    #   tool_call_id = tool_call[:id]
-
-    #   puts "TOOL CALL #########################"
-    #   puts "tool_name: #{tool_name}"
-    #   puts "tool_args: #{tool_args}"
-    #   puts "tool_call_id: #{tool_call_id}"
-    #   puts "########################"
-    #   puts "########################"
-
-    #   tool_method_name = tool_name.to_s.split('__').last
-
-    #   # Dispatch via ToolBase API to keep consistent interface
-    #   tool_result = tools.call({ name: tool_method_name, arguments: tool_args })
-
-    #   { messages: (state[:messages] || []) + [{ role: 'tool', content: tool_result.to_json, tool_call_id: tool_call_id, name: tool_name.to_s }],
-    #     tool_call: nil }
-    # end
+    llm_node :chat, llm_client: chat, system_prompt: "You are a movie assistant. Use tools when helpful."
 
     tool_node :tool, tools: tools
 
@@ -97,8 +74,10 @@ def run_chat_openai_tools
 
   graph.compile!
 
+  graph.draw_mermaid
+
   start = { messages: [], input: "Find details about 'The Matrix'" }
-  result = graph.invoke(start)
+  result = graph.invoke(start, observers: observers)
   puts "Messages:"
   (result[:messages] || []).each do |m|
     if m[:role] == 'assistant' && m[:tool_calls]
@@ -113,3 +92,39 @@ end
 run_chat_openai_tools
 
 
+# llm_node :chat, llm_client: chat, system_prompt: "You are a movie assistant. Use tools when helpful." do |state, context|
+#   messages = state[:messages] || []
+#   messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
+
+#   response = context[:llm_client].call(messages)
+
+#   if response.is_a?(Hash) && response[:tool_calls]
+#     assistant_msg = { role: 'assistant', content: nil, tool_calls: response[:tool_calls] }
+#     { messages: (state[:messages] || []) + [assistant_msg], tool_call: response[:tool_calls].first }
+#   else
+#     assistant_msg = { role: 'assistant', content: response.to_s }
+#     { messages: (state[:messages] || []) + [assistant_msg], last_response: response.to_s }
+#   end
+# end
+
+# node :tool do |state|
+#   tool_call = state[:tool_call]
+#   tool_name = tool_call[:name]
+#   tool_args = tool_call[:arguments]
+#   tool_call_id = tool_call[:id]
+
+#   puts "TOOL CALL #########################"
+#   puts "tool_name: #{tool_name}"
+#   puts "tool_args: #{tool_args}"
+#   puts "tool_call_id: #{tool_call_id}"
+#   puts "########################"
+#   puts "########################"
+
+#   tool_method_name = tool_name.to_s.split('__').last
+
+#   # Dispatch via ToolBase API to keep consistent interface
+#   tool_result = tools.call({ name: tool_method_name, arguments: tool_args })
+
+#   { messages: (state[:messages] || []) + [{ role: 'tool', content: tool_result.to_json, tool_call_id: tool_call_id, name: tool_name.to_s }],
+#     tool_call: nil }
+# end
@@ -12,27 +12,6 @@ Langfuse.configure do |config|
 end
 
 
-class LangfuseObserver < LangGraphRB::Observers::BaseObserver
-
-  def on_graph_start(event)
-    @trace ||= Langfuse.trace(
-      name: "graph-start2",
-      thread_id: event.thread_id,
-      metadata: event.to_h
-    )
-  end
-
-  def on_node_end(event)
-    span = Langfuse.span(
-      name: "node-#{event.node_name}",
-      trace_id: @trace.id,
-      input: event.to_h,
-    )
-    Langfuse.update_span(span)
-  end
-end
-
-
 def langfuse_example
   puts "########################################################"
   puts "########################################################"
@@ -82,7 +61,10 @@ def langfuse_example
 
 
   graph.compile!
-  result = graph.invoke({ message: "Hello World", value: 31}, observers: [LangfuseObserver.new])
+  result = graph.invoke(
+    { message: "Hello World", value: 31},
+    observers: [LangGraphRB::Observers::LangfuseObserver.new(name: 'langfuse-example')]
+  )
   puts "Result: #{result}"
   puts "########################################################"
   puts "########################################################"
@@ -214,7 +214,10 @@ module LangGraphRB
         validate_node_exists!(edge.to)
       when ConditionalEdge
         validate_node_exists!(edge.from)
-        # Path map targets will be validated at runtime
+        # If a static path_map is provided, validate mapped destinations now
+        if edge.path_map && !edge.path_map.empty?
+          edge.path_map.values.each { |dest| validate_node_exists!(dest) }
+        end
       when FanOutEdge
         validate_node_exists!(edge.from)
         edge.destinations.each { |dest| validate_node_exists!(dest) }
@@ -226,6 +229,12 @@
       case edge
       when Edge
         [edge.to]
+      when ConditionalEdge
+        if edge.path_map && !edge.path_map.empty?
+          edge.path_map.values
+        else
+          []
+        end
       when FanOutEdge
         edge.destinations
       else
@@ -264,11 +273,17 @@ module LangGraphRB
       case edge
       when Edge
         reachable += find_reachable_nodes(edge.to, visited.dup)
+      when ConditionalEdge
+        # If a static path_map is provided, consider all mapped destinations reachable
+        if edge.path_map && !edge.path_map.empty?
+          edge.path_map.values.each do |dest|
+            reachable += find_reachable_nodes(dest, visited.dup)
+          end
+        end
       when FanOutEdge
         edge.destinations.each do |dest|
           reachable += find_reachable_nodes(dest, visited.dup)
-        end
-        # ConditionalEdge paths are dynamic, so we can't pre-validate them
+        end
       end
 
 
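The three hunks above change how conditional edges are treated during graph validation: when a conditional edge carries a static path_map, its mapped destinations are now checked with validate_node_exists! at compile time and counted as reachable, rather than being deferred to runtime. A minimal sketch of the DSL this affects, using the conditional_edge form that appears in the removed example further down; the node names and state keys here are illustrative only:

  graph = LangGraphRB::Graph.new do
    # Illustrative nodes; real nodes would do actual work.
    node(:router)   { |state| { routed: true } }
    node(:use_tool) { |state| { tool_ran: true } }
    node(:final)    { |state| { done: true } }

    set_entry_point :router

    # Because a static path_map is given, compile-time validation can now
    # verify that :use_tool and :final exist and treat both as reachable.
    conditional_edge :router, ->(state) {
      state[:tool_call] ? "use_tool" : "final"
    }, {
      "use_tool" => :use_tool,
      "final"    => :final
    }

    edge :use_tool, :final
    set_finish_point :final
  end

  graph.compile!  # should now flag a mapped destination that does not exist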
@@ -0,0 +1,205 @@
+require 'langfuse'
+
+module LangGraphRB
+  module Observers
+    # Langfuse observer that captures graph, node, and LLM events.
+    # - Creates a Langfuse trace for each graph run (thread_id)
+    # - Creates spans per node execution and links LLM generations to spans
+    # - Thread-safe and resilient to Langfuse client errors
+    class LangfuseObserver < BaseObserver
+      def initialize(name: 'langgraph-run')
+        @name = name
+        @trace = nil
+        @trace_mutex = Mutex.new
+
+        # Maintain a stack per node_name to safely handle parallel executions
+        # { Symbol(String) => [ { span: <Span>, generation: <Generation>|nil } ] }
+        @records_by_node = Hash.new { |h, k| h[k] = [] }
+        @records_mutex = Mutex.new
+      end
+
+      # Graph lifecycle
+      def on_graph_start(event)
+        ensure_trace!(event)
+      rescue => _e
+        # Swallow observer errors to avoid impacting execution
+      end
+
+      def on_graph_end(event)
+        return unless @trace
+        Langfuse.trace(id: @trace.id, output: safe_state(event.initial_state))
+      rescue => _e
+      end
+
+      # Node lifecycle
+      def on_node_start(event)
+        return if event.node_name == :__start__
+
+        trace = ensure_trace!(event)
+        return unless trace
+
+        span = Langfuse.span(
+          name: event.node_name.to_s,
+          trace_id: trace.id,
+          metadata: event.to_h
+        )
+
+        # Track record on a stack keyed by node_name
+        with_records_lock do
+          @records_by_node[event.node_name] << { span: span, generation: nil }
+        end
+
+        Langfuse.update_span(span)
+      rescue => _e
+      end
+
+      def on_node_end(event)
+        return if event.node_name == :__start__
+
+        record = with_records_lock do
+          @records_by_node[event.node_name].pop
+        end
+
+        span = record && record[:span]
+        return unless span
+
+        data = event.to_h
+        span.input = safe_state(data[:state_before])
+        span.output = safe_state(data[:state_after])
+        span.metadata = data
+        span.end_time = Time.now.utc
+        Langfuse.update_span(span)
+      rescue => _e
+      end
+
+      def on_node_error(event)
+        return if event.node_name == :__start__
+
+        record = with_records_lock do
+          @records_by_node[event.node_name].pop
+        end
+
+        span = record && record[:span]
+        return unless span
+
+        span.metadata = event.to_h
+        span.end_time = Time.now.utc
+        Langfuse.update_span(span)
+      rescue => _e
+      end
+
+      # LLM lifecycle (called directly by LLM clients)
+      def on_llm_request(data, node_name)
+        record = with_records_lock do
+          stack = @records_by_node[node_name]
+          stack.empty? ? nil : stack[-1]
+        end
+        return unless record && record[:span]
+
+        # Prefer normalized payload from LLMBase implementations (e.g., ChatOpenAI)
+        input_payload = if data.is_a?(Hash)
+          data[:input] || data[:messages] || (data[:request] && data[:request][:messages])
+        else
+          data
+        end
+
+        generation = Langfuse.generation(
+          name: "llm-request-#{node_name}",
+          trace_id: @trace&.id,
+          parent_observation_id: record[:span].id,
+          model: data[:model],
+          input: input_payload,
+          metadata: (data.respond_to?(:to_h) ? data.to_h : data)
+        )
+
+        with_records_lock do
+          record[:generation] = generation
+        end
+      rescue => _e
+      end
+
+      def on_llm_response(data, node_name)
+        record = with_records_lock do
+          stack = @records_by_node[node_name]
+          stack.empty? ? nil : stack[-1]
+        end
+        return unless record && record[:generation]
+
+        generation = record[:generation]
+
+        if data.is_a?(Hash)
+          # Prefer normalized payload keys first
+          if data.key?(:output)
+            generation.output = data[:output]
+          else
+            # Fallback to OpenAI-style response structure
+            generation.output = data.dig(:choices, 0, :message, :content)
+          end
+
+          # Usage: support both normalized top-level and OpenAI usage block
+          prompt_tokens = data[:prompt_tokens] || data.dig(:usage, :prompt_tokens)
+          completion_tokens = data[:completion_tokens] || data.dig(:usage, :completion_tokens)
+          total_tokens = data[:total_tokens] || data.dig(:usage, :total_tokens)
+
+          if prompt_tokens || completion_tokens || total_tokens
+            begin
+              generation.usage = Langfuse::Models::Usage.new(
+                prompt_tokens: prompt_tokens,
+                completion_tokens: completion_tokens,
+                total_tokens: total_tokens
+              )
+            rescue => _e
+              # best-effort usage mapping
+            end
+          end
+        else
+          generation.output = data
+        end
+
+        generation.end_time = Time.now.utc
+        Langfuse.update_generation(generation)
+
+        with_records_lock do
+          record[:generation] = nil
+        end
+      rescue => _e
+      end
+
+      private
+
+      def ensure_trace!(event)
+        return @trace if @trace
+        @trace_mutex.synchronize do
+          return @trace if @trace
+          data = event.to_h
+          @trace = Langfuse.trace(
+            name: @name,
+            thread_id: data[:thread_id],
+            metadata: data,
+            input: safe_state(data[:initial_state])
+          )
+        end
+        @trace
+      end
+
+      def with_records_lock
+        @records_mutex.synchronize do
+          yield
+        end
+      end
+
+      def safe_state(state)
+        return nil if state.nil?
+        if state.respond_to?(:to_h)
+          state.to_h
+        else
+          state
+        end
+      rescue => _e
+        nil
+      end
+    end
+  end
+end
+
+
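Taken together with the example changes above, wiring the new observer into a run looks roughly like this. A minimal sketch assuming the LANGFUSE_* environment variables are set and that `graph` is an already-built LangGraphRB::Graph; the observer name and input hash are illustrative:

  require 'langfuse'
  require 'langgraph_rb'

  Langfuse.configure do |config|
    config.public_key = ENV['LANGFUSE_PUBLIC_KEY']
    config.secret_key = ENV['LANGFUSE_SECRET_KEY']
    config.host = 'https://us.cloud.langfuse.com'
  end

  # One observer per run: it opens a trace on graph start, records a span per
  # node, and attaches LLM generations reported via on_llm_request/on_llm_response.
  observer = LangGraphRB::Observers::LangfuseObserver.new(name: 'my-graph-run')

  result = graph.invoke({ input: 'Hello' }, observers: [observer])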
@@ -1,3 +1,3 @@
 module LangGraphRB
-  VERSION = "0.1.5"
+  VERSION = "0.1.6"
 end
data/lib/langgraph_rb.rb CHANGED
@@ -9,6 +9,7 @@ require_relative 'langgraph_rb/stores/memory'
 require_relative 'langgraph_rb/observers/base'
 require_relative 'langgraph_rb/observers/logger'
 require_relative 'langgraph_rb/observers/structured'
+require_relative 'langgraph_rb/observers/langfuse'
 require_relative 'langgraph_rb/llm_base'
 require_relative 'langgraph_rb/chat_openai'
 require_relative 'langgraph_rb/tool_definition'
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: langgraph_rb
 version: !ruby/object:Gem::Version
-  version: 0.1.5
+  version: 0.1.6
 platform: ruby
 authors:
 - Julian Toro
@@ -133,7 +133,6 @@ files:
 - examples/observer_example.rb
 - examples/reducers_example.rb
 - examples/simple_test.rb
-- examples/tool_and_llm_example.rb
 - langgraph_rb.gemspec
 - lib/langgraph_rb.rb
 - lib/langgraph_rb/chat_openai.rb
@@ -143,6 +142,7 @@ files:
 - lib/langgraph_rb/llm_base.rb
 - lib/langgraph_rb/node.rb
 - lib/langgraph_rb/observers/base.rb
+- lib/langgraph_rb/observers/langfuse.rb
 - lib/langgraph_rb/observers/logger.rb
 - lib/langgraph_rb/observers/structured.rb
 - lib/langgraph_rb/runner.rb
@@ -1,145 +0,0 @@
-#!/usr/bin/env ruby
-
-require_relative '../lib/langgraph_rb'
-
-# Mock LLM client that can incorporate tool outputs when present
-class MockLLMClient
-  def call(messages)
-    last_user = messages&.reverse&.find { |m| m[:role] == 'user' }&.dig(:content)
-    last_tool = messages&.reverse&.find { |m| m[:role] == 'tool' }&.dig(:content)
-
-    if last_tool
-      "(mock) Based on tool result: #{last_tool} | Answering user: #{last_user}"
-    else
-      "(mock) You said: #{last_user}"
-    end
-  end
-end
-
-# Simple search tool that returns a faux result string
-class SearchTool
-  def self.call(args)
-    query = args.is_a?(Hash) ? args[:query] || args['query'] : args
-    query ||= args.to_s
-    "Results for '#{query}': [Result A, Result B, Result C]"
-  end
-end
-
-def tool_and_llm_example
-  puts "=== Tool + LLM Example ==="
-
-  mock_llm = MockLLMClient.new
-
-  graph = LangGraphRB::Graph.new(state_class: LangGraphRB::State) do
-    # 1) Capture user input into the message history
-    node :receive_input do |state|
-      user_msg = { role: 'user', content: state[:input].to_s }
-      existing = state[:messages] || []
-      { messages: existing + [user_msg], last_user_message: state[:input].to_s }
-    end
-
-    # 2) Decide whether to call a tool based on the user's request
-    #    If the user says: "search <query>", produce a tool_call for SearchTool
-    llm_node :router, llm_client: mock_llm, system_prompt: "You are a helpful assistant that can decide to call tools when asked." do |state, context|
-      last_user = state[:last_user_message].to_s
-
-      if (match = last_user.match(/^\s*search\s+(.+)$/i))
-        query = match[1].strip
-        tool_call = {
-          id: "call_#{Time.now.to_i}",
-          name: 'search',
-          args: { query: query }
-        }
-
-        assistant_msg = {
-          role: 'assistant',
-          content: "Let me search for: #{query}",
-          tool_calls: [tool_call]
-        }
-
-        {
-          messages: (state[:messages] || []) + [assistant_msg],
-          tool_call: tool_call # also put it in state for convenience
-        }
-      else
-        # No tool needed; provide a direct assistant response using the LLM
-        messages = state[:messages] || []
-        messages = [{ role: 'system', content: context[:system_prompt] }] + messages if context[:system_prompt]
-        response = context[:llm_client].call(messages)
-
-        {
-          messages: (state[:messages] || []) + [{ role: 'assistant', content: response }],
-          last_response: response
-        }
-      end
-    end
-
-    # 3) Execute the tool if requested and append a tool message
-    #    Use a custom block to merge the tool message with existing history
-    tool_node :use_tool, tool: SearchTool do |state|
-      # Determine the tool call (from state or messages)
-      tool_call = state[:tool_call]
-      unless tool_call
-        # Fallback: look for a message containing tool_calls
-        (state[:messages] || []).reverse.each do |msg|
-          if msg[:tool_calls] && msg[:tool_calls].first
-            tool_call = msg[:tool_calls].first
-            break
-          end
-        end
-      end
-
-      return { error: 'No tool call found' } unless tool_call
-
-      result = SearchTool.call(tool_call[:args])
-
-      tool_msg = {
-        role: 'tool',
-        content: result.to_s,
-        tool_call_id: tool_call[:id]
-      }
-
-      {
-        messages: (state[:messages] || []) + [tool_msg],
-        tool_result: result
-      }
-    end
-
-    # 4) Produce the final answer with the LLM, using any tool results
-    llm_node :final_answer, llm_client: mock_llm, system_prompt: "Use tool results if available to answer the user."
-
-    # Flow
-    set_entry_point :receive_input
-    edge :receive_input, :router
-
-    # If there is a tool_call, go to :use_tool, otherwise go directly to :final_answer
-    conditional_edge :router, ->(state) {
-      state[:tool_call] ? "use_tool" : "final_answer"
-    }, {
-      "use_tool" => :use_tool,
-      "final_answer" => :final_answer
-    }
-
-    edge :use_tool, :router
-    set_finish_point :final_answer
-  end
-
-  graph.compile!
-
-  puts graph.to_mermaid
-
-  puts "\n— Example 1: No tool needed —"
-  result1 = graph.invoke({ messages: [], input: "Tell me a joke." })
-  puts "Assistant: #{result1[:last_response]}"
-
-  puts "\n— Example 2: Tool is used —"
-  result2 = graph.invoke({ messages: [], input: "search Ruby LangGraphRB" })
-  final_message = (result2[:messages] || []).reverse.find { |m| m[:role] == 'assistant' }&.dig(:content)
-  puts "Assistant: #{final_message}"
-  tool_message = (result2[:messages] || []).reverse.find { |m| m[:role] == 'tool' }&.dig(:content)
-  puts "(Tool) #{tool_message}"
-end
-
-tool_and_llm_example
-
-