langgraph_rb 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +9 -0
- data/README.md +350 -0
- data/SUMMARY.md +170 -0
- data/examples/advanced_example.rb +388 -0
- data/examples/basic_example.rb +211 -0
- data/examples/simple_test.rb +266 -0
- data/langgraph_rb.gemspec +43 -0
- data/lib/langgraph_rb/command.rb +132 -0
- data/lib/langgraph_rb/edge.rb +141 -0
- data/lib/langgraph_rb/graph.rb +268 -0
- data/lib/langgraph_rb/node.rb +112 -0
- data/lib/langgraph_rb/runner.rb +360 -0
- data/lib/langgraph_rb/state.rb +70 -0
- data/lib/langgraph_rb/stores/memory.rb +265 -0
- data/lib/langgraph_rb/version.rb +3 -0
- data/lib/langgraph_rb.rb +15 -0
- data/test_runner.rb +160 -0
- metadata +151 -0
data/lib/langgraph_rb/runner.rb
ADDED
@@ -0,0 +1,360 @@
require 'thread'

module LangGraphRB
  class Runner
    attr_reader :graph, :store, :thread_id

    def initialize(graph, store:, thread_id:)
      @graph = graph
      @store = store
      @thread_id = thread_id
      @step_number = 0
      @execution_queue = Queue.new
      @interrupt_handler = nil
    end

    # Synchronous execution
    def invoke(initial_state, context: nil)
      result = nil

      stream(initial_state, context: context) do |step_result|
        result = step_result
      end

      result[:state]
    end

    # Streaming execution with optional block for receiving intermediate results
    def stream(initial_state, context: nil, &block)
      @step_number = 0
      current_state = initial_state

      # Initialize execution queue with START node
      active_executions = [
        ExecutionFrame.new(Graph::START, current_state, 0)
      ]

      loop do
        break if active_executions.empty?

        # Execute current super-step (all nodes at current level in parallel)
        step_results = execute_super_step(active_executions, context)
        break if step_results.empty?

        @step_number += 1

        # Process results and determine next nodes
        next_active = []
        final_state = nil

        step_results.each do |result|
          case result[:type]
          when :completed
            # Node completed normally
            if result[:next_destination]
              # Command specified explicit destination
              dest_name = result[:next_destination]
              dest_state = result[:state]

              if dest_name == Graph::FINISH
                final_state = dest_state
              else
                next_active << ExecutionFrame.new(dest_name, dest_state, @step_number)
              end
            else
              # Use normal edge routing
              next_destinations = determine_next_destinations(
                result[:node_name],
                result[:state],
                context
              )

              next_destinations.each do |dest_name, dest_state|
                if dest_name == Graph::FINISH
                  final_state = dest_state
                else
                  next_active << ExecutionFrame.new(dest_name, dest_state, @step_number)
                end
              end
            end

          when :send
            # Handle Send commands (map-reduce)
            result[:sends].each do |send_cmd|
              payload_state = result[:state].merge_delta(send_cmd.payload)
              next_active << ExecutionFrame.new(send_cmd.to, payload_state, @step_number)
            end

          when :interrupt
            # Handle human-in-the-loop interrupts
            if @interrupt_handler
              user_input = @interrupt_handler.call(result[:interrupt])
              # Continue with user input merged into state
              updated_state = result[:state].merge_delta(user_input || {})
              next_active << ExecutionFrame.new(result[:node_name], updated_state, @step_number)
            else
              # No interrupt handler, treat as completion
              final_state = result[:state]
            end

          when :error
            raise result[:error]
          end
        end

        # Save checkpoint
        checkpoint_state = final_state || (next_active.first&.state) || current_state
        save_checkpoint(checkpoint_state, @step_number)

        # Yield intermediate result if block given
        if block
          yield({
            step: @step_number,
            state: checkpoint_state,
            active_nodes: next_active.map(&:node_name),
            completed: next_active.empty?
          })
        end

        # Update for next iteration
        current_state = checkpoint_state
        active_executions = next_active

        # Break if we reached END
        break if final_state
      end

      {
        state: current_state,
        step_number: @step_number,
        thread_id: @thread_id
      }
    end

    # Resume from checkpoint
    def resume(additional_input = {}, context: nil)
      checkpoint = @store.load(@thread_id)
      raise GraphError, "No checkpoint found for thread #{@thread_id}" unless checkpoint

      @step_number = checkpoint[:step_number]
      resumed_state = checkpoint[:state].merge_delta(additional_input)

      # Resume execution from where we left off
      stream(resumed_state, context: context)
    end

    # Set interrupt handler for human-in-the-loop
    def on_interrupt(&handler)
      @interrupt_handler = handler
    end

    private

    # Execute all nodes in the current super-step in parallel
    def execute_super_step(active_executions, context)
      return [] if active_executions.empty?

      # Group by node name to handle potential duplicates
      grouped_executions = active_executions.group_by(&:node_name)

      results = []
      threads = []

      grouped_executions.each do |node_name, executions|
        node = @graph.nodes[node_name]
        next unless node # Skip if node doesn't exist

        # Execute each frame for this node
        executions.each do |frame|
          thread = Thread.new do
            execute_node_safely(node, frame.state, context, frame.step)
          end
          threads << thread
        end
      end

      # Wait for all threads to complete
      threads.each do |thread|
        result = thread.join.value
        results << result if result
      end

      results
    end

    # Safely execute a single node
    def execute_node_safely(node, state, context, step)
      begin
        result = node.call(state, context: context)
        process_node_result(node.name, state, result, step)
      rescue => error
        {
          type: :error,
          node_name: node.name,
          state: state,
          step: step,
          error: error
        }
      end
    end

    # Process the result from a node execution
    def process_node_result(node_name, original_state, result, step)
      case result
      when Command
        # Handle Command (update + goto)
        updated_state = original_state.merge_delta(result.update)

        if result.goto
          determine_next_destinations(node_name, updated_state, nil, forced_destination: result.goto)
            .map do |dest_name, dest_state|
              {
                type: :completed,
                node_name: node_name,
                state: dest_state,
                step: step,
                next_destination: dest_name
              }
            end.first
        else
          {
            type: :completed,
            node_name: node_name,
            state: updated_state,
            step: step
          }
        end

      when Send
        # Handle single Send
        {
          type: :send,
          node_name: node_name,
          state: original_state,
          step: step,
          sends: [result]
        }

      when MultiSend
        # Handle multiple Sends
        {
          type: :send,
          node_name: node_name,
          state: original_state,
          step: step,
          sends: result.sends
        }

      when Interrupt
        # Handle interrupt for human-in-the-loop
        {
          type: :interrupt,
          node_name: node_name,
          state: original_state,
          step: step,
          interrupt: result
        }

      when Hash
        # Handle simple state delta
        updated_state = original_state.merge_delta(result)
        {
          type: :completed,
          node_name: node_name,
          state: updated_state,
          step: step
        }

      else
        # Handle other return values
        {
          type: :completed,
          node_name: node_name,
          state: original_state,
          step: step
        }
      end
    end

    # Determine next destinations based on edges
    def determine_next_destinations(from_node, state, context, forced_destination: nil)
      if forced_destination
        return [[forced_destination, state]]
      end

      edges = @graph.get_edges_from(from_node)
      destinations = []

      edges.each do |edge|
        case edge
        when Edge
          destinations << [edge.to, state]

        when ConditionalEdge
          routes = edge.route(state, context: context)
          routes.each do |dest|
            destinations << [dest, state]
          end

        when FanOutEdge
          routes = edge.route(state, context: context)
          routes.each do |dest|
            destinations << [dest, state]
          end
        end
      end

      # Default to FINISH if no edges defined
      destinations.empty? ? [[Graph::FINISH, state]] : destinations
    end

    # Save execution checkpoint
    def save_checkpoint(state, step_number)
      @store.save(@thread_id, state, step_number, {
        timestamp: Time.now,
        graph_class: @graph.class.name
      })
    end

    # Execution frame for tracking active node executions
    class ExecutionFrame
      attr_reader :node_name, :state, :step

      def initialize(node_name, state, step)
        @node_name = node_name.to_sym
        @state = state
        @step = step
      end

      def to_s
        "#<ExecutionFrame node: #{@node_name}, step: #{@step}>"
      end
    end
  end

  # Thread-safe execution result collector
  class ResultCollector
    def initialize
      @results = []
      @mutex = Mutex.new
    end

    def add(result)
      @mutex.synchronize do
        @results << result
      end
    end

    def all
      @mutex.synchronize do
        @results.dup
      end
    end

    def clear
      @mutex.synchronize do
        @results.clear
      end
    end
  end
end
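For orientation, here is a minimal usage sketch of the Runner API added above. It is not part of the package contents: it assumes a `graph` object already built with the gem's Graph DSL (defined in data/lib/langgraph_rb/graph.rb, not shown here), and the thread id, node behavior, and state keys are placeholders.

# --- illustrative usage sketch, not part of the gem; assumes `graph` was built elsewhere ---
store  = LangGraphRB::Stores::InMemoryStore.new
runner = LangGraphRB::Runner.new(graph, store: store, thread_id: 'demo-thread')

# Optional: supply human input whenever a node returns an Interrupt.
runner.on_interrupt do |interrupt|
  { human_feedback: 'approved' } # merged into state before the node runs again
end

# Streaming: observe each super-step as it is checkpointed.
runner.stream(LangGraphRB::State.new(messages: [])) do |step|
  puts "step #{step[:step]} -> active: #{step[:active_nodes].inspect}"
end

# Synchronous: returns only the final state.
final_state = runner.invoke(LangGraphRB::State.new(messages: []))

# Later, pick up from the last saved checkpoint for the same thread_id.
runner.resume({ extra_input: 'continue' })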
data/lib/langgraph_rb/state.rb
ADDED
@@ -0,0 +1,70 @@
module LangGraphRB
  class State < Hash
    attr_reader :reducers

    def initialize(schema = {}, reducers = {})
      @reducers = reducers || {}
      super()
      merge!(schema) if schema.is_a?(Hash)
    end

    # Merge a delta (partial state update) using reducers
    def merge_delta(delta)
      return self if delta.nil? || delta.empty?

      new_state = self.class.new({}, @reducers)
      new_state.merge!(self)

      delta.each do |key, value|
        key = key.to_sym

        if @reducers[key]
          # Use the reducer function to combine old and new values
          new_state[key] = @reducers[key].call(self[key], value)
        else
          # Simple replacement
          new_state[key] = value
        end
      end

      new_state
    end

    # Create a new state with additional reducers
    def with_reducers(new_reducers)
      self.class.new(self, @reducers.merge(new_reducers))
    end

    # Common reducer for adding messages to an array
    def self.add_messages
      ->(old_value, new_value) do
        old_array = old_value || []
        new_array = new_value.is_a?(Array) ? new_value : [new_value]
        old_array + new_array
      end
    end

    # Common reducer for appending strings
    def self.append_string
      ->(old_value, new_value) do
        (old_value || "") + new_value.to_s
      end
    end

    # Common reducer for merging hashes
    def self.merge_hash
      ->(old_value, new_value) do
        old_hash = old_value || {}
        old_hash.merge(new_value || {})
      end
    end

    def to_h
      Hash[self]
    end

    def inspect
      "#<#{self.class.name} #{super}>"
    end
  end
end
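The reducer mechanics above are easiest to see with a small example. The sketch below uses only the State API shown in this file; the keys and values are illustrative and not part of the package.

# --- illustrative usage sketch, not part of the gem ---
reducers = {
  messages: LangGraphRB::State.add_messages,   # concatenate arrays
  log:      LangGraphRB::State.append_string,  # concatenate strings
  meta:     LangGraphRB::State.merge_hash      # shallow-merge hashes
}

state = LangGraphRB::State.new({ messages: [], log: '', meta: {} }, reducers)

s1 = state.merge_delta(messages: ['hi'], log: 'step1;', meta: { a: 1 })
s2 = s1.merge_delta(messages: ['there'], meta: { b: 2 }, mode: 'fast')

s2[:messages] # => ["hi", "there"]    (add_messages reducer)
s2[:log]      # => "step1;"           (key absent from second delta, carried over)
s2[:meta]     # => { a: 1, b: 2 }     (merge_hash reducer)
s2[:mode]     # => "fast"             (no reducer, so simple replacement)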
data/lib/langgraph_rb/stores/memory.rb
ADDED
@@ -0,0 +1,265 @@
require 'json'
require 'yaml'
require 'fileutils'

module LangGraphRB
  module Stores
    # Abstract base class for memory stores
    class BaseStore
      def save(thread_id, state, step_number, metadata = {})
        raise NotImplementedError, "Subclasses must implement #save"
      end

      def load(thread_id, step_number = nil)
        raise NotImplementedError, "Subclasses must implement #load"
      end

      def list_threads
        raise NotImplementedError, "Subclasses must implement #list_threads"
      end

      def delete(thread_id)
        raise NotImplementedError, "Subclasses must implement #delete"
      end

      def list_steps(thread_id)
        raise NotImplementedError, "Subclasses must implement #list_steps"
      end
    end

    # In-memory store (not persistent across process restarts)
    class InMemoryStore < BaseStore
      def initialize
        @data = {}
      end

      def save(thread_id, state, step_number, metadata = {})
        @data[thread_id] ||= {}
        @data[thread_id][step_number] = {
          state: deep_copy(state),
          timestamp: Time.now,
          metadata: metadata
        }
      end

      def load(thread_id, step_number = nil)
        thread_data = @data[thread_id]
        return nil unless thread_data

        if step_number
          checkpoint = thread_data[step_number]
          return nil unless checkpoint

          {
            state: deep_copy(checkpoint[:state]),
            step_number: step_number,
            timestamp: checkpoint[:timestamp],
            metadata: checkpoint[:metadata]
          }
        else
          # Return latest checkpoint
          latest_step = thread_data.keys.max
          return nil unless latest_step

          checkpoint = thread_data[latest_step]
          {
            state: deep_copy(checkpoint[:state]),
            step_number: latest_step,
            timestamp: checkpoint[:timestamp],
            metadata: checkpoint[:metadata]
          }
        end
      end

      def list_threads
        @data.keys
      end

      def delete(thread_id)
        @data.delete(thread_id)
      end

      def list_steps(thread_id)
        thread_data = @data[thread_id]
        return [] unless thread_data

        thread_data.keys.sort
      end

      def clear
        @data.clear
      end

      private

      def deep_copy(obj)
        case obj
        when Hash
          obj.transform_values { |v| deep_copy(v) }
        when Array
          obj.map { |v| deep_copy(v) }
        else
          obj.dup rescue obj
        end
      end
    end

    # File-based store using YAML
    class FileStore < BaseStore
      def initialize(base_path)
        @base_path = base_path
        FileUtils.mkdir_p(@base_path) unless Dir.exist?(@base_path)
      end

      def save(thread_id, state, step_number, metadata = {})
        thread_dir = File.join(@base_path, thread_id.to_s)
        FileUtils.mkdir_p(thread_dir) unless Dir.exist?(thread_dir)

        checkpoint_file = File.join(thread_dir, "#{step_number}.yml")

        data = {
          state: state.to_h,
          timestamp: Time.now,
          metadata: metadata
        }

        File.write(checkpoint_file, YAML.dump(data))
      end

      def load(thread_id, step_number = nil)
        thread_dir = File.join(@base_path, thread_id.to_s)
        return nil unless Dir.exist?(thread_dir)

        if step_number
          checkpoint_file = File.join(thread_dir, "#{step_number}.yml")
          return nil unless File.exist?(checkpoint_file)

          data = YAML.load_file(checkpoint_file)
          {
            state: State.new(data['state']),
            step_number: step_number,
            timestamp: data['timestamp'],
            metadata: data['metadata'] || {}
          }
        else
          # Find latest checkpoint
          files = Dir.glob(File.join(thread_dir, "*.yml"))
          return nil if files.empty?

          latest_file = files.max_by do |file|
            File.basename(file, '.yml').to_i
          end

          step_num = File.basename(latest_file, '.yml').to_i
          data = YAML.load_file(latest_file)

          {
            state: State.new(data['state']),
            step_number: step_num,
            timestamp: data['timestamp'],
            metadata: data['metadata'] || {}
          }
        end
      end

      def list_threads
        Dir.entries(@base_path).select do |entry|
          File.directory?(File.join(@base_path, entry)) && entry != '.' && entry != '..'
        end
      end

      def delete(thread_id)
        thread_dir = File.join(@base_path, thread_id.to_s)
        FileUtils.rm_rf(thread_dir) if Dir.exist?(thread_dir)
      end

      def list_steps(thread_id)
        thread_dir = File.join(@base_path, thread_id.to_s)
        return [] unless Dir.exist?(thread_dir)

        Dir.glob(File.join(thread_dir, "*.yml")).map do |file|
          File.basename(file, '.yml').to_i
        end.sort
      end
    end

    # JSON-based store
    class JsonStore < BaseStore
      def initialize(base_path)
        @base_path = base_path
        FileUtils.mkdir_p(@base_path) unless Dir.exist?(@base_path)
      end

      def save(thread_id, state, step_number, metadata = {})
        thread_dir = File.join(@base_path, thread_id.to_s)
        FileUtils.mkdir_p(thread_dir) unless Dir.exist?(thread_dir)

        checkpoint_file = File.join(thread_dir, "#{step_number}.json")

        data = {
          state: state.to_h,
          timestamp: Time.now.iso8601,
          metadata: metadata
        }

        File.write(checkpoint_file, JSON.pretty_generate(data))
      end

      def load(thread_id, step_number = nil)
        thread_dir = File.join(@base_path, thread_id.to_s)
        return nil unless Dir.exist?(thread_dir)

        if step_number
          checkpoint_file = File.join(thread_dir, "#{step_number}.json")
          return nil unless File.exist?(checkpoint_file)

          data = JSON.parse(File.read(checkpoint_file))
          {
            state: State.new(data['state']),
            step_number: step_number,
            timestamp: Time.parse(data['timestamp']),
            metadata: data['metadata'] || {}
          }
        else
          # Find latest checkpoint
          files = Dir.glob(File.join(thread_dir, "*.json"))
          return nil if files.empty?

          latest_file = files.max_by do |file|
            File.basename(file, '.json').to_i
          end

          step_num = File.basename(latest_file, '.json').to_i
          data = JSON.parse(File.read(latest_file))

          {
            state: State.new(data['state']),
            step_number: step_num,
            timestamp: Time.parse(data['timestamp']),
            metadata: data['metadata'] || {}
          }
        end
      end

      def list_threads
        Dir.entries(@base_path).select do |entry|
          File.directory?(File.join(@base_path, entry)) && entry != '.' && entry != '..'
        end
      end

      def delete(thread_id)
        thread_dir = File.join(@base_path, thread_id.to_s)
        FileUtils.rm_rf(thread_dir) if Dir.exist?(thread_dir)
      end

      def list_steps(thread_id)
        thread_dir = File.join(@base_path, thread_id.to_s)
        return [] unless Dir.exist?(thread_dir)

        Dir.glob(File.join(thread_dir, "*.json")).map do |file|
          File.basename(file, '.json').to_i
        end.sort
      end
    end
  end
end
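As a quick illustration of the store interface defined above, the sketch below checkpoints a State into each backend and reads it back. It is not part of the package; the paths, thread id, and metadata are placeholders. Note that JsonStore relies on Time#iso8601 and Time.parse, which come from Ruby's 'time' stdlib extension.

# --- illustrative usage sketch, not part of the gem ---
require 'time' # Time#iso8601 / Time.parse used by JsonStore come from the stdlib 'time' extension

state = LangGraphRB::State.new(counter: 1)

# In-memory: fastest, lost when the process exits.
mem = LangGraphRB::Stores::InMemoryStore.new
mem.save('thread-1', state, 1)
mem.load('thread-1') # => { state: ..., step_number: 1, timestamp: ..., metadata: {} }

# YAML files written under ./checkpoints/<thread_id>/<step>.yml
yaml_store = LangGraphRB::Stores::FileStore.new('./checkpoints')
yaml_store.save('thread-1', state, 1, { note: 'after first step' })
yaml_store.list_steps('thread-1') # => [1]

# JSON files written under ./checkpoints_json/<thread_id>/<step>.json
json_store = LangGraphRB::Stores::JsonStore.new('./checkpoints_json')
json_store.save('thread-1', state, 1)
json_store.load('thread-1', 1)[:step_number] # => 1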
data/lib/langgraph_rb.rb
ADDED
@@ -0,0 +1,15 @@
require_relative 'langgraph_rb/version'
require_relative 'langgraph_rb/state'
require_relative 'langgraph_rb/node'
require_relative 'langgraph_rb/edge'
require_relative 'langgraph_rb/command'
require_relative 'langgraph_rb/graph'
require_relative 'langgraph_rb/runner'
require_relative 'langgraph_rb/stores/memory'

module LangGraphRB
  class Error < StandardError; end
  class GraphError < Error; end
  class NodeError < Error; end
  class StateError < Error; end
end
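The error hierarchy above is what callers rescue against; for instance, Runner#resume raises GraphError when the store holds no checkpoint for the thread. A short hedged sketch (not part of the package; `graph` is a placeholder for an object built with the gem's Graph DSL):

# --- illustrative usage sketch, not part of the gem ---
require 'langgraph_rb'

store  = LangGraphRB::Stores::InMemoryStore.new
runner = LangGraphRB::Runner.new(graph, store: store, thread_id: 'missing-thread')

begin
  runner.resume
rescue LangGraphRB::GraphError => e
  warn "cannot resume: #{e.message}" # "No checkpoint found for thread missing-thread"
rescue LangGraphRB::Error => e
  warn "other LangGraphRB failure: #{e.message}"
end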