desiru 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.env.example +34 -0
- data/.rubocop.yml +7 -4
- data/.ruby-version +1 -0
- data/CLAUDE.md +4 -0
- data/Gemfile +21 -2
- data/Gemfile.lock +87 -12
- data/README.md +295 -2
- data/Rakefile +1 -0
- data/db/migrations/001_create_initial_tables.rb +96 -0
- data/db/migrations/002_create_job_results.rb +39 -0
- data/desiru.db +0 -0
- data/desiru.gemspec +2 -5
- data/docs/background_processing_roadmap.md +87 -0
- data/docs/job_scheduling.md +167 -0
- data/dspy-analysis-swarm.yml +60 -0
- data/dspy-feature-analysis.md +121 -0
- data/examples/README.md +69 -0
- data/examples/api_with_persistence.rb +122 -0
- data/examples/assertions_example.rb +232 -0
- data/examples/async_processing.rb +2 -0
- data/examples/few_shot_learning.rb +1 -2
- data/examples/graphql_api.rb +4 -2
- data/examples/graphql_integration.rb +3 -3
- data/examples/graphql_optimization_summary.md +143 -0
- data/examples/graphql_performance_benchmark.rb +247 -0
- data/examples/persistence_example.rb +102 -0
- data/examples/react_agent.rb +203 -0
- data/examples/rest_api.rb +173 -0
- data/examples/rest_api_advanced.rb +333 -0
- data/examples/scheduled_job_example.rb +116 -0
- data/examples/simple_qa.rb +1 -2
- data/examples/sinatra_api.rb +109 -0
- data/examples/typed_signatures.rb +1 -2
- data/graphql_optimization_summary.md +53 -0
- data/lib/desiru/api/grape_integration.rb +284 -0
- data/lib/desiru/api/persistence_middleware.rb +148 -0
- data/lib/desiru/api/sinatra_integration.rb +217 -0
- data/lib/desiru/api.rb +42 -0
- data/lib/desiru/assertions.rb +74 -0
- data/lib/desiru/async_status.rb +65 -0
- data/lib/desiru/cache.rb +1 -1
- data/lib/desiru/configuration.rb +2 -1
- data/lib/desiru/errors.rb +160 -0
- data/lib/desiru/field.rb +17 -14
- data/lib/desiru/graphql/batch_loader.rb +85 -0
- data/lib/desiru/graphql/data_loader.rb +242 -75
- data/lib/desiru/graphql/enum_builder.rb +75 -0
- data/lib/desiru/graphql/executor.rb +37 -4
- data/lib/desiru/graphql/schema_generator.rb +62 -158
- data/lib/desiru/graphql/type_builder.rb +138 -0
- data/lib/desiru/graphql/type_cache_warmer.rb +91 -0
- data/lib/desiru/jobs/async_predict.rb +1 -1
- data/lib/desiru/jobs/base.rb +67 -0
- data/lib/desiru/jobs/batch_processor.rb +6 -6
- data/lib/desiru/jobs/retriable.rb +119 -0
- data/lib/desiru/jobs/retry_strategies.rb +169 -0
- data/lib/desiru/jobs/scheduler.rb +219 -0
- data/lib/desiru/jobs/webhook_notifier.rb +242 -0
- data/lib/desiru/models/anthropic.rb +164 -0
- data/lib/desiru/models/base.rb +37 -3
- data/lib/desiru/models/open_ai.rb +151 -0
- data/lib/desiru/models/open_router.rb +161 -0
- data/lib/desiru/module.rb +59 -9
- data/lib/desiru/modules/chain_of_thought.rb +3 -3
- data/lib/desiru/modules/majority.rb +51 -0
- data/lib/desiru/modules/multi_chain_comparison.rb +204 -0
- data/lib/desiru/modules/predict.rb +8 -1
- data/lib/desiru/modules/program_of_thought.rb +139 -0
- data/lib/desiru/modules/react.rb +273 -0
- data/lib/desiru/modules/retrieve.rb +4 -2
- data/lib/desiru/optimizers/base.rb +2 -4
- data/lib/desiru/optimizers/bootstrap_few_shot.rb +2 -2
- data/lib/desiru/optimizers/copro.rb +268 -0
- data/lib/desiru/optimizers/knn_few_shot.rb +185 -0
- data/lib/desiru/persistence/database.rb +71 -0
- data/lib/desiru/persistence/models/api_request.rb +38 -0
- data/lib/desiru/persistence/models/job_result.rb +138 -0
- data/lib/desiru/persistence/models/module_execution.rb +37 -0
- data/lib/desiru/persistence/models/optimization_result.rb +28 -0
- data/lib/desiru/persistence/models/training_example.rb +25 -0
- data/lib/desiru/persistence/models.rb +11 -0
- data/lib/desiru/persistence/repositories/api_request_repository.rb +98 -0
- data/lib/desiru/persistence/repositories/base_repository.rb +77 -0
- data/lib/desiru/persistence/repositories/job_result_repository.rb +116 -0
- data/lib/desiru/persistence/repositories/module_execution_repository.rb +85 -0
- data/lib/desiru/persistence/repositories/optimization_result_repository.rb +67 -0
- data/lib/desiru/persistence/repositories/training_example_repository.rb +102 -0
- data/lib/desiru/persistence/repository.rb +29 -0
- data/lib/desiru/persistence/setup.rb +77 -0
- data/lib/desiru/persistence.rb +49 -0
- data/lib/desiru/registry.rb +3 -5
- data/lib/desiru/signature.rb +91 -24
- data/lib/desiru/version.rb +1 -1
- data/lib/desiru.rb +23 -8
- data/missing-features-analysis.md +192 -0
- metadata +63 -45
- data/lib/desiru/models/raix_adapter.rb +0 -210
data/examples/graphql_performance_benchmark.rb
@@ -0,0 +1,247 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'desiru'
+require 'desiru/graphql/schema_generator'
+require 'desiru/graphql/executor'
+require 'desiru/graphql/data_loader'
+require 'benchmark'
+
+# Example: GraphQL Performance Benchmark with Request Deduplication
+# This demonstrates the performance improvement from request deduplication
+
+# Mock model for benchmarking
+class MockModel < Desiru::Models::Base
+  def initialize(config = {})
+    super
+  end
+
+  def call(_prompt, **_options)
+    # Return a simple response without actually calling an LLM
+    { content: "Mock response", usage: { total_tokens: 0 } }
+  end
+
+  def validate_config!
+    # No validation needed for mock
+  end
+end
+
+# Configure Desiru with mock model
+Desiru.configure do |config|
+  config.default_model = MockModel.new
+end
+
+# Create a module that tracks call counts
+class BenchmarkModule < Desiru::Modules::Predict
+  @call_count = 0
+  @batch_count = 0
+
+  class << self
+    attr_reader :call_count, :batch_count
+
+    def reset_counts!
+      @call_count = 0
+      @batch_count = 0
+    end
+
+    def increment_call_count
+      @call_count += 1
+    end
+
+    def increment_batch_count
+      @batch_count += 1
+    end
+  end
+
+  def call(inputs)
+    self.class.increment_call_count
+    # Simulate some processing time
+    sleep(0.001)
+    { result: "Processed: #{inputs[:id]}", timestamp: Time.now.to_f }
+  end
+
+  def batch_forward(inputs_array)
+    self.class.increment_batch_count
+    # Simulate batch processing time (more efficient than individual calls)
+    sleep(0.001 * Math.log(inputs_array.size + 1))
+    inputs_array.map do |inputs|
+      { result: "Batch processed: #{inputs[:id]}", timestamp: Time.now.to_f }
+    end
+  end
+end
+
+# Create schema generator and register the module
+generator = Desiru::GraphQL::SchemaGenerator.new
+signature = Desiru::Signature.new('id: string -> result: string, timestamp: float')
+module_instance = BenchmarkModule.new(signature)
+
+generator.register_signature('fetchData', signature)
+generator.register_module('fetchData', module_instance)
+
+# Generate schema
+schema = generator.generate_schema
+
+# Create executors with and without DataLoader
+data_loader = Desiru::GraphQL::DataLoader.new
+executor_with_loader = Desiru::GraphQL::Executor.new(schema, data_loader: data_loader)
+executor_without_loader = Desiru::GraphQL::Executor.new(schema)
+
+puts "=== GraphQL Performance Benchmark with Request Deduplication ==="
+puts
+
+# Test 1: Query with duplicate fields (common in GraphQL)
+duplicate_query = <<~GRAPHQL
+  {
+    user1: fetchData(id: "123") { result timestamp }
+    user2: fetchData(id: "456") { result timestamp }
+    user3: fetchData(id: "123") { result timestamp }
+    user4: fetchData(id: "789") { result timestamp }
+    user5: fetchData(id: "456") { result timestamp }
+    user6: fetchData(id: "123") { result timestamp }
+  }
+GRAPHQL
+
+puts "Test 1: Query with duplicate requests (3x id:123, 2x id:456, 1x id:789)"
+puts
+
+# Without deduplication
+BenchmarkModule.reset_counts!
+time_without = Benchmark.realtime do
+  executor_without_loader.execute(duplicate_query)
+end
+without_calls = BenchmarkModule.call_count
+without_batches = BenchmarkModule.batch_count
+
+# With deduplication
+BenchmarkModule.reset_counts!
+time_with = Benchmark.realtime do
+  executor_with_loader.execute(duplicate_query)
+end
+with_calls = BenchmarkModule.call_count
+with_batches = BenchmarkModule.batch_count
+
+puts "Without deduplication:"
+puts " Time: #{(time_without * 1000).round(2)}ms"
+puts " Individual calls: #{without_calls}"
+puts " Batch calls: #{without_batches}"
+puts
+
+puts "With deduplication:"
+puts " Time: #{(time_with * 1000).round(2)}ms"
+puts " Individual calls: #{with_calls}"
+puts " Batch calls: #{with_batches}"
+puts " Unique requests processed: 3 (deduplication working!)"
+puts
+
+improvement = ((time_without - time_with) / time_without * 100).round(1)
+puts "Performance improvement: #{improvement}%"
+puts
+
+# Test 2: Nested query simulation (common with relationships)
+puts "\nTest 2: Simulating nested queries (N+1 problem)"
+puts
+
+nested_query = <<~GRAPHQL
+  {
+    posts1: fetchData(id: "post1") { result }
+    posts2: fetchData(id: "post2") { result }
+    posts3: fetchData(id: "post3") { result }
+    author1: fetchData(id: "author1") { result }
+    author2: fetchData(id: "author2") { result }
+    author3: fetchData(id: "author1") { result }
+    author4: fetchData(id: "author2") { result }
+    author5: fetchData(id: "author1") { result }
+    comments1: fetchData(id: "comment1") { result }
+    comments2: fetchData(id: "comment2") { result }
+    comments3: fetchData(id: "comment1") { result }
+  }
+GRAPHQL
+
+# Without deduplication
+BenchmarkModule.reset_counts!
+time_without = Benchmark.realtime do
+  executor_without_loader.execute(nested_query)
+end
+without_calls = BenchmarkModule.call_count
+without_batches = BenchmarkModule.batch_count
+
+# With deduplication
+BenchmarkModule.reset_counts!
+time_with = Benchmark.realtime do
+  executor_with_loader.execute(nested_query)
+end
+with_calls = BenchmarkModule.call_count
+with_batches = BenchmarkModule.batch_count
+
+puts "Without deduplication:"
+puts " Time: #{(time_without * 1000).round(2)}ms"
+puts " Total module calls: #{without_calls + without_batches}"
+puts
+
+puts "With deduplication + batching:"
+puts " Time: #{(time_with * 1000).round(2)}ms"
+puts " Total module calls: #{with_calls + with_batches}"
+puts " Unique requests: 7 (3 posts, 2 authors, 2 comments)"
+puts
+
+improvement = ((time_without - time_with) / time_without * 100).round(1)
+puts "Performance improvement: #{improvement}%"
+puts
+
+# Test 3: Large batch with many duplicates
+puts "\nTest 3: Large batch with high duplication rate"
+puts
+
+# Generate a query with many duplicates
+field_count = 50
+unique_ids = 10
+fields = []
+field_count.times do |i|
+  id = rand(unique_ids)
+  fields << "field#{i}: fetchData(id: \"id_#{id}\") { result }"
+end
+large_query = "{ #{fields.join(' ')} }"
+
+# Without deduplication
+BenchmarkModule.reset_counts!
+time_without = Benchmark.realtime do
+  executor_without_loader.execute(large_query)
+end
+without_calls = BenchmarkModule.call_count
+without_batches = BenchmarkModule.batch_count
+
+# With deduplication
+BenchmarkModule.reset_counts!
+time_with = Benchmark.realtime do
+  executor_with_loader.execute(large_query)
+end
+BenchmarkModule.call_count
+with_batches = BenchmarkModule.batch_count
+
+puts "Query with #{field_count} fields, ~#{unique_ids} unique IDs"
+puts
+
+puts "Without deduplication:"
+puts " Time: #{(time_without * 1000).round(2)}ms"
+puts " Total requests processed: #{without_calls + without_batches}"
+puts
+
+puts "With deduplication + batching:"
+puts " Time: #{(time_with * 1000).round(2)}ms"
+puts " Unique requests processed: #{unique_ids}"
+puts " Batch calls: #{with_batches}"
+puts
+
+improvement = ((time_without - time_with) / time_without * 100).round(1)
+puts "Performance improvement: #{improvement}%"
+puts
+puts "Deduplication ratio: #{(field_count.to_f / unique_ids).round(1)}:1"
+
+puts "\n=== Summary ==="
+puts "Request deduplication in GraphQL DataLoader prevents duplicate operations,"
+puts "significantly improving performance when the same data is requested multiple"
+puts "times within a single query. This is especially beneficial for:"
+puts "- Complex queries with repeated fields"
+puts "- Nested relationships that cause N+1 problems"
+puts "- High-traffic APIs where efficiency matters"
data/examples/persistence_example.rb
@@ -0,0 +1,102 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'desiru'
+require 'desiru/persistence'
+
+# Configure persistence
+Desiru::Persistence.database_url = 'sqlite://desiru_example.db'
+
+# Connect and migrate
+puts "Setting up database..."
+Desiru::Persistence.connect!
+Desiru::Persistence.migrate!
+
+# Access repositories
+module_executions = Desiru::Persistence[:module_executions]
+api_requests = Desiru::Persistence[:api_requests]
+optimization_results = Desiru::Persistence[:optimization_results]
+training_examples = Desiru::Persistence[:training_examples]
+
+# Example 1: Track module executions
+puts "\n1. Tracking module executions:"
+execution = module_executions.create_for_module('TextSummarizer', { text: 'Long article...' })
+puts "Created execution: #{execution.id}"
+
+# Simulate processing
+sleep 0.5
+result = { summary: 'Article summary', word_count: 50 }
+module_executions.complete(execution.id, result, { model: 'gpt-3.5-turbo' })
+puts "Completed execution with result"
+
+# Example 2: Store API requests
+puts "\n2. Storing API requests:"
+api_request = api_requests.create(
+  method: 'POST',
+  path: '/api/v1/summarize',
+  remote_ip: '127.0.0.1',
+  headers: { 'Content-Type' => 'application/json' },
+  params: { text: 'Long article...' },
+  status_code: 200,
+  response_body: { summary: 'Article summary' },
+  response_time: 0.234
+)
+puts "Stored API request: #{api_request.path} (#{api_request.duration_ms}ms)"
+
+# Example 3: Track optimization results
+puts "\n3. Recording optimization results:"
+opt_result = optimization_results.create_result(
+  module_name: 'TextSummarizer',
+  optimizer_type: 'BootstrapFewShot',
+  score: 0.89,
+  baseline_score: 0.75,
+  training_size: 50,
+  parameters: { temperature: 0.7, max_tokens: 150 },
+  metrics: { accuracy: 0.89, f1_score: 0.87 }
+)
+puts "Optimization improved performance by #{opt_result.improvement_percentage}%"
+
+# Example 4: Store training examples
+puts "\n4. Managing training examples:"
+examples = [
+  { inputs: { text: 'Example 1' }, outputs: { summary: 'Summary 1' } },
+  { inputs: { text: 'Example 2' }, outputs: { summary: 'Summary 2' } },
+  { inputs: { text: 'Example 3' }, outputs: { summary: 'Summary 3' } }
+]
+
+training_examples.bulk_create('TextSummarizer', examples)
+puts "Created #{examples.length} training examples"
+
+# Example 5: Query data
+puts "\n5. Querying stored data:"
+puts "- Module execution success rate: #{module_executions.success_rate}%"
+puts "- Recent API requests: #{api_requests.recent(5).map(&:path).join(', ')}"
+puts "- Best optimization score: #{optimization_results.find_best_for_module('TextSummarizer')&.score}"
+puts "- Training examples available: #{training_examples.count}"
+
+# Example 6: Analytics
+puts "\n6. Analytics:"
+puts "- Average response time: #{api_requests.average_response_time}s"
+puts "- Requests per minute: #{api_requests.requests_per_minute(60)}"
+puts "- Top API paths:"
+api_requests.top_paths(3).each do |path_info|
+  puts " #{path_info[:path]}: #{path_info[:count]} requests"
+end
+
+# Example 7: Dataset splitting
+puts "\n7. Dataset management:"
+splits = training_examples.split_dataset('TextSummarizer')
+puts "- Training set: #{splits[:training].length} examples"
+puts "- Validation set: #{splits[:validation].length} examples"
+puts "- Test set: #{splits[:test].length} examples"
+
+# Export for training
+puts "\n8. Export training data:"
+export_data = training_examples.export_for_training('TextSummarizer', format: :dspy)
+puts "Exported #{export_data.length} examples in DSPy format"
+
+# Cleanup
+puts "\nCleaning up..."
+Desiru::Persistence.disconnect!
+puts "Done!"
data/examples/react_agent.rb
@@ -0,0 +1,203 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require_relative '../lib/desiru'
+require_relative '../lib/desiru/modules/react'
+require 'json'
+require 'net/http'
+
+# Example of using ReAct module to build a tool-using agent
+
+# Define some useful tools
+class WeatherTool
+  def self.name
+    "get_weather"
+  end
+
+  def self.description
+    "Get current weather for a city. Args: city (string)"
+  end
+
+  def self.call(city:)
+    # In a real implementation, this would call a weather API
+    # For demo purposes, we'll return mock data
+    temps = {
+      "Tokyo" => 72,
+      "New York" => 68,
+      "London" => 59,
+      "Sydney" => 77
+    }
+
+    temp = temps[city] || rand(50..85)
+    conditions = ["sunny", "partly cloudy", "cloudy", "rainy"].sample
+
+    "Current weather in #{city}: #{conditions}, #{temp}°F"
+  end
+end
+
+class CalculatorTool
+  def self.name
+    "calculator"
+  end
+
+  def self.description
+    "Perform calculations. Args: expression (string) - a mathematical expression to evaluate"
+  end
+
+  def self.call(expression:)
+    # Safety note: In production, use a proper expression parser
+    # This is just for demonstration
+
+    # Only allow basic math operations
+    if expression =~ %r{^[\d\s\+\-\*/\(\)\.]+$}
+      result = eval(expression) # rubocop:disable Security/Eval
+      "Result: #{result}"
+    else
+      "Error: Invalid expression. Only numbers and basic operators allowed."
+    end
+  rescue StandardError => e
+    "Error: #{e.message}"
+  end
+end
+
+class TimeTool
+  def self.name
+    "get_time"
+  end
+
+  def self.description
+    "Get current time for a timezone. Args: timezone (string) - e.g., 'EST', 'PST', 'GMT'"
+  end
+
+  def self.call(timezone: "GMT")
+    # Simple timezone offset mapping
+    offsets = {
+      "GMT" => 0,
+      "EST" => -5,
+      "PST" => -8,
+      "JST" => 9,
+      "AEST" => 10
+    }
+
+    offset = offsets[timezone.upcase] || 0
+    time = Time.now.utc + (offset * 3600)
+
+    "Current time in #{timezone}: #{time.strftime('%Y-%m-%d %H:%M:%S')}"
+  end
+end
+
+# Configure Desiru
+Desiru.configure do |config|
+  config.default_model = Desiru::Models::Anthropic.new(
+    model: ENV['LLM_MODEL'] || 'claude-3-haiku-20240307',
+    api_key: ENV['ANTHROPIC_API_KEY'] || raise('Please set ANTHROPIC_API_KEY environment variable')
+  )
+end
+
+# Create tools array
+tools = [WeatherTool, CalculatorTool, TimeTool]
+
+# Example 1: Weather Query
+puts "=== Example 1: Weather Query ==="
+weather_agent = Desiru::Modules::ReAct.new(
+  'question: string -> answer: string',
+  tools: tools,
+  max_iterations: 5
+)
+
+result = weather_agent.call(
+  question: "What's the weather like in Tokyo and New York? Also, what time is it in JST?"
+)
+puts "Question: What's the weather like in Tokyo and New York? Also, what time is it in JST?"
+puts "Answer: #{result[:answer]}"
+puts
+
+# Example 2: Multi-step Calculation
+puts "=== Example 2: Multi-step Calculation ==="
+calc_agent = Desiru::Modules::ReAct.new(
+  'problem: string -> solution: string, result: float',
+  tools: tools,
+  max_iterations: 5
+)
+
+result = calc_agent.call(
+  problem: "If the temperature in Tokyo is 72°F, what is it in Celsius? (Use formula: C = (F - 32) * 5/9)"
+)
+puts "Problem: If the temperature in Tokyo is 72°F, what is it in Celsius?"
+puts "Solution: #{result[:solution]}"
+puts "Result: #{result[:result]}"
+puts
+
+# Example 3: Complex Query Requiring Multiple Tools
+puts "=== Example 3: Complex Multi-tool Query ==="
+complex_agent = Desiru::Modules::ReAct.new(
+  'query: string -> summary: string, data: list[string]',
+  tools: tools,
+  max_iterations: 8
+)
+
+result = complex_agent.call(
+  query: "I'm planning a trip. Get the weather for London and Sydney, " \
+         "calculate the time difference between GMT and AEST, and tell me what time it is in both cities."
+)
+puts "Query: Planning a trip - need weather and time info for London and Sydney"
+puts "Summary: #{result[:summary]}"
+puts "Data points:"
+result[:data].each { |point| puts " - #{point}" }
+puts
+
+# Example 4: Tool with Error Handling
+puts "=== Example 4: Error Handling ==="
+error_agent = Desiru::Modules::ReAct.new(
+  'task: string -> result: string, status: string',
+  tools: tools,
+  max_iterations: 3
+)
+
+result = error_agent.call(
+  task: "Calculate the result of this expression: 10 / 0"
+)
+puts "Task: Calculate 10 / 0"
+puts "Result: #{result[:result]}"
+puts "Status: #{result[:status]}"
+puts
+
+# Example 5: Custom Tool Integration
+puts "=== Example 5: Custom Tool Integration ==="
+
+# Define a custom database lookup tool
+database = {
+  "user123" => { name: "Alice", balance: 1500.50 },
+  "user456" => { name: "Bob", balance: 2300.75 }
+}
+
+lookup_tool = lambda do |user_id:|
+  if database.key?(user_id)
+    user = database[user_id]
+    "User #{user[:name]} has balance: $#{user[:balance]}"
+  else
+    "User not found"
+  end
+end
+
+# Create agent with custom tool
+custom_agent = Desiru::Modules::ReAct.new(
+  'request: string -> response: string, amount: float',
+  tools: [
+    { name: "lookup_user", function: lookup_tool },
+    CalculatorTool
+  ],
+  max_iterations: 5
+)
+
+result = custom_agent.call(
+  request: "Look up user123 and calculate 10% of their balance"
+)
+puts "Request: Look up user123 and calculate 10% of their balance"
+puts "Response: #{result[:response]}"
+puts "Amount: #{result[:amount]}"
+
+# Demonstrate trajectory truncation for long conversations
+puts "\n=== Trajectory Management ==="
+puts "The ReAct module automatically manages long trajectories to fit within context limits."
+puts "This ensures the agent can handle extended conversations without exceeding token limits."