langsmith-sdk 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +4 -0
- data/.rubocop.yml +120 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +48 -0
- data/LICENSE +22 -0
- data/README.md +224 -0
- data/Rakefile +8 -0
- data/examples/LLM_TRACING.md +439 -0
- data/examples/complex_agent.rb +472 -0
- data/examples/llm_tracing.rb +304 -0
- data/examples/openai_integration.rb +751 -0
- data/langsmith.gemspec +38 -0
- data/lib/langsmith/batch_processor.rb +237 -0
- data/lib/langsmith/client.rb +181 -0
- data/lib/langsmith/configuration.rb +96 -0
- data/lib/langsmith/context.rb +73 -0
- data/lib/langsmith/errors.rb +13 -0
- data/lib/langsmith/railtie.rb +86 -0
- data/lib/langsmith/run.rb +320 -0
- data/lib/langsmith/run_tree.rb +154 -0
- data/lib/langsmith/traceable.rb +120 -0
- data/lib/langsmith/version.rb +5 -0
- data/lib/langsmith.rb +144 -0
- metadata +134 -0
data/examples/complex_agent.rb
@@ -0,0 +1,472 @@
# frozen_string_literal: true

# Example: Complex AI Agent with Simulated Execution Times
#
# This example demonstrates a realistic multi-step AI agent workflow
# with proper timing simulation to generate meaningful traces.
#
# Run with: LANGSMITH_API_KEY=... ruby examples/complex_agent.rb

require_relative "../lib/langsmith"
require "securerandom"

# Configure Langsmith
Langsmith.configure do |config|
  config.api_key = ENV.fetch("LANGSMITH_API_KEY", "your-api-key")
  config.tracing_enabled = true
  config.project = "complex-agent-demo"
end
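
# NOTE: ENV.fetch falls back to a "your-api-key" placeholder, so the script
# runs without credentials, but uploads will only succeed with a real key.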

# Simulated delay to make traces more realistic
def simulate_latency(min_ms, max_ms)
  sleep(rand(min_ms..max_ms) / 1000.0)
end
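
# e.g. simulate_latency(200, 800) sleeps for 0.2-0.8s, giving each simulated
# span a non-zero, varied duration in the trace timeline.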

# =============================================================================
# Simulated LLM and Tool Functions
# =============================================================================

def call_llm(messages, model: "gpt-4", temperature: 0.7, max_tokens: 1000)
  Langsmith.trace("llm.chat", run_type: "llm", inputs: { messages:, model: }) do |run|
    run.set_model(model:, provider: "openai")
    run.add_metadata(temperature:, max_tokens:)

    # Simulate API latency (200-800ms for LLM calls)
    simulate_latency(200, 800)

    # Simulate token counts based on message length
    input_tokens = messages.sum { |m| (m[:content].to_s.length / 4.0).ceil }
    output_tokens = rand(50..300)

    run.set_token_usage(
      input_tokens:,
      output_tokens:,
      total_tokens: input_tokens + output_tokens
    )

    run.add_metadata(
      finish_reason: "stop",
      response_id: "chatcmpl-#{SecureRandom.hex(12)}"
    )

    # Return simulated response
    {
      content: generate_response_for(messages.last[:content]),
      model: model,
      tokens: input_tokens + output_tokens
    }
  end
end
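
# The input_tokens estimate above assumes roughly four characters per token,
# a common rule of thumb for English text; a real integration would read
# usage from the provider's response instead.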

def embed_text(text, model: "text-embedding-3-small")
  Langsmith.trace("llm.embed", run_type: "llm", inputs: { text: text[0..100], model: }) do |run|
    run.set_model(model:, provider: "openai")
    run.add_metadata(dimensions: 1536)

    # Simulate embedding latency (50-150ms)
    simulate_latency(50, 150)

    tokens = (text.length / 4.0).ceil
    # Embeddings only have input tokens
    run.set_token_usage(input_tokens: tokens)

    Array.new(1536) { rand(-1.0..1.0) }
  end
end
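
# The 1536-element array matches the default dimensionality of OpenAI's
# text-embedding-3-small; the values are random stand-ins, not real
# embeddings.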

def search_vector_db(embedding, collection:, top_k: 5)
  Langsmith.trace("vector_db.search", run_type: "retriever", inputs: { collection: collection, top_k: top_k }) do |run|
    run.add_metadata(
      database: "pinecone",
      collection: collection,
      top_k: top_k,
      metric: "cosine"
    )

    # Simulate DB latency (30-100ms)
    simulate_latency(30, 100)

    results = top_k.times.map do |i|
      {
        id: "doc-#{SecureRandom.hex(4)}",
        score: (0.95 - i * 0.05).round(3),
        content: "Document #{i + 1}: This is relevant context about the query topic."
      }
    end

    run.add_metadata(results_count: results.length, max_score: results.first[:score])
    results
  end
end
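
# `embedding` is accepted for signature realism but unused: the simulated
# results are derived from top_k alone, with scores stepping down from 0.95.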

def search_web(query)
  Langsmith.trace("tool.web_search", run_type: "tool", inputs: { query: query }) do |run|
    run.add_metadata(engine: "tavily", max_results: 5)

    # Simulate web search latency (300-600ms)
    simulate_latency(300, 600)

    results = [
      { title: "#{query} - Wikipedia", url: "https://en.wikipedia.org/wiki/#{query.gsub(' ', '_')}", snippet: "Overview of #{query}..." },
      { title: "#{query} Guide", url: "https://example.com/guide", snippet: "Complete guide to #{query}..." },
      { title: "Understanding #{query}", url: "https://blog.example.com/#{query}", snippet: "Deep dive into #{query}..." }
    ]

    run.add_metadata(results_count: results.length)
    results
  end
end

def execute_code(code, language: "python")
  Langsmith.trace("tool.code_execution", run_type: "tool", inputs: { code: code, language: language }) do |run|
    run.add_metadata(language: language, sandbox: "docker")

    # Simulate code execution (100-500ms)
    simulate_latency(100, 500)

    output = "Execution successful. Result: 42"
    run.add_metadata(exit_code: 0, execution_time_ms: rand(50..200))

    { success: true, output: output }
  end
end

def generate_response_for(query)
  responses = {
    /search|find|look up/i => "I found several relevant results for your query.",
    /calculate|compute|math/i => "Based on my calculations, the answer is 42.",
    /code|program|script/i => "I've executed the code and here are the results.",
    /summarize|summary/i => "Here's a concise summary of the information.",
    /explain|what is/i => "Let me explain this concept in detail."
  }

  responses.each do |pattern, response|
    return response if query.match?(pattern)
  end

  "I've processed your request and here's my response based on the available information."
end

# =============================================================================
# Complex Agent Implementation
# =============================================================================

class ResearchAgent
  include Langsmith::Traceable

  def initialize
    @conversation_history = []
  end

  traceable run_type: "chain", name: "research_agent.run"
  def run(user_query)
    @conversation_history << { role: "user", content: user_query }

    # Step 1: Analyze the query and plan
    plan = plan_execution(user_query)

    # Step 2: Execute the plan
    results = execute_plan(plan)

    # Step 3: Synthesize final response
    response = synthesize_response(user_query, results)

    @conversation_history << { role: "assistant", content: response }
    response
  end
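
  # The `traceable` call above appears to act as a decorator for the
  # following `def`, wrapping #run in its own "research_agent.run" chain
  # span so the plan/execute/synthesize spans nest beneath a single root.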

  private

  def plan_execution(query)
    Langsmith.trace("agent.plan", run_type: "chain", inputs: { query: query }) do |run|
      run.add_metadata(planner_version: "v2")

      # Call LLM to create a plan (the response itself is unused; the call
      # exists to emit a realistic planning LLM span in the trace)
      call_llm([
        { role: "system", content: "You are a planning agent. Analyze the query and create an execution plan." },
        { role: "user", content: "Create a plan for: #{query}" }
      ], model: "gpt-4", temperature: 0.3)

      # Simulate plan based on query type
      plan = determine_plan_steps(query)
      run.add_metadata(steps_count: plan.length, plan_type: plan.first[:type])

      plan
    end
  end

  def determine_plan_steps(query)
    if query.match?(/research|learn about|what is/i)
      [
        { type: "retrieve", action: "Search knowledge base" },
        { type: "web_search", action: "Search web for recent info" },
        { type: "synthesize", action: "Combine and summarize" }
      ]
    elsif query.match?(/calculate|compute|analyze data/i)
      [
        { type: "retrieve", action: "Get relevant formulas" },
        { type: "code", action: "Execute calculation" },
        { type: "synthesize", action: "Explain results" }
      ]
    else
      [
        { type: "retrieve", action: "Search knowledge base" },
        { type: "synthesize", action: "Generate response" }
      ]
    end
  end
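
  # e.g. determine_plan_steps("What is quantum computing?") hits the first
  # branch above and returns the three-step retrieve / web_search /
  # synthesize plan.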

  def execute_plan(plan)
    Langsmith.trace("agent.execute_plan", run_type: "chain", inputs: { plan: plan }) do |run|
      run.add_metadata(total_steps: plan.length)

      results = []

      plan.each_with_index do |step, index|
        step_result = execute_step(step, index + 1, results)
        results << step_result
        run.add_event(name: "step_completed", step: index + 1, type: step[:type])
      end

      run.add_metadata(completed_steps: results.length, success: true)
      results
    end
  end

  def execute_step(step, step_number, previous_results)
    Langsmith.trace("agent.step_#{step_number}", run_type: "chain", inputs: { step: step }) do |run|
      run.add_metadata(step_number: step_number, step_type: step[:type])
      run.add_tags("step", step[:type])

      result = case step[:type]
               when "retrieve"
                 execute_retrieval_step
               when "web_search"
                 execute_web_search_step
               when "code"
                 execute_code_step
               when "synthesize"
                 execute_synthesis_step(previous_results)
               else
                 { type: step[:type], data: "Unknown step type" }
               end

      run.add_metadata(result_type: result[:type])
      result
    end
  end
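
  # Each plan step therefore appears as an `agent.step_N` chain span that
  # contains whatever spans its helper opens (retrieval pipeline, web
  # search, code execution, or synthesis).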

  def execute_retrieval_step
    Langsmith.trace("retrieval_pipeline", run_type: "chain") do |run|
      # Embed the query
      query_embedding = embed_text("user query for knowledge base search")

      # Search vector DB
      kb_results = search_vector_db(query_embedding, collection: "knowledge_base", top_k: 3)

      run.add_metadata(documents_retrieved: kb_results.length)

      { type: "retrieval", data: kb_results }
    end
  end

  def execute_web_search_step
    results = search_web("latest information on the topic")
    { type: "web_search", data: results }
  end

  def execute_code_step
    code = <<~PYTHON
      import math
      result = math.sqrt(1764)
      print(f"The answer is {result}")
    PYTHON

    execution_result = execute_code(code, language: "python")
    { type: "code_execution", data: execution_result }
  end
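
  # math.sqrt(1764) evaluates to 42.0, which matches the canned
  # "Execution successful. Result: 42" output that execute_code returns.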

  def execute_synthesis_step(previous_results)
    Langsmith.trace("synthesis", run_type: "chain", inputs: { results_count: previous_results.length }) do |run|
      # Prepare context from previous results
      context = previous_results.map { |r| r[:data].to_s }.join("\n\n")

      # Call LLM to synthesize
      synthesis = call_llm([
        { role: "system", content: "Synthesize the following information into a coherent response." },
        { role: "user", content: "Information to synthesize:\n#{context}" }
      ], model: "gpt-4", temperature: 0.5)

      run.add_metadata(context_length: context.length, synthesis_tokens: synthesis[:tokens])

      { type: "synthesis", data: synthesis[:content] }
    end
  end

  def synthesize_response(query, results)
    Langsmith.trace("final_response", run_type: "llm", inputs: { query: }) do |run|
      run.set_model(model: "gpt-4", provider: "openai")
      run.add_metadata(results_count: results.length)

      # Final LLM call to generate response
      response = call_llm([
        { role: "system", content: "Generate a helpful, comprehensive response based on the research results." },
        { role: "user", content: "Query: #{query}\n\nResearch results: #{results.map { |r| r[:data] }.join("\n")}" }
      ], model: "gpt-4", temperature: 0.7, max_tokens: 500)

      input_tokens = rand(200..400)
      output_tokens = rand(100..300)
      run.set_token_usage(input_tokens:, output_tokens:, total_tokens: input_tokens + output_tokens)

      response[:content]
    end
  end
end
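
# Note: synthesize_response reports randomized token counts on its own "llm"
# span; the nested llm.chat span created by call_llm keeps its separate,
# length-based estimate.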

# =============================================================================
# Multi-Agent Collaboration Example
# =============================================================================

def run_multi_agent_task(task)
  Langsmith.trace("multi_agent.orchestrator", run_type: "chain", inputs: { task: task }) do |run|
    run.add_metadata(agent_count: 3, task_type: "collaborative")
    run.add_tags("multi-agent", "production")

    # Agent 1: Research
    research_result = Langsmith.trace("agent.researcher", run_type: "chain") do |agent_run|
      agent_run.add_metadata(agent_role: "researcher", specialization: "information_gathering")

      simulate_latency(100, 200)
      embed_text(task)
      docs = search_vector_db([], collection: "research_papers", top_k: 5)
      web = search_web(task)

      call_llm([
        { role: "system", content: "You are a research specialist." },
        { role: "user", content: "Research: #{task}" }
      ], model: "gpt-4")

      { findings: docs.length + web.length, summary: "Research completed successfully" }
    end
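
    # With the simulated helpers, findings is always 5 + 3 = 8: five vector
    # hits (top_k) plus three canned web results.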

    # Agent 2: Analyst
    analysis_result = Langsmith.trace("agent.analyst", run_type: "chain") do |agent_run|
      agent_run.add_metadata(agent_role: "analyst", specialization: "data_analysis")

      simulate_latency(100, 200)

      # Multiple analysis sub-steps
      Langsmith.trace("analysis.data_processing", run_type: "tool") do |step|
        simulate_latency(50, 150)
        step.add_metadata(records_processed: rand(100..1000))
      end

      Langsmith.trace("analysis.statistical", run_type: "tool") do |step|
        simulate_latency(100, 200)
        step.add_metadata(metrics_computed: ["mean", "median", "std_dev"])
      end

      call_llm([
        { role: "system", content: "You are a data analyst." },
        { role: "user", content: "Analyze findings: #{research_result[:summary]}" }
      ], model: "gpt-4")

      { insights: 5, confidence: 0.87 }
    end

    # Agent 3: Writer
    final_output = Langsmith.trace("agent.writer", run_type: "chain") do |agent_run|
      agent_run.add_metadata(agent_role: "writer", specialization: "content_creation")

      simulate_latency(100, 200)

      # Generate outline
      Langsmith.trace("writing.outline", run_type: "llm") do |step|
        step.set_model(model: "gpt-4", provider: "openai")
        simulate_latency(150, 300)
        step.set_token_usage(input_tokens: 100, output_tokens: 150, total_tokens: 250)
      end

      # Write draft
      Langsmith.trace("writing.draft", run_type: "llm") do |step|
        step.set_model(model: "gpt-4", provider: "openai")
        simulate_latency(300, 600)
        step.set_token_usage(input_tokens: 300, output_tokens: 500, total_tokens: 800)
      end

      # Polish
      response = call_llm([
        { role: "system", content: "You are a professional writer." },
        { role: "user", content: "Write a comprehensive report based on: Research=#{research_result[:summary]}, Analysis=#{analysis_result[:insights]} insights" }
      ], model: "gpt-4", max_tokens: 1000)

      response[:content]
    end

    run.add_metadata(
      research_findings: research_result[:findings],
      analysis_insights: analysis_result[:insights],
      output_length: final_output.length
    )

    final_output
  end
end
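
# Resulting trace shape, roughly:
#   multi_agent.orchestrator
#   ├── agent.researcher (llm.embed, vector_db.search, tool.web_search, llm.chat)
#   ├── agent.analyst    (analysis.data_processing, analysis.statistical, llm.chat)
#   └── agent.writer     (writing.outline, writing.draft, llm.chat)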

# =============================================================================
# Run Examples
# =============================================================================

if __FILE__ == $PROGRAM_NAME
  puts "=" * 70
  puts "Complex AI Agent Tracing Demo"
  puts "=" * 70
  puts

  # Example 1: Multi-Agent Collaboration
  puts "Running Multi-Agent Collaboration..."
  puts "-" * 40

  start_time = Time.now
  result = run_multi_agent_task("Analyze market trends for renewable energy sector")
  elapsed = ((Time.now - start_time) * 1000).round
  puts "Result: #{result[0..100]}..."
  puts "Time: #{elapsed}ms"

  # Example 2: Research Agent
  puts "\n"
  puts "=" * 70
  puts "Running Research Agent..."
  puts "-" * 40
  agent = ResearchAgent.new

  queries = [
    "What is quantum computing and how does it work?",
    "Calculate the optimal portfolio allocation for a $100k investment",
    "Analyze market trends for renewable energy sector"
  ]

  queries.each_with_index do |query, i|
    puts "\nQuery #{i + 1}: #{query}"
    start_time = Time.now
    result = agent.run(query)
    elapsed = ((Time.now - start_time) * 1000).round
    puts "Response: #{result[0..100]}..."
    puts "Time: #{elapsed}ms"
  end

  # Give the background batch processor time to send, then flush the rest
  sleep 10
  puts "\nFlushing traces..."
  Langsmith.shutdown

  puts "\n"
  puts "=" * 70
  puts "All traces sent to LangSmith!"
  puts "Check your dashboard at https://smith.langchain.com"
  puts "Project: complex-agent-demo"
  puts "=" * 70
end