langfuse-ruby 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,196 @@
+ #!/usr/bin/env ruby
+
+ require 'langfuse'
+
+ # Initialize the Langfuse client
+ client = Langfuse.new(
+   public_key: ENV['LANGFUSE_PUBLIC_KEY'],
+   secret_key: ENV['LANGFUSE_SECRET_KEY'],
+   host: ENV['LANGFUSE_HOST'] || 'https://cloud.langfuse.com'
+ )
+
+ puts 'šŸš€ Starting basic tracing example...'
+
+ # Example 1: Simple trace with generation
+ puts "\nšŸ“ Example 1: Simple trace with generation"
+
+ trace = client.trace(
+   name: 'simple-chat',
+   user_id: 'user-123',
+   session_id: 'session-456',
+   input: { message: 'Hello, how are you?' },
+   metadata: {
+     environment: 'development',
+     version: '1.0.0'
+   }
+ )
+
+ puts "Created trace: #{trace.id}"
+
+ generation = trace.generation(
+   name: 'openai-chat',
+   model: 'gpt-3.5-turbo',
+   input: [
+     { role: 'user', content: 'Hello, how are you?' }
+   ],
+   output: { content: "I'm doing well, thank you! How can I help you today?" },
+   usage: {
+     prompt_tokens: 12,
+     completion_tokens: 18,
+     total_tokens: 30
+   },
+   model_parameters: {
+     temperature: 0.7,
+     max_tokens: 150
+   }
+ )
+
+ puts "Created generation: #{generation.id}"
+
+ # Update trace with final output
+ trace.update(
+   output: { response: "I'm doing well, thank you! How can I help you today?" }
+ )
+
+ puts "Trace URL: #{trace.get_url}"
+
+ # Example 2: Nested spans for complex workflow
+ puts "\nšŸ”— Example 2: Nested spans for complex workflow"
+
+ workflow_trace = client.trace(
+   name: 'document-qa-workflow',
+   user_id: 'user-456',
+   input: { question: 'What is machine learning?' }
+ )
+
+ # Document retrieval span
+ retrieval_span = workflow_trace.span(
+   name: 'document-retrieval',
+   input: { query: 'What is machine learning?' }
+ )
+
+ # Embedding generation within retrieval
+ embedding_gen = retrieval_span.generation(
+   name: 'embedding-generation',
+   model: 'text-embedding-ada-002',
+   input: 'What is machine learning?',
+   output: [0.1, 0.2, 0.3, 0.4, 0.5], # Simplified embedding
+   usage: { prompt_tokens: 5, total_tokens: 5 }
+ )
+
+ # End retrieval span
+ retrieval_span.end(
+   output: {
+     documents: [
+       'Machine learning is a subset of artificial intelligence...',
+       'ML algorithms learn patterns from data...'
+     ]
+   }
+ )
+
+ # Answer generation span
+ answer_span = workflow_trace.span(
+   name: 'answer-generation',
+   input: {
+     question: 'What is machine learning?',
+     context: [
+       'Machine learning is a subset of artificial intelligence...',
+       'ML algorithms learn patterns from data...'
+     ]
+   }
+ )
+
+ # LLM generation for answer
+ answer_gen = answer_span.generation(
+   name: 'openai-completion',
+   model: 'gpt-3.5-turbo',
+   input: [
+     {
+       role: 'system',
+       content: "Answer the user's question based on the provided context."
+     },
+     {
+       role: 'user',
+       content: 'What is machine learning? Context: Machine learning is a subset of artificial intelligence... ML algorithms learn patterns from data...'
+     }
+   ],
+   output: {
+     content: 'Machine learning is a subset of artificial intelligence that enables computers to learn and improve from experience without being explicitly programmed. ML algorithms identify patterns in data and use these patterns to make predictions or decisions.'
+   },
+   usage: {
+     prompt_tokens: 85,
+     completion_tokens: 45,
+     total_tokens: 130
+   }
+ )
+
+ answer_span.end(
+   output: {
+     answer: 'Machine learning is a subset of artificial intelligence that enables computers to learn and improve from experience without being explicitly programmed. ML algorithms identify patterns in data and use these patterns to make predictions or decisions.'
+   }
+ )
+
+ # Update workflow trace
+ workflow_trace.update(
+   output: {
+     answer: 'Machine learning is a subset of artificial intelligence that enables computers to learn and improve from experience without being explicitly programmed. ML algorithms identify patterns in data and use these patterns to make predictions or decisions.'
+   }
+ )
+
+ puts "Workflow trace URL: #{workflow_trace.get_url}"
+
+ # Example 3: Adding scores and evaluations
+ puts "\n⭐ Example 3: Adding scores and evaluations"
+
+ # Score the generation quality
+ answer_gen.score(
+   name: 'accuracy',
+   value: 0.9,
+   comment: 'Highly accurate answer based on context'
+ )
+
+ answer_gen.score(
+   name: 'helpfulness',
+   value: 0.85,
+   comment: 'Very helpful and informative response'
+ )
+
+ # Score the entire workflow
+ workflow_trace.score(
+   name: 'user-satisfaction',
+   value: 0.95,
+   comment: 'User was very satisfied with the answer'
+ )
+
+ puts 'Added scores to generation and trace'
+
+ # Example 4: Error handling
+ puts "\n🚨 Example 4: Error handling"
+
+ begin
+   error_trace = client.trace(name: 'error-example')
+
+   error_gen = error_trace.generation(
+     name: 'failed-generation',
+     model: 'gpt-3.5-turbo',
+     input: [{ role: 'user', content: 'This will fail' }],
+     level: 'ERROR',
+     status_message: 'Rate limit exceeded'
+   )
+
+   puts "Created error trace: #{error_trace.id}"
+ rescue Langfuse::RateLimitError => e
+   puts "Rate limit error: #{e.message}"
+ rescue Langfuse::APIError => e
+   puts "API error: #{e.message}"
+ end
+
+ # Flush all events
+ puts "\nšŸ”„ Flushing events..."
+ client.flush
+
+ puts "\nāœ… Basic tracing example completed!"
+ puts 'Check your Langfuse dashboard to see the traces.'
+
+ # Shutdown client
+ client.shutdown
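
If the script above raises between creating a trace and the final flush (a network error, for instance), events buffered by the client could be lost. A minimal sketch of a safer shape, assuming only the client API demonstrated above (Langfuse.new, trace, flush, shutdown); the ensure wrapper is an illustration, not code shipped in the gem:

    require 'langfuse'

    client = Langfuse.new(
      public_key: ENV['LANGFUSE_PUBLIC_KEY'],
      secret_key: ENV['LANGFUSE_SECRET_KEY']
    )

    begin
      trace = client.trace(name: 'simple-chat', user_id: 'user-123')
      # ... spans, generations, and scores as in the example above ...
    ensure
      # Deliver buffered events and stop background work even on failure.
      client.flush
      client.shutdown
    end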
@@ -0,0 +1,283 @@
+ #!/usr/bin/env ruby
+
+ require 'langfuse'
+
+ # Initialize the Langfuse client
+ client = Langfuse.new(
+   public_key: ENV['LANGFUSE_PUBLIC_KEY'],
+   secret_key: ENV['LANGFUSE_SECRET_KEY'],
+   host: ENV['LANGFUSE_HOST'] || 'https://cloud.langfuse.com'
+ )
+
+ puts "šŸš€ Starting prompt management example..."
+
+ # Example 1: Create and use text prompts
+ puts "\nšŸ“ Example 1: Create and use text prompts"
+
+ begin
+   # Create a text prompt
+   text_prompt = client.create_prompt(
+     name: "greeting-prompt",
+     prompt: "Hello {{user_name}}! Welcome to {{service_name}}. How can I help you with {{topic}} today?",
+     labels: ["greeting", "customer-service"],
+     config: {
+       temperature: 0.7,
+       max_tokens: 100
+     }
+   )
+
+   puts "Created text prompt: #{text_prompt.name} (Version: #{text_prompt.version})"
+
+ rescue Langfuse::APIError => e
+   puts "Note: Prompt might already exist - #{e.message}"
+ end
+
+ # Get and use the prompt
+ begin
+   prompt = client.get_prompt("greeting-prompt")
+
+   # Compile prompt with variables
+   compiled_text = prompt.compile(
+     user_name: "Alice",
+     service_name: "AI Assistant",
+     topic: "machine learning"
+   )
+
+   puts "Compiled prompt: #{compiled_text}"
+
+ rescue Langfuse::APIError => e
+   puts "Could not retrieve prompt: #{e.message}"
+ end
+
+ # Example 2: Create and use chat prompts
+ puts "\nšŸ’¬ Example 2: Create and use chat prompts"
+
+ begin
+   # Create a chat prompt
+   chat_prompt = client.create_prompt(
+     name: "ai-assistant-chat",
+     prompt: [
+       {
+         role: "system",
+         content: "You are a helpful AI assistant specialized in {{domain}}. Always be {{tone}} and provide {{detail_level}} answers."
+       },
+       {
+         role: "user",
+         content: "{{user_message}}"
+       }
+     ],
+     labels: ["chat", "assistant", "ai"],
+     config: {
+       temperature: 0.8,
+       max_tokens: 200
+     }
+   )
+
+   puts "Created chat prompt: #{chat_prompt.name}"
+
+ rescue Langfuse::APIError => e
+   puts "Note: Chat prompt might already exist - #{e.message}"
+ end
+
+ # Get and use the chat prompt
+ begin
+   chat_prompt = client.get_prompt("ai-assistant-chat")
+
+   # Compile chat prompt with variables
+   compiled_messages = chat_prompt.compile(
+     domain: "software development",
+     tone: "friendly and professional",
+     detail_level: "detailed",
+     user_message: "How do I implement a REST API in Ruby?"
+   )
+
+   puts "Compiled chat messages:"
+   compiled_messages.each_with_index do |message, index|
+     puts " #{index + 1}. #{message[:role]}: #{message[:content]}"
+   end
+
+ rescue Langfuse::APIError => e
+   puts "Could not retrieve chat prompt: #{e.message}"
+ end
+
+ # Example 3: Using prompt templates
+ puts "\nšŸŽØ Example 3: Using prompt templates"
+
+ # Create a reusable text template
+ translation_template = Langfuse::PromptTemplate.from_template(
+   "Translate the following {{source_language}} text to {{target_language}}:\n\n{{text}}\n\nTranslation:"
+ )
+
+ puts "Template variables: #{translation_template.input_variables}"
+
+ # Use the template
+ translated_prompt = translation_template.format(
+   source_language: "English",
+   target_language: "Spanish",
+   text: "Hello, how are you today?"
+ )
+
+ puts "Translation prompt: #{translated_prompt}"
+
+ # Create a reusable chat template
+ coding_template = Langfuse::ChatPromptTemplate.from_messages([
+   {
+     role: "system",
+     content: "You are an expert {{language}} developer. Provide clean, well-commented code examples."
+   },
+   {
+     role: "user",
+     content: "{{request}}"
+   }
+ ])
+
+ puts "Chat template variables: #{coding_template.input_variables}"
+
+ # Use the chat template
+ coding_messages = coding_template.format(
+   language: "Ruby",
+   request: "Show me how to create a simple HTTP server"
+ )
+
+ puts "Coding chat messages:"
+ coding_messages.each_with_index do |message, index|
+   puts " #{index + 1}. #{message[:role]}: #{message[:content]}"
+ end
+
+ # Example 4: Prompt versioning and caching
+ puts "\nšŸ”„ Example 4: Prompt versioning and caching"
+
+ # Get specific version of a prompt
+ begin
+   versioned_prompt = client.get_prompt("greeting-prompt", version: 1)
+   puts "Retrieved prompt version: #{versioned_prompt.version}"
+
+   # Get latest version (cached)
+   latest_prompt = client.get_prompt("greeting-prompt")
+   puts "Latest prompt version: #{latest_prompt.version}"
+
+   # Get with label
+   labeled_prompt = client.get_prompt("greeting-prompt", label: "production")
+   puts "Labeled prompt: #{labeled_prompt.labels}"
+
+ rescue Langfuse::APIError => e
+   puts "Could not retrieve versioned prompt: #{e.message}"
+ end
+
+ # Example 5: Using prompts in tracing
+ puts "\nšŸ”— Example 5: Using prompts in tracing"
+
+ begin
+   # Get a prompt for use in generation
+   system_prompt = client.get_prompt("ai-assistant-chat")
+
+   # Create a trace
+   trace = client.trace(
+     name: "prompt-based-chat",
+     user_id: "user-789",
+     input: { message: "Explain Ruby blocks" }
+   )
+
+   # Compile the prompt
+   messages = system_prompt.compile(
+     domain: "Ruby programming",
+     tone: "educational and clear",
+     detail_level: "beginner-friendly",
+     user_message: "Explain Ruby blocks"
+   )
+
+   # Create generation with prompt
+   generation = trace.generation(
+     name: "openai-chat-with-prompt",
+     model: "gpt-3.5-turbo",
+     input: messages,
+     output: {
+       content: "Ruby blocks are pieces of code that can be passed to methods. They're defined using either do...end or curly braces {}. Blocks are commonly used with iterators like .each, .map, and .select."
+     },
+     usage: {
+       prompt_tokens: 45,
+       completion_tokens: 35,
+       total_tokens: 80
+     },
+     metadata: {
+       prompt_name: system_prompt.name,
+       prompt_version: system_prompt.version
+     }
+   )
+
+   puts "Created generation with prompt: #{generation.id}"
+   puts "Trace URL: #{trace.get_url}"
+
+ rescue Langfuse::APIError => e
+   puts "Could not use prompt in tracing: #{e.message}"
+ end
+
+ # Example 6: Advanced prompt features
+ puts "\nšŸŽÆ Example 6: Advanced prompt features"
+
+ # Create a prompt with complex templating
+ begin
+   complex_prompt = client.create_prompt(
+     name: "code-review-prompt",
+     prompt: {
+       system: "You are a senior {{language}} developer reviewing code. Focus on {{review_aspects}}.",
+       user: "Please review this {{language}} code:\n\n```{{language}}\n{{code}}\n```\n\nProvide feedback on: {{specific_feedback}}"
+     },
+     labels: ["code-review", "development"],
+     config: {
+       temperature: 0.3,
+       max_tokens: 500
+     }
+   )
+
+   puts "Created complex prompt: #{complex_prompt.name}"
+
+ rescue Langfuse::APIError => e
+   puts "Note: Complex prompt might already exist - #{e.message}"
+ end
+
+ # Create a prompt with conditional logic (using Ruby)
+ class ConditionalPrompt
+   def self.generate(user_level:, topic:, include_examples: true)
+     base_prompt = "Explain {{topic}} for a {{user_level}} audience."
+
+     if include_examples
+       base_prompt += " Include practical examples."
+     end
+
+     if user_level == "beginner"
+       base_prompt += " Use simple language and avoid jargon."
+     elsif user_level == "advanced"
+       base_prompt += " Feel free to use technical terminology."
+     end
+
+     base_prompt
+   end
+ end
+
+ conditional_prompt_text = ConditionalPrompt.generate(
+   user_level: "beginner",
+   topic: "machine learning",
+   include_examples: true
+ )
+
+ puts "Conditional prompt: #{conditional_prompt_text}"
+
+ # Use with template
+ conditional_template = Langfuse::PromptTemplate.from_template(conditional_prompt_text)
+ formatted_prompt = conditional_template.format(
+   topic: "neural networks",
+   user_level: "beginner"
+ )
+
+ puts "Formatted conditional prompt: #{formatted_prompt}"
+
+ # Flush events
+ puts "\nšŸ”„ Flushing events..."
+ client.flush
+
+ puts "\nāœ… Prompt management example completed!"
+ puts "Check your Langfuse dashboard to see the prompts and traces."
+
+ # Shutdown client
+ client.shutdown
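
The compile and format calls above both fill {{placeholder}} variables in a template. For readers who want a mental model of that behavior, here is a rough stand-in in plain Ruby; render_template is a hypothetical helper for illustration only, not part of the gem's API, and the real implementation may differ in details such as escaping or strict variable checking:

    # Hypothetical stand-in for template compilation: replaces {{name}}
    # placeholders with values from a hash, leaving unknown keys intact.
    def render_template(template, variables)
      template.gsub(/\{\{(\w+)\}\}/) do
        variables.fetch(Regexp.last_match(1).to_sym) { |key| "{{#{key}}}" }
      end
    end

    puts render_template(
      'Hello {{user_name}}! Welcome to {{service_name}}.',
      user_name: 'Alice', service_name: 'AI Assistant'
    )
    # => Hello Alice! Welcome to AI Assistant.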
@@ -0,0 +1,51 @@
+ require_relative 'lib/langfuse/version'
+
+ Gem::Specification.new do |spec|
+   spec.name = 'langfuse-ruby'
+   spec.version = Langfuse::VERSION
+   spec.authors = ['Your Name']
+   spec.email = ['your.email@example.com']
+   spec.summary = 'Ruby SDK for Langfuse - Open source LLM engineering platform'
+   spec.description = 'Ruby client library for Langfuse, providing tracing, prompt management, and evaluation capabilities for LLM applications'
+   spec.homepage = 'https://github.com/your-username/langfuse-ruby'
+   spec.license = 'MIT'
+   spec.required_ruby_version = '>= 2.7.0'
+
+   spec.metadata['allowed_push_host'] = 'https://rubygems.org'
+   spec.metadata['homepage_uri'] = spec.homepage
+   spec.metadata['source_code_uri'] = 'https://github.com/your-username/langfuse-ruby'
+   spec.metadata['changelog_uri'] = 'https://github.com/your-username/langfuse-ruby/blob/main/CHANGELOG.md'
+
+   # Specify which files should be added to the gem when it is released.
+   spec.files = Dir.chdir(File.expand_path(__dir__)) do
+     if File.exist?('.git')
+       `git ls-files -z`.split("\x0").reject { |f| f.match(%r{\A(?:test|spec|features)/}) }
+     else
+       Dir.glob('**/*').reject do |f|
+         File.directory?(f) ||
+           f.match(%r{\A(?:test|spec|features)/}) ||
+           f.match(/\A\./) ||
+           f.match(/\.gem$/) ||
+           f.match(/test_.*\.rb$/)
+       end
+     end
+   end
+   spec.bindir = 'exe'
+   spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
+   spec.require_paths = ['lib']
+
+   # Dependencies
+   spec.add_dependency 'concurrent-ruby', '~> 1.0'
+   spec.add_dependency 'faraday', '~> 2.0'
+   spec.add_dependency 'faraday-net_http', '~> 3.0'
+   spec.add_dependency 'json', '~> 2.0'
+
+   # Development dependencies
+   spec.add_development_dependency 'bundler', '~> 2.0'
+   spec.add_development_dependency 'rake', '~> 13.0'
+   spec.add_development_dependency 'rspec', '~> 3.0'
+   spec.add_development_dependency 'rubocop', '~> 1.0'
+   spec.add_development_dependency 'vcr', '~> 6.0'
+   spec.add_development_dependency 'webmock', '~> 3.0'
+   spec.add_development_dependency 'yard', '~> 0.9'
+ end
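
For completeness, a minimal sketch of consuming this release from a Gemfile; the constraint mirrors the 0.1.0 version above, and the Ruby floor comes from required_ruby_version in the gemspec:

    # Gemfile
    source 'https://rubygems.org'

    # Requires Ruby >= 2.7.0 per the gemspec above.
    gem 'langfuse-ruby', '~> 0.1.0'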