llm_conductor 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,304 @@
+ # frozen_string_literal: true
+
+ require 'json'
+ require 'ostruct'
+ require 'time' # the example uses plain Ruby Time rather than Rails' Time.zone
+ require_relative '../lib/llm_conductor'
+
+ # Example: Company Data Builder
+ #
+ # This example demonstrates how to use LlmConductor::DataBuilder to structure
+ # data from complex objects for LLM consumption.
+
+ class CompanyDataBuilder < LlmConductor::DataBuilder
+   def build
+     {
+       id: source_object.id,
+       name: source_object.name,
+       domain_name: source_object.domain_name,
+       description: format_for_llm(source_object.description, max_length: 500),
+       industry: extract_nested_data(:data, 'categories', 'primary') || 'Unknown',
+       location: build_location,
+       metrics: build_metrics,
+       technology_stack: extract_list(:technologies, limit: 5, separator: ', '),
+       summary: build_company_summary
+     }
+   end
+
+   private
+
+   def build_location
+     city = safe_extract(:city, default: nil)
+     country = safe_extract(:country, default: nil)
+
+     return 'Unknown' if city.nil? && country.nil?
+     return city if country.nil?
+     return country if city.nil?
+
+     "#{city}, #{country}"
+   end
+
+   def build_metrics
+     {
+       employees: format_employee_count,
+       revenue: format_revenue,
+       founded: safe_extract(:founded_year, default: 'Unknown'),
+       funding: extract_nested_data(:financial_data, 'total_funding'),
+       stage: extract_nested_data(:financial_data, 'stage')
+     }
+   end
+
+   def format_employee_count
+     count = safe_extract(:employee_count)
+     return 'Unknown' if count.nil?
+
+     case count
+     when 0..10
+       'Small (1-10)'
+     when 11..50
+       'Medium (11-50)'
+     when 51..200
+       'Large (51-200)'
+     when 201..1000
+       'Enterprise (201-1000)'
+     else
+       'Large Enterprise (1000+)'
+     end
+   end
+
+   def format_revenue
+     revenue = safe_extract(:revenue)
+     return 'Not disclosed' if revenue.nil? || revenue.zero?
+
+     format_number(revenue, as_currency: true, precision: 0)
+   end
+
+   def build_company_summary
+     name = safe_extract(:name, default: 'Unknown Company')
+     industry = extract_nested_data(:data, 'categories', 'primary') || 'Unknown Industry'
+     employees = format_employee_count
+     location = build_location
+
+     "#{name} is a #{industry.downcase} company with #{employees.downcase} employees, " \
+       "based in #{location}"
+   end
+ end
+
+ # Example: User Profile Builder
+ class UserProfileBuilder < LlmConductor::DataBuilder
+   def build
+     {
+       basic_info: build_basic_info,
+       professional: build_professional_info,
+       preferences: build_preferences,
+       activity_summary: build_activity_summary
+     }
+   end
+
+   private
+
+   def build_basic_info
+     {
+       name: format_for_llm(build_full_name),
+       email: safe_extract(:email, default: 'Not provided'),
+       age: calculate_age,
+       location: build_summary(:city, :state, :country, separator: ', ')
+     }
+   end
+
+   def build_full_name
+     first = safe_extract(:first_name, default: '')
+     last = safe_extract(:last_name, default: '')
+     "#{first} #{last}".strip
+   end
+
+   def calculate_age
+     birth_date = safe_extract(:birth_date)
+     return 'Unknown' if birth_date.nil?
+
+     begin
+       ((Time.now - birth_date) / 365.25 / 24 / 60 / 60).to_i
+     rescue StandardError
+       'Unknown'
+     end
+   end
+
+   def build_professional_info
+     {
+       title: safe_extract(:job_title, default: 'Not specified'),
+       company: safe_extract(:company_name, default: 'Not specified'),
+       experience_years: safe_extract(:years_experience, default: 'Unknown'),
+       skills: extract_list(:skills, limit: 10, separator: ', '),
+       industry: extract_nested_data(:profile, 'professional', 'industry')
+     }
+   end
+
+   def build_preferences
+     preferences = safe_extract(:preferences, default: {})
+     return 'No preferences set' unless preferences.respond_to?(:[])
+
+     {
+       communication: preferences['communication'] || 'Email',
+       notifications: preferences['notifications'] ? 'Enabled' : 'Disabled',
+       privacy_level: preferences['privacy'] || 'Standard'
+     }
+   end
+
+   def build_activity_summary
+     summary_parts = []
+
+     add_membership_info(summary_parts)
+     add_activity_info(summary_parts)
+
+     summary_parts.empty? ? 'No activity data' : summary_parts.join(' • ')
+   end
+
+   def add_membership_info(summary_parts)
+     account_created = safe_extract(:created_at)
+     return unless account_created
+
+     formatted_date = format_creation_date(account_created)
+     summary_parts << "Member since #{formatted_date}"
+   end
+
+   def add_activity_info(summary_parts)
+     last_login = safe_extract(:last_login_at)
+     return unless last_login
+
+     days_ago = calculate_days_since_login(last_login)
+     return unless days_ago
+
+     summary_parts << format_activity_status(days_ago)
+   end
+
+   def format_creation_date(account_created)
+     account_created.strftime('%B %Y')
+   rescue StandardError
+     'Unknown'
+   end
+
+   def calculate_days_since_login(last_login)
+     ((Time.now - last_login) / 86_400).to_i
+   rescue StandardError
+     nil
+   end
+
+   def format_activity_status(days_ago)
+     case days_ago
+     when 0
+       'Active today'
+     when 1
+       'Last active yesterday'
+     when 2..7
+       "Last active #{days_ago} days ago"
+     else
+       'Inactive user'
+     end
+   end
+ end
+
+ # Example usage with mock data
+ puts '=== Company Data Builder Example ==='
+
+ # Mock company object
+ company_data = OpenStruct.new(
+   id: 1,
+   name: 'InnovateTech Solutions',
+   domain_name: 'innovatetech.com',
+   description: 'A cutting-edge technology company focused on AI and machine learning solutions ' \
+                'for enterprise customers.',
+   city: 'San Francisco',
+   country: 'USA',
+   employee_count: 150,
+   revenue: 25_000_000,
+   founded_year: 2018,
+   technologies: %w[Ruby Python JavaScript AWS Docker Kubernetes],
+   data: {
+     'categories' => {
+       'primary' => 'Software Development',
+       'secondary' => 'AI/ML'
+     }
+   },
+   financial_data: {
+     'total_funding' => '$5.2M',
+     'stage' => 'Series A'
+   }
+ )
+
+ company_builder = CompanyDataBuilder.new(company_data)
+ company_result = company_builder.build
+
+ puts 'Company data structure:'
+ puts JSON.pretty_generate(company_result)
+
+ puts "\n=== Using with LLM Conductor ==="
+
+ # Example of using the built data with LLM Conductor
+ # response = LlmConductor.generate(
+ #   model: 'gpt-3.5-turbo',
+ #   data: company_result,
+ #   type: :company_analysis
+ # )
+
+ puts 'Built data ready for LLM consumption:'
+ puts "- Company: #{company_result[:name]}"
+ puts "- Industry: #{company_result[:industry]}"
+ puts "- Summary: #{company_result[:summary]}"
+
+ puts "\n=== User Profile Builder Example ==="
+
+ # Mock user object
+ user_data = OpenStruct.new(
+   first_name: 'John',
+   last_name: 'Doe',
+   email: 'john.doe@example.com',
+   birth_date: Time.parse('1985-03-15'),
+   city: 'Austin',
+   state: 'Texas',
+   country: 'USA',
+   job_title: 'Senior Software Engineer',
+   company_name: 'TechCorp',
+   years_experience: 8,
+   skills: %w[Ruby Rails JavaScript React PostgreSQL],
+   created_at: Time.parse('2020-01-15'),
+   last_login_at: Time.now - (2 * 24 * 60 * 60), # 2 days ago
+   preferences: {
+     'communication' => 'Email',
+     'notifications' => true,
+     'privacy' => 'High'
+   },
+   profile: {
+     'professional' => {
+       'industry' => 'Technology'
+     }
+   }
+ )
+
+ user_builder = UserProfileBuilder.new(user_data)
+ user_result = user_builder.build
+
+ puts 'User profile structure:'
+ puts JSON.pretty_generate(user_result)
+
+ puts "\n=== Error Handling Example ==="
+
+ # Example with missing/nil data
+ incomplete_company = OpenStruct.new(
+   id: 2,
+   name: 'StartupCorp',
+   domain_name: nil,
+   description: '',
+   employee_count: nil,
+   revenue: 0,
+   city: 'New York'
+   # Missing many other fields
+ )
+
+ incomplete_builder = CompanyDataBuilder.new(incomplete_company)
+ incomplete_result = incomplete_builder.build
+
+ puts 'Handling incomplete data:'
+ puts "- Revenue: #{incomplete_result[:metrics][:revenue]}"
+ puts "- Industry: #{incomplete_result[:industry]}"
+ puts "- Location: #{incomplete_result[:location]}"
+ puts "- Employee Count: #{incomplete_result[:metrics][:employees]}"
@@ -0,0 +1,133 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Example of the new prompt registration system
+ require_relative '../lib/llm_conductor'
+
+ # Configure the gem
+ LlmConductor.configure do |config|
+   config.default_model = 'gpt-3.5-turbo'
+   config.openai(api_key: ENV['OPENAI_API_KEY'])
+ end
+
+ # Define a custom prompt class
+ class CompanyAnalysisPrompt < LlmConductor::Prompts::BasePrompt
+   def render
+     <<~PROMPT
+       Company: #{name}
+       Domain: #{domain_name}
+       Description: #{truncate_text(description, max_length: 1000)}
+
+       Please analyze this company and provide:
+       1. Core business model
+       2. Target market
+       3. Competitive advantages
+       4. Growth potential
+
+       Format as JSON with the following structure:
+       {
+         "business_model": "description",
+         "target_market": "description",
+         "competitive_advantages": ["advantage1", "advantage2"],
+         "growth_potential": "high|medium|low"
+       }
+     PROMPT
+   end
+ end
+
+ # Define another prompt for summarization
+ class CompanySummaryPrompt < LlmConductor::Prompts::BasePrompt
+   def render
+     <<~PROMPT
+       Company Information:
+       #{company_details}
+
+       Please provide a brief summary of this company in #{word_limit || 100} words or less.
+       Focus on:
+       #{bulleted_list(focus_areas || ['Main business', 'Key products/services', 'Market position'])}
+     PROMPT
+   end
+
+   private
+
+   def company_details
+     details = []
+     details << "Name: #{name}" if name
+     details << "Domain: #{domain_name}" if domain_name
+     details << "Description: #{description}" if description
+     details << "Industry: #{industry}" if industry
+     details.join("\n")
+   end
+ end
+
+ # Register the prompt classes
+ LlmConductor::PromptManager.register(:detailed_analysis, CompanyAnalysisPrompt)
+ LlmConductor::PromptManager.register(:company_summary, CompanySummaryPrompt)
+
+ # Example company data
+ company_data = {
+   name: 'TechCorp',
+   domain_name: 'techcorp.com',
+   description: 'A leading technology company specializing in artificial intelligence and machine learning ' \
+                'solutions for enterprise clients. We help businesses automate their processes and make ' \
+                'data-driven decisions through our cutting-edge AI platform.',
+   industry: 'Technology',
+   founded_year: 2020
+ }
+
+ puts '=== Example 1: Detailed Analysis ==='
+
+ # Use the registered prompt
+ response = LlmConductor.generate(
+   model: 'gpt-3.5-turbo',
+   data: company_data,
+   type: :detailed_analysis
+ )
+
+ puts 'Generated Prompt Preview:'
+ puts LlmConductor::PromptManager.render(:detailed_analysis, company_data)
+ puts "\n#{'=' * 50}\n"
+
+ if response.success?
+   puts "Response: #{response.output}"
+
+   # Try to parse as JSON
+   begin
+     analysis = response.parse_json
+     puts "\nParsed Analysis:"
+     puts "Business Model: #{analysis['business_model']}"
+     puts "Target Market: #{analysis['target_market']}"
+     puts "Growth Potential: #{analysis['growth_potential']}"
+   rescue JSON::ParserError => e
+     puts "Note: Response is not valid JSON: #{e.message}"
+   end
+ else
+   puts "Error: #{response.metadata[:error]}"
+ end
+
+ puts "\n=== Example 2: Company Summary ==="
+
+ # Use the summary prompt with custom parameters
+ summary_data = company_data.merge(
+   word_limit: 50,
+   focus_areas: ['AI capabilities', 'Client base', 'Innovation']
+ )
+
+ response = LlmConductor.generate(
+   model: 'gpt-3.5-turbo',
+   data: summary_data,
+   type: :company_summary
+ )
+
+ puts 'Generated Prompt Preview:'
+ puts LlmConductor::PromptManager.render(:company_summary, summary_data)
+ puts "\n#{'=' * 50}\n"
+
+ if response.success?
+   puts "Summary: #{response.output}"
+ else
+   puts "Error: #{response.metadata[:error]}"
+ end
+
+ puts "\n=== Available Prompt Types ==="
+ puts "Registered types: #{LlmConductor::PromptManager.types.join(', ')}"
@@ -0,0 +1,108 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Example of RAG (Retrieval-Augmented Generation) usage with LlmConductor
+ require_relative '../lib/llm_conductor'
+
+ # Configure the gem
+ LlmConductor.configure do |config|
+   config.default_model = 'gpt-3.5-turbo'
+   config.openai(api_key: ENV['OPENAI_API_KEY'])
+ end
+
+ # RAG Example 1: Using template-based generation with structured data
+ puts '=== RAG Example 1: Template-based with structured data ==='
+
+ # Simulate retrieved context from your knowledge base
+ retrieved_context = {
+   title: 'Quantum Computing Overview',
+   content: 'Quantum computing leverages quantum mechanical phenomena like superposition and entanglement...',
+   source: 'Scientific Journal of Computing, 2024',
+   relevance_score: 0.95
+ }
+
+ # Use the legacy template approach for structured RAG
+ result = LlmConductor.generate(
+   model: 'gpt-3.5-turbo',
+   type: :custom,
+   data: {
+     template: "Based on the following context: {{context}}\n\nAnswer this question: {{question}}",
+     context: "#{retrieved_context[:title]}: #{retrieved_context[:content]} (Source: #{retrieved_context[:source]})",
+     question: 'What are the key principles of quantum computing?'
+   }
+ )
+
+ puts 'Template-based RAG Response:'
+ puts result[:output] if result[:output]
+ puts "Tokens: #{result[:input_tokens]} + #{result[:output_tokens]} = #{result[:input_tokens] + result[:output_tokens]}"
+ puts
+
+ # RAG Example 2: Using simple prompt generation with embedded context
+ puts '=== RAG Example 2: Simple prompt with embedded context ==='
+
+ # Build your RAG prompt manually for maximum control
+ rag_prompt = <<~PROMPT
+   You are an expert assistant. Use the following context to answer the question accurately.
+
+   Context:
+   #{retrieved_context[:content]}
+   Source: #{retrieved_context[:source]}
+
+   Question: What are the practical applications of quantum computing?
+
+   Please provide a comprehensive answer based on the context above.
+ PROMPT
+
+ # Use the new simple API
+ response = LlmConductor.generate(
+   model: 'gpt-3.5-turbo',
+   prompt: rag_prompt
+ )
+
+ puts 'Simple RAG Response:'
+ puts response.output if response.success?
+ puts "Tokens used: #{response.total_tokens}"
+ puts "Estimated cost: $#{response.estimated_cost}"
+ puts
+
+ # RAG Example 3: Multi-document RAG with multiple contexts
+ puts '=== RAG Example 3: Multi-document RAG ==='
+
+ contexts = [
+   'Quantum computers use qubits that can exist in superposition states...',
+   'Current quantum computers are limited by decoherence and error rates...',
+   'Major companies like IBM, Google, and Microsoft are investing heavily in quantum research...'
+ ]
+
+ multi_doc_prompt = <<~PROMPT
+   Based on the following multiple sources, provide a comprehensive answer:
+
+   #{contexts.map.with_index { |ctx, i| "Source #{i + 1}: #{ctx}" }.join("\n\n")}
+
+   Question: What is the current state and future outlook of quantum computing?
+ PROMPT
+
+ response = LlmConductor.generate(prompt: multi_doc_prompt)
+
+ puts 'Multi-document RAG Response:'
+ puts response.output if response.success?
+ puts "Success: #{response.success?}"
+ puts
+
+ # RAG Example 4: Error handling in RAG scenarios
+ puts '=== RAG Example 4: Error handling ==='
+
+ begin
+   response = LlmConductor.generate(
+     model: 'invalid-model',
+     prompt: 'This should fail gracefully'
+   )
+
+   if response.success?
+     puts response.output
+   else
+     puts "RAG Error handled gracefully: #{response.metadata[:error]}"
+   end
+ rescue StandardError => e
+   puts "Exception in RAG: #{e.message}"
+ end
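These scripts assume the relevant context has already been retrieved. To illustrate the retrieval half of RAG, here is a naive keyword-overlap ranker that could select contexts before calling LlmConductor.generate; real systems would use embeddings, and the helper below is only a sketch:

# Rank candidate documents by keyword overlap with the question.
def rank_contexts(documents, question, limit: 2)
  terms = question.downcase.scan(/\w+/).uniq
  documents.max_by(limit) { |doc| (doc.downcase.scan(/\w+/).uniq & terms).size }
end

docs = [
  'Qubits exist in superposition states...',
  'Decoherence and error rates limit current hardware...'
]
puts rank_contexts(docs, 'What limits current quantum hardware?', limit: 1)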
@@ -0,0 +1,48 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Example of simple LlmConductor usage
+ require_relative '../lib/llm_conductor'
+
+ # Configure the gem (optional - will use environment variables by default)
+ LlmConductor.configure do |config|
+   config.default_model = 'gpt-3.5-turbo'
+   config.openai(api_key: ENV['OPENAI_API_KEY'])
+ end
+
+ # Simple text generation
+ response = LlmConductor.generate(
+   model: 'gpt-3.5-turbo',
+   prompt: 'Explain quantum computing in simple terms'
+ )
+
+ puts response.output
+ puts "Tokens used: #{response.total_tokens}"
+ puts "Cost: $#{response.estimated_cost}" if response.estimated_cost
+
+ # Example with default model (from configuration)
+ response2 = LlmConductor.generate(
+   prompt: 'What are the benefits of renewable energy?'
+ )
+
+ puts "\n--- Second Example ---"
+ puts response2.output
+ puts "Input tokens: #{response2.input_tokens}"
+ puts "Output tokens: #{response2.output_tokens}"
+ puts "Success: #{response2.success?}"
+
+ # Example with error handling
+ begin
+   response3 = LlmConductor.generate(
+     model: 'invalid-model',
+     prompt: 'This should fail'
+   )
+
+   if response3.success?
+     puts response3.output
+   else
+     puts "Error: #{response3.metadata[:error]}"
+   end
+ rescue StandardError => e
+   puts "Exception: #{e.message}"
+ end
@@ -0,0 +1,33 @@
+ # frozen_string_literal: true
+
+ module LlmConductor
+   # Factory class for creating appropriate LLM client instances based on model and vendor
+   class ClientFactory
+     def self.build(model:, type:, vendor: nil)
+       vendor ||= determine_vendor(model)
+
+       client_class = case vendor
+                      when :openai, :gpt
+                        Clients::GptClient
+                      when :openrouter
+                        Clients::OpenrouterClient
+                      when :ollama
+                        Clients::OllamaClient
+                      else
+                        raise ArgumentError,
+                              "Unsupported vendor: #{vendor}. Supported vendors: openai, openrouter, ollama"
+                      end
+
+       client_class.new(model:, type:)
+     end
+
+     def self.determine_vendor(model)
+       case model
+       when /^gpt/i
+         :openai
+       else
+         :ollama # Default to Ollama for non-specific model names
+       end
+     end
+   end
+ end
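Given the regex in determine_vendor, the model name alone selects the client unless a vendor is passed explicitly. A usage sketch (the :custom type value is illustrative):

LlmConductor::ClientFactory.build(model: 'gpt-4', type: :custom)
# => Clients::GptClient instance (name matches /^gpt/i)

LlmConductor::ClientFactory.build(model: 'llama3', type: :custom)
# => Clients::OllamaClient instance (fallback vendor)

LlmConductor::ClientFactory.build(model: 'mistral-7b', type: :custom, vendor: :openrouter)
# => Clients::OpenrouterClient instance (explicit vendor override)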