llm_conductor 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: 94848876574eca5294236d61d79175b6e28fc918c6bcbd0af969d70fc50cc6c8
4
+ data.tar.gz: 18865a4228201006c9ebf84e0aac4bd6b946e1c551a121e323eed827477bb3ca
5
+ SHA512:
6
+ metadata.gz: f16ee4255f54f3daef35a2557cd19fc171dbc238c2525475d95c3209ca5771f07f9ccd7d6e485f6adef979625008c0fb360d5705adea23bd14d987f051b236c0
7
+ data.tar.gz: 62f464b4dcc76ce676f735db190e9a16ba9bee5f10b4dbdbd4c212badbfb7c8a736465bc40cd8228c11e2e1fed6f7afe823d05dfcd2dfc6c0cb689870e0e4acc
data/.DS_Store ADDED
Binary file
data/.rspec ADDED
@@ -0,0 +1,4 @@
1
+ --format documentation
2
+ --color
3
+ --require spec_helper
4
+ --exclude-pattern "cache/**/*"
data/.rubocop.yml ADDED
@@ -0,0 +1,103 @@
1
+ # RuboCop configuration for GitLab CI
2
+ plugins:
3
+ - rubocop-performance
4
+ - rubocop-rake
5
+
6
+ require:
7
+ - rubocop-capybara
8
+ - rubocop-factory_bot
9
+ - rubocop-rspec
10
+
11
+ inherit_from: .rubocop_todo.yml
12
+
13
+ AllCops:
14
+ TargetRubyVersion: 3.4
15
+ NewCops: disable
16
+ SuggestExtensions: false
17
+ Exclude:
18
+ - 'cache/**/*'
19
+ - 'vendor/**/*'
20
+
21
+
22
+ Style/StringLiterals:
23
+ EnforcedStyle: single_quotes
24
+
25
+ Style/StringLiteralsInInterpolation:
26
+ EnforcedStyle: single_quotes
27
+
28
+ Style/HashSyntax:
29
+ EnforcedShorthandSyntax: always
30
+ # This is not a Rails application.
31
+ # Rails/Blank:
32
+ # Enabled: false
33
+
34
+ # Rails/Present:
35
+ # Enabled: false
36
+
37
+ # Rails/TimeZone:
38
+ # Enabled: false
39
+
40
+ Lint/ConstantDefinitionInBlock:
41
+ Enabled: false
42
+
43
+ Metrics/MethodLength:
44
+ Max: 15
45
+
46
+ RSpec/ExampleLength:
47
+ Enabled: false
48
+
49
+ RSpec/MultipleExpectations:
50
+ Enabled: false
51
+
52
+ RSpec/MultipleMemoizedHelpers:
53
+ Enabled: false
54
+
55
+ RSpec/NestedGroups:
56
+ Enabled: false
57
+
58
+ RSpec/ContextWording:
59
+ Enabled: false
60
+
61
+ RSpec/DescribeClass:
62
+ Enabled: false
63
+
64
+ RSpec/MultipleDescribes:
65
+ Enabled: false
66
+
67
+ RSpec/SpecFilePathFormat:
68
+ Enabled: false
69
+
70
+ RSpec/FilePath:
71
+ Enabled: false
72
+
73
+ RSpec/UnspecifiedException:
74
+ Enabled: false
75
+
76
+ RSpec/VerifiedDoubles:
77
+ Enabled: false
78
+
79
+ RSpec/LeakyConstantDeclaration:
80
+ Enabled: false
81
+
82
+ RSpec/DescribeMethod:
83
+ Enabled: false
84
+
85
+ # Disable Ruby version mismatch warning
86
+ Gemspec/RequiredRubyVersion:
87
+ Enabled: false
88
+
89
+ # GitLab CI specific - generate JSON report
90
+ Style/Documentation:
91
+ Enabled: false
92
+
93
+ Metrics/BlockLength:
94
+ Exclude:
95
+ - '*.gemspec'
96
+
97
+ Layout/LineLength:
98
+ Max: 120
99
+
100
+ # Performance cops (from .rubocop_todo.yml)
101
+ Performance/RedundantEqualityComparisonBlock:
102
+ Exclude:
103
+ - 'spec/llm_conductor/performance_spec.rb'
data/.rubocop_todo.yml ADDED
@@ -0,0 +1,54 @@
1
+ # This configuration was generated by
2
+ # `rubocop --auto-gen-config`
3
+ # on 2024-09-12 using RuboCop version 1.80.2.
4
+ # The point is for the user to remove these configuration records
5
+ # one by one as the offenses are removed from the code base.
6
+ # Note that changes in the inspected code, or installation of new
7
+ # versions of RuboCop, may require this file to be generated again.
8
+
9
+ # Offense count: 10
10
+ # This cop supports safe autocorrection (--autocorrect).
11
+ # Configuration parameters: CheckForMethodsWithNoSideEffects.
12
+ Performance/RedundantEqualityComparisonBlock:
13
+ Exclude:
14
+ - 'spec/llm_conductor/performance_spec.rb'
15
+
16
+ # Offense count: 2
17
+ # Configuration parameters: .
18
+ RSpec/ContextWording:
19
+ Exclude:
20
+ - 'spec/llm_conductor/client_factory_spec.rb'
21
+
22
+ # Offense count: 2
23
+ RSpec/DescribeClass:
24
+ Exclude:
25
+ - 'spec/llm_conductor/error_handling_spec.rb'
26
+ - 'spec/llm_conductor/performance_spec.rb'
27
+
28
+ # Offense count: 1
29
+ RSpec/MultipleDescribes:
30
+ Exclude:
31
+ - 'spec/llm_conductor/configuration_spec.rb'
32
+
33
+ # Offense count: 10
34
+ # Configuration parameters: Max, AllowSubject.
35
+ RSpec/NestedGroups:
36
+ Exclude:
37
+ - 'spec/llm_conductor/client_factory_spec.rb'
38
+ - 'spec/llm_conductor/integration_spec.rb'
39
+
40
+ # Offense count: 1
41
+ RSpec/SpecFilePathFormat:
42
+ Exclude:
43
+ - 'spec/llm_conductor/integration_spec.rb'
44
+
45
+ # Offense count: 1
46
+ RSpec/UnspecifiedException:
47
+ Exclude:
48
+ - 'spec/llm_conductor/error_handling_spec.rb'
49
+
50
+ # Offense count: 33
51
+ # Configuration parameters: EnforcedStyle, IgnoreNameless, IgnoreSymbolicNames.
52
+ # SupportedStyles: strict, flexible
53
+ RSpec/VerifiedDoubles:
54
+ Enabled: false
data/.ruby-version ADDED
@@ -0,0 +1 @@
1
+ 3.4.1
data/README.md ADDED
@@ -0,0 +1,413 @@
1
+ # LLM Conductor
2
+
3
+ A powerful Ruby gem for orchestrating multiple Language Model providers with a unified, modern interface. LLM Conductor provides seamless integration with OpenAI GPT and Ollama with advanced prompt management, data building patterns, and comprehensive response handling.
4
+
5
+ ## Features
6
+
7
+ 🚀 **Multi-Provider Support** - OpenAI GPT and Ollama with automatic vendor detection
8
+ 🎯 **Unified Modern API** - Simple `LlmConductor.generate()` interface with rich Response objects
9
+ 📝 **Advanced Prompt Management** - Registrable prompt classes with inheritance and templating
10
+ 🏗️ **Data Builder Pattern** - Structured data preparation for complex LLM inputs
11
+ ⚡ **Smart Configuration** - Rails-style configuration with environment variable support
12
+ 💰 **Cost Tracking** - Automatic token counting and cost estimation
13
+ 🔧 **Extensible Architecture** - Easy to add new providers and prompt types
14
+ 🛡️ **Robust Error Handling** - Comprehensive error handling with detailed metadata
15
+
16
+ ## Installation
17
+
18
+ Add this line to your application's Gemfile:
19
+
20
+ ```ruby
21
+ gem 'llm_conductor'
22
+ ```
23
+
24
+ And then execute:
25
+
26
+ ```bash
27
+ $ bundle install
28
+ ```
29
+
30
+ Or install it yourself as:
31
+
32
+ ```bash
33
+ $ gem install llm_conductor
34
+ ```
35
+
36
+ ## Quick Start
37
+
38
+ ### 1. Simple Text Generation
39
+
40
+ ```ruby
41
+ # Direct prompt generation - easiest way to get started
42
+ response = LlmConductor.generate(
43
+ model: 'gpt-5-mini',
44
+ prompt: 'Explain quantum computing in simple terms'
45
+ )
46
+
47
+ puts response.output # The generated text
48
+ puts response.total_tokens # Token usage
49
+ puts response.estimated_cost # Cost in USD
50
+ ```
51
+
52
+ ### 2. Template-Based Generation
53
+
54
+ ```ruby
55
+ # Use built-in templates with structured data
56
+ response = LlmConductor.generate(
57
+ model: 'gpt-5-mini',
58
+ type: :summarize_description,
59
+ data: {
60
+ name: 'TechCorp',
61
+ domain_name: 'techcorp.com',
62
+ description: 'An AI company specializing in...'
63
+ }
64
+ )
65
+
66
+ # Response object provides rich information
67
+ if response.success?
68
+ puts "Generated: #{response.output}"
69
+ puts "Tokens: #{response.total_tokens}"
70
+ puts "Cost: $#{response.estimated_cost}"
71
+ else
72
+ puts "Error: #{response.metadata[:error]}"
73
+ end
74
+ ```
75
+
76
+ ## Configuration
77
+
78
+ ### Rails-Style Configuration
79
+
80
+ Create `config/initializers/llm_conductor.rb` (Rails) or configure in your application:
81
+
82
+ ```ruby
83
+ LlmConductor.configure do |config|
84
+ # Default settings
85
+ config.default_model = 'gpt-5-mini'
86
+ config.default_vendor = :openai
87
+ config.timeout = 30
88
+ config.max_retries = 3
89
+ config.retry_delay = 1.0
90
+
91
+ # Provider configurations
92
+ config.openai(
93
+ api_key: ENV['OPENAI_API_KEY'],
94
+ organization: ENV['OPENAI_ORG_ID'] # Optional
95
+ )
96
+
97
+ config.ollama(
98
+ base_url: ENV['OLLAMA_ADDRESS'] || 'http://localhost:11434'
99
+ )
100
+ end
101
+ ```
102
+
103
+ ### Environment Variables
104
+
105
+ The gem automatically detects these environment variables:
106
+
107
+ - `OPENAI_API_KEY` - OpenAI API key
108
+ - `OPENAI_ORG_ID` - OpenAI organization ID (optional)
109
+ - `OLLAMA_ADDRESS` - Ollama server address
110
+
111
+ ## Supported Providers & Models
112
+
113
+ ### OpenAI (Automatic for GPT models)
114
+ ```ruby
115
+ response = LlmConductor.generate(
116
+ model: 'gpt-5-mini', # Auto-detects OpenAI
117
+ prompt: 'Your prompt here'
118
+ )
119
+ ```
120
+
121
+ ### Ollama (Default for non-GPT models)
122
+ ```ruby
123
+ response = LlmConductor.generate(
124
+ model: 'llama3.2', # Auto-detects Ollama for non-GPT models
125
+ prompt: 'Your prompt here'
126
+ )
127
+ ```
128
+
129
+ ## Advanced Features
130
+
131
+ ### 1. Custom Prompt Registration
132
+
133
+ Create reusable, testable prompt classes:
134
+
135
+ ```ruby
136
+ class CompanyAnalysisPrompt < LlmConductor::Prompts::BasePrompt
137
+ def render
138
+ <<~PROMPT
139
+ Company: #{name}
140
+ Domain: #{domain_name}
141
+ Description: #{truncate_text(description, max_length: 1000)}
142
+
143
+ Please analyze this company and provide:
144
+ 1. Core business model
145
+ 2. Target market
146
+ 3. Competitive advantages
147
+ 4. Growth potential
148
+
149
+ Format as JSON.
150
+ PROMPT
151
+ end
152
+ end
153
+
154
+ # Register the prompt
155
+ LlmConductor::PromptManager.register(:detailed_analysis, CompanyAnalysisPrompt)
156
+
157
+ # Use the registered prompt
158
+ response = LlmConductor.generate(
159
+ model: 'gpt-5-mini',
160
+ type: :detailed_analysis,
161
+ data: {
162
+ name: 'TechCorp',
163
+ domain_name: 'techcorp.com',
164
+ description: 'A leading AI company...'
165
+ }
166
+ )
167
+
168
+ # Parse structured responses
169
+ analysis = response.parse_json
170
+ puts analysis
171
+ ```
172
+
173
+ ### 2. Data Builder Pattern
174
+
175
+ Structure complex data for LLM consumption:
176
+
177
+ ```ruby
178
+ class CompanyDataBuilder < LlmConductor::DataBuilder
179
+ def build
180
+ {
181
+ id: source_object.id,
182
+ name: source_object.name,
183
+ description: format_for_llm(source_object.description, max_length: 500),
184
+ industry: extract_nested_data(:data, 'categories', 'primary'),
185
+ metrics: build_metrics,
186
+ summary: build_company_summary,
187
+ domain_name: source_object.domain_name
188
+
189
+ }
190
+ end
191
+
192
+ private
193
+
194
+ def build_metrics
195
+ {
196
+ employees: format_number(source_object.employee_count),
197
+ revenue: format_number(source_object.annual_revenue),
198
+ growth_rate: "#{source_object.growth_rate}%"
199
+ }
200
+ end
201
+
202
+ def build_company_summary
203
+ name = safe_extract(:name, default: 'Company')
204
+ industry = extract_nested_data(:data, 'categories', 'primary')
205
+ "#{name} is a #{industry} company..."
206
+ end
207
+ end
208
+
209
+ # Usage
210
+ company = Company.find(123)
211
+ data = CompanyDataBuilder.new(company).build
212
+
213
+ response = LlmConductor.generate(
214
+ model: 'gpt-5-mini',
215
+ type: :detailed_analysis,
216
+ data: data
217
+ )
218
+ ```
219
+
220
+ ### 3. Built-in Prompt Templates
221
+
222
+ #### Featured Links Extraction
223
+ ```ruby
224
+ response = LlmConductor.generate(
225
+ model: 'gpt-5-mini',
226
+ type: :featured_links,
227
+ data: {
228
+ htmls: '<html>...</html>',
229
+ current_url: 'https://example.com'
230
+ }
231
+ )
232
+ ```
233
+
234
+ #### HTML Summarization
235
+ ```ruby
236
+ response = LlmConductor.generate(
237
+ model: 'gpt-5-mini',
238
+ type: :summarize_htmls,
239
+ data: { htmls: '<html>...</html>' }
240
+ )
241
+ ```
242
+
243
+ #### Description Summarization
244
+ ```ruby
245
+ response = LlmConductor.generate(
246
+ model: 'gpt-5-mini',
247
+ type: :summarize_description,
248
+ data: {
249
+ name: 'Company Name',
250
+ description: 'Long description...',
251
+ industries: ['Tech', 'AI']
252
+ }
253
+ )
254
+ ```
255
+
256
+ #### Custom Templates
257
+ ```ruby
258
+ response = LlmConductor.generate(
259
+ model: 'gpt-5-mini',
260
+ type: :custom,
261
+ data: {
262
+ template: "Analyze this data: %{data}",
263
+ data: "Your data here"
264
+ }
265
+ )
266
+ ```
267
+
268
+ ### 4. Response Object
269
+
270
+ All methods return a rich `LlmConductor::Response` object:
271
+
272
+ ```ruby
273
+ response = LlmConductor.generate(...)
274
+
275
+ # Main content
276
+ response.output # Generated text
277
+ response.success? # Boolean success status
278
+
279
+ # Token information
280
+ response.input_tokens # Input tokens used
281
+ response.output_tokens # Output tokens generated
282
+ response.total_tokens # Total tokens
283
+
284
+ # Cost tracking (for supported models)
285
+ response.estimated_cost # Estimated cost in USD
286
+
287
+ # Metadata
288
+ response.model # Model used
289
+ response.metadata # Hash with vendor, timestamp, etc.
290
+
291
+ # Structured data parsing
292
+ response.parse_json # Parse as JSON
293
+ response.extract_code_block('json') # Extract code blocks
294
+ ```
295
+
296
+ ### 5. Error Handling
297
+
298
+ The gem provides comprehensive error handling:
299
+
300
+ ```ruby
301
+ response = LlmConductor.generate(
302
+ model: 'gpt-5-mini',
303
+ prompt: 'Your prompt'
304
+ )
305
+
306
+ if response.success?
307
+ puts response.output
308
+ else
309
+ puts "Error: #{response.metadata[:error]}"
310
+ puts "Failed model: #{response.model}"
311
+ end
312
+
313
+ # Exception handling for critical errors
314
+ begin
315
+ response = LlmConductor.generate(...)
316
+ rescue LlmConductor::Error => e
317
+ puts "LLM Conductor error: #{e.message}"
318
+ rescue StandardError => e
319
+ puts "General error: #{e.message}"
320
+ end
321
+ ```
322
+
323
+ ## Extending the Gem
324
+
325
+ ### Adding Custom Clients
326
+
327
+ ```ruby
328
+ module LlmConductor
329
+ module Clients
330
+ class CustomClient < BaseClient
331
+ private
332
+
333
+ def generate_content(prompt)
334
+ # Implement your provider's API call
335
+ your_custom_api.generate(prompt)
336
+ end
337
+ end
338
+ end
339
+ end
340
+ ```
341
+
342
+ ### Adding Prompt Types
343
+
344
+ ```ruby
345
+ module LlmConductor
346
+ module Prompts
347
+ def prompt_custom_analysis(data)
348
+ <<~PROMPT
349
+ Custom analysis for: #{data[:subject]}
350
+ Context: #{data[:context]}
351
+
352
+ Please provide detailed analysis.
353
+ PROMPT
354
+ end
355
+ end
356
+ end
357
+ ```
358
+
359
+ ## Examples
360
+
361
+ Check the `/examples` directory for comprehensive usage examples:
362
+
363
+ - `simple_usage.rb` - Basic text generation
364
+ - `prompt_registration.rb` - Custom prompt classes
365
+ - `data_builder_usage.rb` - Data structuring patterns
366
+ - `rag_usage.rb` - RAG implementation examples
367
+
368
+ ## Development
369
+
370
+ After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests.
371
+
372
+ ```bash
373
+ # Install dependencies
374
+ bin/setup
375
+
376
+ # Run tests
377
+ rake spec
378
+
379
+ # Run RuboCop
380
+ rubocop
381
+
382
+ # Interactive console
383
+ bin/console
384
+ ```
385
+
386
+ ## Testing
387
+
388
+ The gem includes comprehensive test coverage with unit, integration, and performance tests. See `spec/TESTING_GUIDE.md` for detailed testing information.
389
+
390
+ ## Performance
391
+
392
+ - **Token Efficiency**: Automatic prompt optimization and token counting
393
+ - **Cost Tracking**: Real-time cost estimation for all supported models
394
+ - **Response Caching**: Built-in mechanisms to avoid redundant API calls
395
+ - **Async Support**: Ready for async/background processing
396
+
397
+ ## Contributing
398
+
399
+ Bug reports and pull requests are welcome on GitHub at https://github.com/ekohe/llm_conductor.
400
+
401
+ 1. Fork the repository
402
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
403
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
404
+ 4. Push to the branch (`git push origin my-new-feature`)
405
+ 5. Create a new Pull Request
406
+
407
+ ## License
408
+
409
+ The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
410
+
411
+ ## Code of Conduct
412
+
413
+ This project is intended to be a safe, welcoming space for collaboration. Contributors are expected to adhere to the [code of conduct](https://github.com/ekohe/llm_conductor/blob/main/CODE_OF_CONDUCT.md).
data/Rakefile ADDED
@@ -0,0 +1,12 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'bundler/gem_tasks'
4
+ require 'rspec/core/rake_task'
5
+
6
+ RSpec::Core::RakeTask.new(:spec)
7
+
8
+ require 'rubocop/rake_task'
9
+
10
+ RuboCop::RakeTask.new
11
+
12
+ task default: %i[spec rubocop]
@@ -0,0 +1,27 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Example configuration file for LlmConductor
4
+ # Place this file in config/initializers/llm_conductor.rb in Rails applications
5
+
6
+ LlmConductor.configure do |config|
7
+ # Default settings
8
+ config.default_model = 'gpt-5-mini'
9
+ config.default_vendor = :openai
10
+ config.timeout = 30
11
+ config.max_retries = 3
12
+ config.retry_delay = 1.0
13
+
14
+ # Configure providers
15
+ config.openai(
16
+ api_key: ENV['OPENAI_API_KEY'],
17
+ organization: ENV['OPENAI_ORG_ID'] # optional
18
+ )
19
+
20
+ config.ollama(
21
+ base_url: ENV['OLLAMA_ADDRESS'] || 'http://localhost:11434'
22
+ )
23
+
24
+ config.openrouter(
25
+ api_key: ENV['OPENROUTER_API_KEY']
26
+ )
27
+ end