llm_orchestrator 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +9 -0
- data/LICENSE.txt +21 -0
- data/README.md +115 -0
- data/lib/llm_orchestrator/chain.rb +25 -0
- data/lib/llm_orchestrator/llm.rb +63 -0
- data/lib/llm_orchestrator/memory.rb +37 -0
- data/lib/llm_orchestrator/prompt.rb +24 -0
- data/lib/llm_orchestrator/version.rb +5 -0
- data/lib/llm_orchestrator.rb +30 -0
- metadata +123 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: b62d87e5a08bd0e6b5f0f8093a02f6ef0943b619bd0819c8f5ccb9ebfdd7ad4f
+  data.tar.gz: fc1e95d85209765113ad84ffb550d5c017b3dfef5536583ee777f1cc911f3a7b
+SHA512:
+  metadata.gz: 541b681b947fc37b858bed7664ea0d099cd9fda4d3ae41858e0038f805af00def623dd432e920cb5e66b22ded2eaad55447495ad068e03b0d48e9ef00b418cd2
+  data.tar.gz: 2dc56f94da4305a0ad3ff32ae6504d3a7328b2fd67d059644d96d0ecdbf9efeee79d4528c51eb397062bd9f2a941d33c05fadf4510a33c1c3035b483a439bf68
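The two sets of digests above cover the archives packed inside the released gem (metadata.gz and data.tar.gz). As an illustration of how a locally fetched copy could be checked against the published SHA256 value for data.tar.gz, here is a minimal Ruby sketch; it assumes `gem fetch llm_orchestrator -v 0.1.0` has produced `llm_orchestrator-0.1.0.gem` in the current directory, and only the expected digest is taken from the diff above:

```ruby
# Sketch: verify the SHA256 of data.tar.gz inside a fetched .gem file.
# A .gem file is a plain tar archive containing metadata.gz and data.tar.gz.
require 'digest'
require 'rubygems/package'

expected = 'fc1e95d85209765113ad84ffb550d5c017b3dfef5536583ee777f1cc911f3a7b'
actual   = nil

File.open('llm_orchestrator-0.1.0.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      # Hash the inner archive whose checksum is published in checksums.yaml.
      actual = Digest::SHA256.hexdigest(entry.read) if entry.full_name == 'data.tar.gz'
    end
  end
end

puts(actual == expected ? 'data.tar.gz checksum matches' : 'checksum mismatch')
```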
data/CHANGELOG.md
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2025 TODO: Write your name
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,115 @@
+# LlmOrchestrator
+
+A lightweight Ruby framework for orchestrating LLM operations with OpenAI and Anthropic Claude. This gem provides a simple way to:
+
+- Manage prompt templates
+- Chain LLM operations
+- Maintain conversation context
+- Switch between OpenAI and Claude providers
+
+## Installation
+
+Add this line to your application's Gemfile:
+
+```ruby
+gem 'llm_orchestrator'
+```
+
+And then execute:
+
+```bash
+$ bundle install
+```
+
+## Configuration
+
+Configure your API keys:
+
+```ruby
+LlmOrchestrator.configure do |config|
+  config.openai_api_key = ENV['OPENAI_API_KEY']
+  config.claude_api_key = ENV['CLAUDE_API_KEY']
+  config.default_llm_provider = :claude # or :openai
+end
+```
+
+## Basic Usage
+
+### Prompt Templates
+
+```ruby
+# Create and use a prompt template
+prompt = LlmOrchestrator::Prompt.new("Answer this {type} question: {question}")
+formatted = prompt.format(
+  type: "math",
+  question: "What is 2+2?"
+)
+```
+
+### LLM Providers
+
+```ruby
+# Using OpenAI
+openai = LlmOrchestrator::OpenAI.new
+response = openai.generate("What is 2+2?", model: "gpt-3.5-turbo")
+
+# Using Claude
+claude = LlmOrchestrator::Anthropic.new
+response = claude.generate("What is 2+2?", model: "claude-3-opus-20240229")
+```
+
+### Chains with Memory
+
+```ruby
+# Create a chain with memory
+memory = LlmOrchestrator::Memory.new
+chain = LlmOrchestrator::Chain.new(memory: memory)
+
+# Add processing steps
+chain.add_step do |input, memory|
+  llm = LlmOrchestrator::Anthropic.new
+  llm.generate(input, context: memory.context_string)
+end
+
+# Run multiple interactions with context
+result1 = chain.run("What is the capital of France?")
+result2 = chain.run("What is its population?") # Uses previous context
+
+# Clear memory when needed
+chain.clear_memory
+```
+
+## Development
+
+After checking out the repo, run:
+
+```bash
+$ bundle install
+$ bundle exec rspec
+```
+
+The tests use VCR to record HTTP interactions. To run the tests with real API calls:
+
+1. Set up your environment variables:
+```bash
+export OPENAI_API_KEY="your-key-here"
+export CLAUDE_API_KEY="your-key-here"
+```
+
+2. Delete the VCR cassettes (if they exist):
+```bash
+rm -rf spec/fixtures/vcr_cassettes
+```
+
+3. Run the tests:
+```bash
+bundle exec rspec
+```
+
+## Contributing
+
+Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/llm_orchestrator.
+
+## License
+
+The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
data/lib/llm_orchestrator/chain.rb
ADDED
@@ -0,0 +1,25 @@
+module LlmOrchestrator
+  class Chain
+    def initialize(memory: nil)
+      @steps = []
+      @memory = memory || Memory.new
+    end
+
+    def add_step(&block)
+      @steps << block
+      self
+    end
+
+    def run(input)
+      @steps.reduce(input) do |result, step|
+        output = step.call(result, @memory)
+        @memory.add_message('assistant', output) if output.is_a?(String)
+        output
+      end
+    end
+
+    def clear_memory
+      @memory.clear
+    end
+  end
+end
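A short usage sketch of the Chain class above: `run` threads each step's return value into the next block via `reduce`, and any String output is also appended to the shared memory. The two-step summarization flow below is illustrative only and assumes `LlmOrchestrator.configure` has already set a Claude API key:

```ruby
memory = LlmOrchestrator::Memory.new
chain  = LlmOrchestrator::Chain.new(memory: memory)

# Step 1 answers the original input.
chain.add_step do |input, mem|
  LlmOrchestrator::Anthropic.new.generate(input, context: mem.context_string)
end

# Step 2 receives step 1's output as its input and condenses it.
chain.add_step do |previous_output, mem|
  LlmOrchestrator::Anthropic.new.generate(
    "Summarize in one sentence: #{previous_output}",
    context: mem.context_string
  )
end

summary = chain.run("Explain how DNS resolution works.")
```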
data/lib/llm_orchestrator/llm.rb
ADDED
@@ -0,0 +1,63 @@
+require 'openai'
+require 'anthropic'
+require 'pry'
+
+module LlmOrchestrator
+  class LLM
+    def initialize(api_key: nil)
+      @api_key = api_key
+    end
+
+    def generate(prompt, context: nil, **options)
+      raise NotImplementedError, "Subclasses must implement generate method"
+    end
+  end
+
+  class OpenAI < LLM
+    def initialize(api_key: nil)
+      super
+      @api_key ||= LlmOrchestrator.configuration.openai_api_key
+      @client = ::OpenAI::Client.new(access_token: @api_key)
+    end
+
+    def generate(prompt, context: nil, **options)
+      messages = []
+      messages << { role: 'system', content: context } if context
+      messages << { role: 'user', content: prompt }
+
+      response = @client.chat(
+        parameters: {
+          model: options[:model] || 'gpt-3.5-turbo',
+          messages: messages,
+          temperature: options[:temperature] || 0.7
+        }
+      )
+
+      response.dig('choices', 0, 'message', 'content')
+    end
+  end
+
+  class Anthropic < LLM
+    def initialize(api_key: nil)
+      super
+      @api_key ||= LlmOrchestrator.configuration.claude_api_key
+      @client = ::Anthropic::Client.new(access_token: @api_key)
+    end
+
+    def generate(prompt, context: nil, **options)
+      response = @client.messages(
+        parameters: {
+          model: options[:model] || 'claude-3-opus-20240229',
+          system: context,
+          messages: [
+            { role: 'user', content: prompt }
+          ],
+          temperature: options[:temperature] || 0.7,
+          max_tokens: options[:max_tokens] || 1000
+        }
+      )
+
+      response.content.first.text
+    end
+  end
+end
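Because the LLM base class above only defines the constructor and an abstract `generate`, additional providers can be added by subclassing it. A minimal sketch follows; the `EchoLLM` class is hypothetical and not part of the gem:

```ruby
# Hypothetical provider illustrating the subclassing contract:
# implement #generate(prompt, context:, **options) and return a String.
module LlmOrchestrator
  class EchoLLM < LLM
    def generate(prompt, context: nil, **options)
      prefix = context ? "[#{context}] " : ""
      "#{prefix}echo: #{prompt}"
    end
  end
end

llm = LlmOrchestrator::EchoLLM.new
puts llm.generate("hello", context: "test run")
# => "[test run] echo: hello"
```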
data/lib/llm_orchestrator/memory.rb
ADDED
@@ -0,0 +1,37 @@
+module LlmOrchestrator
+  class Memory
+    attr_reader :messages
+
+    def initialize
+      @messages = []
+      @max_tokens = 2000 # Adjust based on your needs
+    end
+
+    def add_message(role, content)
+      @messages << { role: role, content: content }
+      trim_messages if exceeds_token_limit?
+    end
+
+    def clear
+      @messages.clear
+    end
+
+    def context_string
+      @messages.map { |msg| "#{msg[:role]}: #{msg[:content]}" }.join("\n")
+    end
+
+    private
+
+    def exceeds_token_limit?
+      # Simple approximation: 4 chars ~= 1 token
+      total_chars = @messages.sum { |msg| msg[:content].length }
+      (total_chars / 4) > @max_tokens
+    end
+
+    def trim_messages
+      while exceeds_token_limit? && @messages.size > 1
+        @messages.shift # Remove oldest message
+      end
+    end
+  end
+end
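The trimming logic above treats roughly 4 characters as one token, so with the default `@max_tokens` of 2000 the oldest messages start being dropped once the combined content passes about 8000 characters. A small sketch of that behaviour (message sizes are arbitrary):

```ruby
memory = LlmOrchestrator::Memory.new

5.times do |i|
  # Each message is ~2010 characters, i.e. ~500 tokens under the 4-chars-per-token rule.
  memory.add_message('user', "message #{i} " + 'x' * 2000)
end

# ~2500 estimated tokens exceeds the 2000-token budget, so the oldest
# messages have been shifted off the front of the history.
puts memory.messages.size                                   # => 3, not 5
puts memory.context_string.start_with?('user: message 0')   # => false
```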
data/lib/llm_orchestrator/prompt.rb
ADDED
@@ -0,0 +1,24 @@
+module LlmOrchestrator
+  class Prompt
+    attr_reader :template, :variables
+
+    def initialize(template)
+      @template = template
+      @variables = extract_variables(template)
+    end
+
+    def format(values = {})
+      result = template.dup
+      values.each do |key, value|
+        result.gsub!("{#{key}}", value.to_s)
+      end
+      result
+    end
+
+    private
+
+    def extract_variables(template)
+      template.scan(/\{(\w+)\}/).flatten.map(&:to_sym)
+    end
+  end
+end
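A quick sketch of the Prompt class above: `variables` exposes the placeholder names found in the template, and `format` substitutes only the keys it is given, leaving any other `{placeholder}` untouched:

```ruby
prompt = LlmOrchestrator::Prompt.new("Translate {text} into {language}")

prompt.variables
# => [:text, :language]

prompt.format(text: "good morning", language: "French")
# => "Translate good morning into French"

# Keys that are not supplied are left in place rather than raising:
prompt.format(text: "good morning")
# => "Translate good morning into {language}"
```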
data/lib/llm_orchestrator.rb
ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+require_relative "llm_orchestrator/version"
+require_relative "llm_orchestrator/prompt"
+require_relative "llm_orchestrator/chain"
+require_relative "llm_orchestrator/llm"
+require_relative "llm_orchestrator/memory"
+
+module LlmOrchestrator
+  class Error < StandardError; end
+
+  class << self
+    attr_accessor :configuration
+
+    def configure
+      self.configuration ||= Configuration.new
+      yield(configuration) if block_given?
+    end
+  end
+
+  class Configuration
+    attr_accessor :default_llm_provider, :openai_api_key, :claude_api_key
+
+    def initialize
+      @default_llm_provider = :openai
+      @openai_api_key = nil
+      @claude_api_key = nil
+    end
+  end
+end
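The Configuration object above is what the OpenAI and Anthropic classes fall back to when no `api_key:` is passed to their constructors, so a typical setup configures the gem once at boot and then instantiates providers without arguments. A brief sketch:

```ruby
require 'llm_orchestrator'

LlmOrchestrator.configure do |config|
  config.openai_api_key       = ENV['OPENAI_API_KEY']
  config.claude_api_key       = ENV['CLAUDE_API_KEY']
  config.default_llm_provider = :claude
end

# No api_key: argument needed; both clients read LlmOrchestrator.configuration.
claude = LlmOrchestrator::Anthropic.new
openai = LlmOrchestrator::OpenAI.new
```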
metadata
ADDED
@@ -0,0 +1,123 @@
+--- !ruby/object:Gem::Specification
+name: llm_orchestrator
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- "@aquaflamingo"
+bindir: bin
+cert_chain: []
+date: 2025-01-15 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: ruby-openai
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.0'
+- !ruby/object:Gem::Dependency
+  name: anthropic
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.3.2
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.3.2
+- !ruby/object:Gem::Dependency
+  name: rspec
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.12'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.12'
+- !ruby/object:Gem::Dependency
+  name: vcr
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.1'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '6.1'
+- !ruby/object:Gem::Dependency
+  name: webmock
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.18'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.18'
+description: A simple and flexible framework for managing prompts and LLM interactions
+  with OpenAI and Anthropic Claude
+email:
+- aquaflamingo@nitrousmail.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- CHANGELOG.md
+- LICENSE.txt
+- README.md
+- lib/llm_orchestrator.rb
+- lib/llm_orchestrator/chain.rb
+- lib/llm_orchestrator/llm.rb
+- lib/llm_orchestrator/memory.rb
+- lib/llm_orchestrator/prompt.rb
+- lib/llm_orchestrator/version.rb
+homepage: https://github.com/aquaflamingo/llm_orchestrator
+licenses:
+- MIT
+metadata:
+  homepage_uri: https://github.com/aquaflamingo/llm_orchestrator
+  source_code_uri: https://github.com/aquaflamingo/llm_orchestrator
+  changelog_uri: https://github.com/aquaflamingo/llm_orchestrator/blob/main/CHANGELOG.md
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: 3.1.0
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.6.2
+specification_version: 4
+summary: A lightweight ruby framework for orchestrating operations via LLM APIs
+test_files: []