llm_orchestrator 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/llm_orchestrator/chain.rb +7 -4
- data/lib/llm_orchestrator/llm.rb +43 -23
- data/lib/llm_orchestrator/memory.rb +9 -4
- data/lib/llm_orchestrator/prompt.rb +9 -5
- data/lib/llm_orchestrator/version.rb +1 -1
- data/lib/llm_orchestrator.rb +42 -6
- metadata +8 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a207e199498828a5b42e113f2183a44a34f5864f0d9cd0212fc5eba187fa1a4c
+  data.tar.gz: 5cdef23a3b55c8178f9e1838a9973baf0fc39c9bb30476e972d66482e69f790f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6d2ebd79ca45384ef7078abe1e623266040bcf8703edaeaafbdd367c0c6cfd1b0fafed78cfe4392dd206dd90ab56b3e57f2ac56ac3fdc131f43f311bf5d3a924
+  data.tar.gz: 9b65087f747ce7d6376fed20495d0f13047093e58b890679edb8a3c4d5c72ba468f23ff1b9dd4f09610215a11a0b4853c170bcf01eb3ac43337742ebda65d682
data/lib/llm_orchestrator/chain.rb
CHANGED
@@ -1,19 +1,22 @@
+# frozen_string_literal: true
 module LlmOrchestrator
+  # Chain represents a sequence of processing steps for LLM operations
+  # It allows for composing multiple LLM operations together with memory persistence
   class Chain
     def initialize(memory: nil)
      @steps = []
      @memory = memory || Memory.new
    end
-
+
    def add_step(&block)
      @steps << block
      self
    end
-
+
    def run(input)
      @steps.reduce(input) do |result, step|
        output = step.call(result, @memory)
-        @memory.add_message(
+        @memory.add_message("assistant", output) if output.is_a?(String)
        output
      end
    end
@@ -22,4 +25,4 @@ module LlmOrchestrator
      @memory.clear
    end
  end
-end
+end
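Taken together, the new run behavior means any step that returns a String is now persisted to memory as an "assistant" message. A minimal usage sketch against the 0.2.0 API; the step bodies are illustrative, not from the gem:

  require "llm_orchestrator"

  memory = LlmOrchestrator::Memory.new
  chain  = LlmOrchestrator::Chain.new(memory: memory)

  # Each step receives the previous result and the shared memory;
  # add_step returns self, so steps can be chained.
  chain.add_step { |input, _memory| input.strip }
       .add_step { |text, _memory| "Summarize: #{text}" }

  result = chain.run("  some raw text  ")
  # memory now holds the string outputs of both steps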
data/lib/llm_orchestrator/llm.rb
CHANGED
@@ -1,63 +1,83 @@
-
-
-require
+# frozen_string_literal: true
+
+require "openai"
+require "anthropic"
 
 module LlmOrchestrator
+  # Base class for LLM providers
+  # Defines the interface that all LLM implementations must follow
   class LLM
-    def initialize(api_key: nil)
+    def initialize(api_key: nil, model: nil, temperature: nil, max_tokens: nil)
       @api_key = api_key
+      @model = model
+      @temperature = temperature
+      @max_tokens = max_tokens
     end
-
+
     def generate(prompt, context: nil, **options)
       raise NotImplementedError, "Subclasses must implement generate method"
     end
   end
-
+
+  # OpenAI LLM provider implementation
+  # Handles interactions with OpenAI's GPT models
   class OpenAI < LLM
-    def initialize(api_key: nil)
+    def initialize(api_key: nil, model: nil, temperature: nil, max_tokens: nil)
       super
-      @api_key ||= LlmOrchestrator.configuration.
+      @api_key ||= LlmOrchestrator.configuration.openai.api_key
       @client = ::OpenAI::Client.new(access_token: @api_key)
+      @model = model || LlmOrchestrator.configuration.openai.model
+      @temperature = temperature || LlmOrchestrator.configuration.openai.temperature
+      @max_tokens = max_tokens || LlmOrchestrator.configuration.openai.max_tokens
     end
 
+    # rubocop:disable Metrics/MethodLength
     def generate(prompt, context: nil, **options)
       messages = []
-      messages << { role:
-      messages << { role:
-
+      messages << { role: "system", content: context } if context
+      messages << { role: "user", content: prompt }
+
       response = @client.chat(
         parameters: {
-          model: options[:model] ||
+          model: options[:model] || @model,
           messages: messages,
-          temperature: options[:temperature] ||
+          temperature: options[:temperature] || @temperature
         }
       )
-
-      response.dig(
+
+      response.dig("choices", 0, "message", "content")
     end
+    # rubocop:enable Metrics/MethodLength
   end
 
+  # Anthropic LLM provider implementation
+  # Handles interactions with Anthropic's Claude models
   class Anthropic < LLM
-    def initialize(api_key: nil)
+    def initialize(api_key: nil, model: nil, temperature: nil, max_tokens: nil)
       super
-      @api_key ||= LlmOrchestrator.configuration.
+      @api_key ||= LlmOrchestrator.configuration.claude.api_key
       @client = ::Anthropic::Client.new(access_token: @api_key)
+      @model = model || LlmOrchestrator.configuration.claude.model
+      @temperature = temperature || LlmOrchestrator.configuration.claude.temperature
+      @max_tokens = max_tokens || LlmOrchestrator.configuration.claude.max_tokens
     end
 
+    # rubocop:disable Metrics/MethodLength
     def generate(prompt, context: nil, **options)
       response = @client.messages(
         parameters: {
-          model: options[:model] ||
+          model: options[:model] || @model,
           system: context,
           messages: [
-            { role:
+            { role: "user", content: prompt }
           ],
-          temperature: options[:temperature] ||
-          max_tokens: options[:max_tokens] ||
+          temperature: options[:temperature] || @temperature,
+          max_tokens: options[:max_tokens] || @max_tokens
         }
       )
-
+
       response.content.first.text
     end
+    # rubocop:enable Metrics/MethodLength
   end
-end
+end
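The constructor change is the substance of this file: model, temperature, and max_tokens can now be pinned per instance, with per-call options still taking precedence and the global configuration supplying the final fallback. A hedged sketch (the API key source and the override model name are placeholders):

  llm = LlmOrchestrator::OpenAI.new(
    api_key: ENV["OPENAI_API_KEY"], # placeholder; falls back to configuration.openai.api_key
    model: "gpt-3.5-turbo",
    temperature: 0.2
  )

  llm.generate("Explain #reduce in one sentence.")
  # Per-call options override the instance settings:
  llm.generate("Same question, different model.", model: "gpt-4") # model name illustrative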
data/lib/llm_orchestrator/memory.rb
CHANGED
@@ -1,10 +1,15 @@
+# frozen_string_literal: true
+
 module LlmOrchestrator
+  # Memory manages conversation history and context for LLM interactions
+  # Handles message storage, token limits, and context management
   class Memory
     attr_reader :messages
 
-
+    # Input prompts of 4-6K are ideal
+    def initialize(max_tokens: 2000)
       @messages = []
-      @max_tokens =
+      @max_tokens = max_tokens # Adjust based on your needs
     end
 
     def add_message(role, content)
@@ -30,8 +35,8 @@ module LlmOrchestrator
 
     def trim_messages
       while exceeds_token_limit? && @messages.size > 1
-        @messages.shift
+        @messages.shift # Remove oldest message
       end
     end
   end
-end
+end
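A small sketch of how Memory is exercised, assuming only what the hunks show (the stored message shape and the token-counting logic are not visible in this diff):

  memory = LlmOrchestrator::Memory.new(max_tokens: 2000)
  memory.add_message("user", "What does Chain#run return?")
  memory.add_message("assistant", "The output of the final step.")

  # Once exceeds_token_limit? is true, trim_messages shifts off the
  # oldest entries until the history fits again.
  memory.messages.size # => 2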
data/lib/llm_orchestrator/prompt.rb
CHANGED
@@ -1,12 +1,16 @@
+# frozen_string_literal: true
+
 module LlmOrchestrator
+  # Prompt handles template-based prompt generation for LLM interactions
+  # Supports variable interpolation and template management
   class Prompt
     attr_reader :template, :variables
-
+
     def initialize(template)
       @template = template
       @variables = extract_variables(template)
     end
-
+
     def format(values = {})
       result = template.dup
       values.each do |key, value|
@@ -14,11 +18,11 @@ module LlmOrchestrator
       end
       result
     end
-
+
     private
-
+
     def extract_variables(template)
       template.scan(/\{(\w+)\}/).flatten.map(&:to_sym)
     end
   end
-end
+end
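The unchanged lines imply the Prompt workflow: {name}-style placeholders are scanned into symbols at construction and filled in by format. A sketch; the substitution output is inferred from the interpolation loop, whose body falls outside the hunks:

  prompt = LlmOrchestrator::Prompt.new("Translate {text} into {language}")
  prompt.variables
  # => [:text, :language]

  prompt.format(text: "hello", language: "French")
  # => "Translate hello into French" (inferred)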
data/lib/llm_orchestrator.rb
CHANGED
@@ -6,25 +6,61 @@ require_relative "llm_orchestrator/chain"
 require_relative "llm_orchestrator/llm"
 require_relative "llm_orchestrator/memory"
 
+# LlmOrchestrator is a framework for managing interactions with Large Language Models (LLMs).
+# It provides a unified interface for working with different LLM providers like OpenAI and Anthropic.
 module LlmOrchestrator
   class Error < StandardError; end
-
+
   class << self
     attr_accessor :configuration
-
+
     def configure
       self.configuration ||= Configuration.new
       yield(configuration) if block_given?
     end
   end
-
+
+  # Configuration class for LlmOrchestrator
+  # Manages global settings like API keys and default LLM provider
   class Configuration
-    attr_accessor :default_llm_provider
+    attr_accessor :default_llm_provider
 
     def initialize
       @default_llm_provider = :openai
-      @
-      @
+      @openai = OpenAIConfig.new
+      @claude = ClaudeConfig.new
+    end
+
+    def openai
+      @openai ||= OpenAIConfig.new
+    end
+
+    def claude
+      @claude ||= ClaudeConfig.new
+    end
+  end
+
+  # Configuration class for OpenAI-specific settings
+  class OpenAIConfig
+    attr_accessor :api_key, :model, :temperature, :max_tokens
+
+    def initialize
+      @api_key = nil
+      @model = "gpt-3.5-turbo"
+      @temperature = 0.7
+      @max_tokens = 1000
+    end
+  end
+
+  # Configuration class for Claude-specific settings
+  class ClaudeConfig
+    attr_accessor :api_key, :model, :temperature, :max_tokens
+
+    def initialize
+      @api_key = nil
+      @model = "claude-3-opus-20240229"
+      @temperature = 0.7
+      @max_tokens = 1000
     end
   end
 end
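The headline change of 0.2.0 is here: provider settings move from flat attributes into nested OpenAIConfig and ClaudeConfig objects. A minimal configure block against the new layout (key values are placeholders):

  LlmOrchestrator.configure do |config|
    config.default_llm_provider = :openai
    config.openai.api_key = ENV["OPENAI_API_KEY"]    # placeholder key source
    config.openai.model   = "gpt-3.5-turbo"
    config.claude.api_key = ENV["ANTHROPIC_API_KEY"] # placeholder key source
    config.claude.max_tokens = 1000
  end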
metadata
CHANGED
@@ -1,42 +1,42 @@
 --- !ruby/object:Gem::Specification
 name: llm_orchestrator
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.2.0
 platform: ruby
 authors:
 - "@aquaflamingo"
 bindir: bin
 cert_chain: []
-date: 2025-01-
+date: 2025-01-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
-  name:
+  name: anthropic
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version:
+        version: 0.3.2
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version:
+        version: 0.3.2
 - !ruby/object:Gem::Dependency
-  name:
+  name: ruby-openai
   requirement: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0
+        version: '6.0'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0
+        version: '6.0'
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement