llm_hub 0.1.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rspec +3 -0
- data/.rubocop.yml +13 -0
- data/CHANGELOG.md +5 -0
- data/LICENSE.txt +21 -0
- data/README.md +50 -0
- data/Rakefile +12 -0
- data/examples/basic_usage.rb +50 -0
- data/lib/llm_hub/common/abstract_methods.rb +24 -0
- data/lib/llm_hub/common/client_base.rb +64 -0
- data/lib/llm_hub/common/http_helper.rb +38 -0
- data/lib/llm_hub/completion/client.rb +61 -0
- data/lib/llm_hub/completion/providers/anthropic.rb +50 -0
- data/lib/llm_hub/completion/providers/base.rb +22 -0
- data/lib/llm_hub/completion/providers/openai.rb +58 -0
- data/lib/llm_hub/config.rb +9 -0
- data/lib/llm_hub/embedding/client.rb +59 -0
- data/lib/llm_hub/embedding/providers/base.rb +22 -0
- data/lib/llm_hub/embedding/providers/openai.rb +46 -0
- data/lib/llm_hub/version.rb +5 -0
- data/lib/llm_hub.rb +30 -0
- metadata +135 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 8dd3ffff8762625d2ab27200f05b3e6e2d1c4b79aa458a1497d208eda656db38
+  data.tar.gz: 9e98d2e9bf3a1afb8d071e7740a4aef5c10bfb719f03ac51745609c3dd382a55
+SHA512:
+  metadata.gz: 5a353fcd5c607bd96c30dbdb41103d92cf042c04c04d7b2a1f3982961f52f2e3d60241cf02237f1ac93bfab76e80c6e724ea50e830633e22f5f5bd2d426bf184
+  data.tar.gz: '08262623da034f3091866f2c0e3e983731145091016533f902af1d7e8140de0e24f1c3480dd9e5d35752f4ed6eeb497694329823b914bfb5056a3461e7852bf4'
data/.rspec
ADDED
data/.rubocop.yml
ADDED
data/CHANGELOG.md
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2025 upft-akiranumakura
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,50 @@
+# LlmHub
+
+TODO: Delete this and the text below, and describe your gem
+
+Welcome to your new gem! In this directory, you'll find the files you need to be able to package up your Ruby library into a gem. Put your Ruby code in the file `lib/llm_hub`. To experiment with that code, run `bin/console` for an interactive prompt.
+
+## Installation
+
+Install the gem and add to the application's Gemfile by executing:
+
+```bash
+bundle add llm_hub
+```
+
+If bundler is not being used to manage dependencies, install the gem by executing:
+
+```bash
+gem install llm_hub
+```
+
+## Usage
+
+```ruby
+client = LlmHub::Completion::Client.new(
+  api_key: ENV['OPENAI_API_KEY'],
+  provider: :openai
+)
+
+response = client.ask_single_question(
+  system_prompt: 'You are a helpful assistant.',
+  content: 'What is the capital of Japan?',
+  model_name: 'gpt-4o-mini'
+)
+
+puts response
+```
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
+
+## Contributing
+
+Bug reports and pull requests are welcome on GitHub at https://github.com/akiraNuma/llm_hub.
+
+## License
+
+The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
data/Rakefile
ADDED
data/examples/basic_usage.rb
ADDED
@@ -0,0 +1,50 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'llm_hub'
+
+# Completion example using OpenAI's GPT-4o-mini model
+client = LlmHub::Completion::Client.new(
+  api_key: ENV['OPENAI_API_KEY'],
+  provider: :openai
+)
+
+response = client.ask_single_question(
+  system_prompt: 'You are a helpful assistant.',
+  content: 'What is the capital of Japan?',
+  model_name: 'gpt-4o-mini'
+)
+
+# Check for errors and display if any
+if response[:error]
+  puts "Error occurred: #{response[:error]}"
+else
+  puts "Answer: #{response[:answer]}"
+  puts "Tokens used: #{response[:tokens]}"
+end
+
+# Or, use it simply (returns nil on error)
+puts response[:answer] if response[:answer]
+
+# Embedding example using OpenAI's text-embedding-3-small model
+embedding_client = LlmHub::Embedding::Client.new(
+  api_key: ENV['OPENAI_API_KEY'],
+  provider: :openai
+)
+
+embedding_response = embedding_client.post_embedding(
+  text: 'This is a sample text to generate an embedding.',
+  model_name: 'text-embedding-3-small'
+)
+
+# Check for errors and display if any
+if embedding_response[:error]
+  puts "Error occurred: #{embedding_response[:error]}"
+else
+  puts "Embedding: #{embedding_response[:embedding]&.length} dimensions"
+  puts "Tokens used: #{embedding_response[:tokens]}"
+end
+
+# Or, use it simply (returns nil on error)
+puts embedding_response[:embedding]&.length if embedding_response[:embedding]
data/lib/llm_hub/common/abstract_methods.rb
ADDED
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Common
+    # Module to define abstract methods for base classes
+    module AbstractMethods
+      def self.included(base)
+        base.extend(ClassMethods)
+      end
+
+      # Provides class-level methods for defining abstract methods
+      module ClassMethods
+        # Define abstract methods that must be implemented by subclasses
+        def abstract_methods(*methods)
+          methods.each do |method_name|
+            define_method(method_name) do |*_args|
+              raise NotImplementedError, "#{self.class}##{method_name} must be implemented"
+            end
+          end
+        end
+      end
+    end
+  end
+end
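Both provider base classes below rely on this module: `abstract_methods` metaprograms a stub for each declared name that raises `NotImplementedError` until a subclass overrides it. A minimal sketch of that behavior (`StubProvider` is a hypothetical class for illustration, not part of the gem):

```ruby
require 'llm_hub'

# Hypothetical class, used only to demonstrate AbstractMethods.
class StubProvider
  include LlmHub::Common::AbstractMethods

  # Declare :url as abstract; calling it before overriding raises.
  abstract_methods :url
end

StubProvider.new.url
# => NotImplementedError: StubProvider#url must be implemented
```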
data/lib/llm_hub/common/client_base.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Common
+    # Base client class for LLM providers
+    # Provides common functionality for API clients including HTTP requests,
+    # retry logic, and provider initialization
+    class ClientBase
+      include LlmHub::Common::HttpHelper
+
+      attr_reader :api_key, :provider, :retry_count
+
+      # Initialize a new client
+      # @param api_key [String] API key for the provider (required)
+      # @param provider [Symbol, String] Provider name (required)
+      def initialize(api_key:, provider:)
+        @api_key = api_key
+        @provider = provider
+        @retry_count = LlmHub::Config::RETRY_COUNT
+      end
+
+      protected
+
+      def create_provider_client
+        # Use PROVIDER_CLASSES defined in subclasses
+        provider_classes = self.class::PROVIDER_CLASSES
+        # Convert to symbol to support both string and symbol
+        provider_key = @provider.to_sym
+        provider_class = provider_classes[provider_key]
+
+        raise ArgumentError, "Unknown provider: #{@provider}." unless provider_class
+
+        provider_class.new(@api_key)
+      end
+
+      def with_retry(&_block)
+        retries = 0
+        begin
+          yield
+        rescue StandardError => e
+          retries += 1
+          retry if retries < @retry_count
+          # Raise the exception if the last retry fails
+          raise "Request failed after #{@retry_count} retries: [#{e.class}] #{e.message}"
+        end
+      end
+
+      def make_request(url, request_body, headers)
+        res = http_post(url, request_body, headers)
+
+        unless res.is_a?(Net::HTTPSuccess)
+          error_message = begin
+            JSON.parse(res.body).to_s
+          rescue JSON::ParserError
+            res.body.to_s
+          end
+          raise "HTTP #{res.code} Error: #{error_message}"
+        end
+
+        JSON.parse(res.body)
+      end
+    end
+  end
+end
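`with_retry` swallows each `StandardError` until the attempt counter reaches `Config::RETRY_COUNT`, then re-raises a `RuntimeError` naming the original exception; because the counter is incremented before the `retries < @retry_count` check, the block runs at most `RETRY_COUNT` times in total. A rough sketch of the observable behavior, assuming `Config::RETRY_COUNT` is 3 (config.rb is not shown in this diff, so the constant's value is an assumption):

```ruby
require 'llm_hub'

client = LlmHub::Completion::Client.new(api_key: 'dummy', provider: :openai)

attempts = 0
begin
  # with_retry is protected; #send is used here purely for illustration.
  client.send(:with_retry) do
    attempts += 1
    raise 'boom'
  end
rescue RuntimeError => e
  puts attempts  # => 3 (one initial attempt plus two retries, under the assumption above)
  puts e.message # => "Request failed after 3 retries: [RuntimeError] boom"
end
```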
data/lib/llm_hub/common/http_helper.rb
ADDED
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Common
+    # Module to provide HTTP helper methods
+    module HttpHelper
+      # Call API endpoint for Post
+      #
+      # @param url [String] URL
+      # @param request_body [String] Request body
+      # @param headers [Hash] Headers
+      # @return [Hash] Response
+      def http_post(url, request_body, headers = {})
+        # generate uri http
+        uri = URI.parse(url)
+        http = http_client(uri)
+        # http request
+        http.post(uri.path, request_body.to_json, headers)
+      end
+
+      # Return HTTP client based on Uri
+      #
+      # @param uri [URI::HTTP] Uri
+      # @return [Net::HTTP] HTTP
+      def http_client(uri)
+        http_client = Net::HTTP.new(
+          uri.host,
+          uri.port
+        )
+        http_client.use_ssl = uri.scheme == 'https'
+        http_client.verify_mode = OpenSSL::SSL::VERIFY_NONE
+        http_client.open_timeout = LlmHub::Config::OPEN_TIME_OUT
+        http_client.read_timeout = LlmHub::Config::READ_TIME_OUT
+        http_client
+      end
+    end
+  end
+end
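Note that `http_post` serializes the body itself (`request_body.to_json`), so callers pass a plain Hash, and that `http_client` sets `verify_mode` to `OpenSSL::SSL::VERIFY_NONE`, which disables TLS certificate verification. A standalone sketch of the helper (the `Demo` class and the httpbin.org endpoint are illustrative only):

```ruby
require 'llm_hub'

# Illustrative wrapper class; not part of the gem.
class Demo
  include LlmHub::Common::HttpHelper
end

res = Demo.new.http_post(
  'https://httpbin.org/post',              # illustrative endpoint
  { hello: 'world' },                      # serialized to JSON inside http_post
  { 'Content-Type' => 'application/json' }
)
puts res.code # Net::HTTPResponse code, e.g. "200"
```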
data/lib/llm_hub/completion/client.rb
ADDED
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Completion
+    # Client for LLM providers (OpenAI, Anthropic, etc.)
+    class Client < LlmHub::Common::ClientBase
+      # Available provider mappings
+      # @return [Hash<Symbol, Class>] mapping of provider names to their classes
+      PROVIDER_CLASSES = {
+        openai: Providers::OpenAI,
+        anthropic: Providers::Anthropic
+      }.freeze
+
+      # Initialize a new completion client
+      # @param api_key [String] API key for the provider (required)
+      # @param provider [Symbol, String] Provider name (:openai, :anthropic) (required)
+      # @see LlmHub::Common::ClientBase#initialize
+      def initialize(api_key:, provider:)
+        super
+        @provider_client = create_provider_client
+      end
+
+      # Execute a single question prompt and return the response
+      # @param system_prompt [String] System prompt for the LLM
+      # @param content [String] User content/question
+      # @param model_name [String] Model name to use
+      # @param option_params [Hash] Additional parameters for the provider
+      # @return [Hash{Symbol => String, Integer}] Response with :answer and :tokens keys on success
+      #   or :error key on failure
+      def ask_single_question(
+        system_prompt:,
+        content:,
+        model_name:,
+        option_params: {}
+      )
+        with_retry do
+          url = @provider_client.url
+          request_body = @provider_client.request_body(system_prompt, content, model_name, option_params)
+          headers = @provider_client.headers
+
+          response_body = make_request(url, request_body, headers)
+          formatted_response(response_body)
+        end
+      rescue StandardError => e
+        { error: e.message }.deep_symbolize_keys
+      end
+
+      private
+
+      # Format the response from provider
+      # @param response_body [Hash] Raw response from provider
+      # @return [Hash{Symbol => String, Integer}] Formatted response
+      def formatted_response(response_body)
+        {
+          answer: @provider_client.extract_answer(response_body),
+          tokens: @provider_client.extract_tokens(response_body)
+        }
+      end
+    end
+  end
+end
data/lib/llm_hub/completion/providers/anthropic.rb
ADDED
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Completion
+    module Providers
+      # Anthropic completion provider
+      class Anthropic < Base
+        COMPLETIONS_URI = 'https://api.anthropic.com/v1/messages'
+
+        def url
+          COMPLETIONS_URI
+        end
+
+        def headers
+          {
+            'Content-Type' => 'application/json',
+            'x-api-key' => @api_key,
+            'anthropic-version' => '2023-06-01'
+          }
+        end
+
+        def request_body(system_prompt, content, model_name, option_params)
+          base_params = {
+            model: model_name,
+            max_tokens: 1024,
+            temperature: 0.2,
+            system: system_prompt,
+            messages: [
+              { role: 'user', content: content }
+            ]
+          }
+          base_params.merge(option_params)
+        end
+
+        def extract_answer(response_body)
+          response_body&.dig('content')&.first&.dig('text')
+        end
+
+        def extract_tokens(response_body)
+          usage = response_body&.dig('usage')
+          {
+            total_tokens: (usage&.dig('input_tokens') || 0) + (usage&.dig('output_tokens') || 0),
+            prompt_tokens: usage&.dig('input_tokens'),
+            completion_tokens: usage&.dig('output_tokens')
+          }
+        end
+      end
+    end
+  end
+end
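The Anthropic provider hard-codes `max_tokens: 1024` and `temperature: 0.2`, but since `request_body` merges `option_params` last, callers can override either. A hedged usage sketch (the model name is illustrative):

```ruby
require 'llm_hub'

client = LlmHub::Completion::Client.new(
  api_key: ENV['ANTHROPIC_API_KEY'],
  provider: :anthropic
)

response = client.ask_single_question(
  system_prompt: 'You are a helpful assistant.',
  content: 'What is the capital of Japan?',
  model_name: 'claude-3-haiku-20240307', # illustrative model name
  option_params: { max_tokens: 512 }     # overrides the 1024 default via merge
)

puts response[:answer] || response[:error]
```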
data/lib/llm_hub/completion/providers/base.rb
ADDED
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Completion
+    module Providers
+      # Base class for LLM completion providers
+      # @abstract Subclass and override required methods to implement a provider
+      class Base
+        include LlmHub::Common::AbstractMethods
+
+        attr_reader :api_key
+
+        def initialize(api_key)
+          @api_key = api_key
+        end
+
+        # Required methods - must be implemented by subclasses
+        abstract_methods :url, :headers, :request_body, :extract_answer, :extract_tokens
+      end
+    end
+  end
+end
data/lib/llm_hub/completion/providers/openai.rb
ADDED
@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Completion
+    module Providers
+      # OpenAI completion provider
+      class OpenAI < Base
+        COMPLETIONS_URI = 'https://api.openai.com/v1/chat/completions'
+
+        def url
+          COMPLETIONS_URI
+        end
+
+        def headers
+          {
+            'Content-Type' => 'application/json',
+            'Authorization' => "Bearer #{@api_key}"
+          }
+        end
+
+        def request_body(system_prompt, content, model_name, option_params)
+          base_params = {
+            model: model_name,
+            n: 1,
+            temperature: 0.2,
+            messages: build_messages(system_prompt, content)
+          }
+          base_params.merge(option_params)
+        end
+
+        def extract_answer(response_body)
+          choices = response_body&.dig('choices')
+          return nil if choices.nil? || choices.empty?
+
+          choices[0]&.dig('message')&.dig('content')
+        end
+
+        def extract_tokens(response_body)
+          usage = response_body&.dig('usage')
+          {
+            total_tokens: usage&.dig('total_tokens'),
+            prompt_tokens: usage&.dig('prompt_tokens'),
+            completion_tokens: usage&.dig('completion_tokens')
+          }
+        end
+
+        private
+
+        def build_messages(system_prompt, content)
+          [
+            { role: 'system', content: system_prompt },
+            { role: 'user', content: content }
+          ]
+        end
+      end
+    end
+  end
+end
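Here too `base_params.merge(option_params)` runs last, so any key in `option_params` wins over the defaults (`n: 1`, `temperature: 0.2`). A quick sketch of the body this provider builds, with illustrative values:

```ruby
require 'llm_hub'

provider = LlmHub::Completion::Providers::OpenAI.new('sk-dummy')

body = provider.request_body(
  'Be terse.',         # system_prompt
  'Hi',                # content
  'gpt-4o-mini',       # model_name
  { temperature: 0.7 } # option_params; overrides the 0.2 default
)
# body => {
#   model: 'gpt-4o-mini',
#   n: 1,
#   temperature: 0.7,
#   messages: [
#     { role: 'system', content: 'Be terse.' },
#     { role: 'user', content: 'Hi' }
#   ]
# }
```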
data/lib/llm_hub/embedding/client.rb
ADDED
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Embedding
+    # Client for LLM providers (OpenAI, Anthropic, etc.)
+    class Client < LlmHub::Common::ClientBase
+      # Available provider mappings
+      # @return [Hash<Symbol, Class>] mapping of provider names to their classes
+      PROVIDER_CLASSES = {
+        openai: Providers::OpenAI
+      }.freeze
+
+      # Initialize a new embedding client
+      # @param api_key [String] API key for the provider (required)
+      # @param provider [Symbol, String] Provider name (:openai) (required)
+      # @see LlmHub::Common::ClientBase#initialize
+      def initialize(api_key:, provider:)
+        super
+        @provider_client = create_provider_client
+      end
+
+      # Generate embeddings for the given text
+      # @param text [String] The text to generate embeddings for
+      # @param model_name [String] The model to use for embedding generation
+      # @param option_params [Hash] Additional parameters to pass to the provider
+      # @return [Hash{Symbol => Array<Float>, Hash}] Response with :embedding and :tokens keys on success
+      #   or :error key on failure
+      def post_embedding(
+        text:,
+        model_name:,
+        option_params: {}
+      )
+        with_retry do
+          url = @provider_client.url
+          request_body = @provider_client.request_body(text, model_name, option_params)
+          headers = @provider_client.headers
+
+          response_body = make_request(url, request_body, headers)
+          formatted_response(response_body)
+        end
+      rescue StandardError => e
+        { error: e.message }.deep_symbolize_keys
+      end
+
+      private
+
+      # Format the API response into a standardized structure
+      # @param response_body [Hash] The raw response from the API
+      # @return [Hash{Symbol => Object}] Formatted response with embedding and token information
+      # @api private
+      def formatted_response(response_body)
+        {
+          embedding: @provider_client.extract_embedding(response_body),
+          tokens: @provider_client.extract_tokens(response_body)
+        }
+      end
+    end
+  end
+end
data/lib/llm_hub/embedding/providers/base.rb
ADDED
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Embedding
+    module Providers
+      # Base class for LLM embedding providers
+      # @abstract Subclass and override required methods to implement a provider
+      class Base
+        include LlmHub::Common::AbstractMethods
+
+        attr_reader :api_key
+
+        def initialize(api_key)
+          @api_key = api_key
+        end
+
+        # Required methods - must be implemented by subclasses
+        abstract_methods :url, :headers, :request_body, :extract_embedding, :extract_tokens
+      end
+    end
+  end
+end
data/lib/llm_hub/embedding/providers/openai.rb
ADDED
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+module LlmHub
+  module Embedding
+    module Providers
+      # OpenAI embedding provider
+      class OpenAI < Base
+        EMBEDDINGS_URI = 'https://api.openai.com/v1/embeddings'
+
+        def url
+          EMBEDDINGS_URI
+        end
+
+        def headers
+          {
+            'Content-Type' => 'application/json',
+            'Authorization' => "Bearer #{@api_key}"
+          }
+        end
+
+        def request_body(text, model_name, option_params)
+          base_params = {
+            model: model_name,
+            input: text
+          }
+          base_params.merge(option_params)
+        end
+
+        def extract_embedding(response_body)
+          data_array = response_body&.dig('data')
+          return nil if data_array.nil? || data_array.empty?
+
+          data_array[0]&.dig('embedding')
+        end
+
+        def extract_tokens(response_body)
+          usage = response_body&.dig('usage')
+          {
+            total_tokens: usage&.dig('total_tokens'),
+            prompt_tokens: usage&.dig('prompt_tokens')
+          }
+        end
+      end
+    end
+  end
+end
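Because the embedding provider also merges `option_params` into the request body, provider-specific fields can be passed straight through; for OpenAI's text-embedding-3 models that includes `dimensions`. A hedged sketch:

```ruby
require 'llm_hub'

client = LlmHub::Embedding::Client.new(
  api_key: ENV['OPENAI_API_KEY'],
  provider: :openai
)

res = client.post_embedding(
  text: 'hello',
  model_name: 'text-embedding-3-small',
  option_params: { dimensions: 256 } # OpenAI-specific field, forwarded via merge
)

puts res[:embedding]&.length # => 256 on success; nil when res[:error] is set
```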
data/lib/llm_hub.rb
ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+# Standard libraries
+require 'net/http'
+require 'json'
+require 'openssl'
+require 'active_support/core_ext/hash/keys'
+
+require_relative 'llm_hub/version'
+require_relative 'llm_hub/config'
+
+# Common modules
+require_relative 'llm_hub/common/abstract_methods'
+require_relative 'llm_hub/common/http_helper'
+require_relative 'llm_hub/common/client_base'
+
+# Completion providers
+require_relative 'llm_hub/completion/providers/base'
+require_relative 'llm_hub/completion/providers/openai'
+require_relative 'llm_hub/completion/providers/anthropic'
+require_relative 'llm_hub/completion/client'
+
+# Embedding providers
+require_relative 'llm_hub/embedding/providers/base'
+require_relative 'llm_hub/embedding/providers/openai'
+require_relative 'llm_hub/embedding/client'
+
+module LlmHub
+  class Error < StandardError; end
+end
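The `active_support/core_ext/hash/keys` require exists because both clients call `deep_symbolize_keys` on their error hashes; that method is an ActiveSupport core extension rather than plain Ruby, which is why `activesupport` appears as a runtime dependency in the metadata below. For reference:

```ruby
require 'active_support/core_ext/hash/keys'

# deep_symbolize_keys converts string keys to symbols at every nesting level.
{ 'error' => { 'code' => 401 } }.deep_symbolize_keys
# => { error: { code: 401 } }
```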
metadata
ADDED
@@ -0,0 +1,135 @@
+--- !ruby/object:Gem::Specification
+name: llm_hub
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- akiraNuma
+bindir: exe
+cert_chain: []
+date: 2025-05-29 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: activesupport
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: json
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13.0'
+- !ruby/object:Gem::Dependency
+  name: rspec
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.0'
+- !ruby/object:Gem::Dependency
+  name: rubocop
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.21'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.21'
+description: A Ruby interface for multiple LLM providers like OpenAI and Anthropic.
+  Provides easy access to Completion and Embedding functionalities.
+email:
+- akiran@akiranumakura.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- ".rspec"
+- ".rubocop.yml"
+- CHANGELOG.md
+- LICENSE.txt
+- README.md
+- Rakefile
+- examples/basic_usage.rb
+- lib/llm_hub.rb
+- lib/llm_hub/common/abstract_methods.rb
+- lib/llm_hub/common/client_base.rb
+- lib/llm_hub/common/http_helper.rb
+- lib/llm_hub/completion/client.rb
+- lib/llm_hub/completion/providers/anthropic.rb
+- lib/llm_hub/completion/providers/base.rb
+- lib/llm_hub/completion/providers/openai.rb
+- lib/llm_hub/config.rb
+- lib/llm_hub/embedding/client.rb
+- lib/llm_hub/embedding/providers/base.rb
+- lib/llm_hub/embedding/providers/openai.rb
+- lib/llm_hub/version.rb
+homepage: https://github.com/akiraNuma/llm_hub
+licenses:
+- MIT
+metadata:
+  allowed_push_host: https://rubygems.org
+  homepage_uri: https://github.com/akiraNuma/llm_hub
+  source_code_uri: https://github.com/akiraNuma/llm_hub
+  changelog_uri: https://github.com/akiraNuma/llm_hub/blob/main/CHANGELOG.md
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: 3.1.0
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.6.3
+specification_version: 4
+summary: A Ruby interface for multiple LLM providers like OpenAI and Anthropic.
+test_files: []