llm.rb 0.10.0 → 0.11.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/LICENSE +0 -0
- data/README.md +81 -122
- data/lib/llm/bot/builder.rb +2 -2
- data/lib/llm/bot/conversable.rb +0 -0
- data/lib/llm/bot/prompt/completion.rb +0 -0
- data/lib/llm/bot/prompt/respond.rb +0 -0
- data/lib/llm/bot.rb +9 -11
- data/lib/llm/buffer.rb +0 -0
- data/lib/llm/error.rb +0 -0
- data/lib/llm/event_handler.rb +0 -0
- data/lib/llm/eventstream/event.rb +0 -0
- data/lib/llm/eventstream/parser.rb +0 -0
- data/lib/llm/eventstream.rb +0 -0
- data/lib/llm/file.rb +18 -9
- data/lib/llm/function.rb +18 -13
- data/lib/llm/json/schema/array.rb +0 -0
- data/lib/llm/json/schema/boolean.rb +0 -0
- data/lib/llm/json/schema/integer.rb +0 -0
- data/lib/llm/json/schema/leaf.rb +0 -0
- data/lib/llm/json/schema/null.rb +0 -0
- data/lib/llm/json/schema/number.rb +0 -0
- data/lib/llm/json/schema/object.rb +0 -0
- data/lib/llm/json/schema/string.rb +0 -0
- data/lib/llm/json/schema/version.rb +0 -0
- data/lib/llm/json/schema.rb +0 -0
- data/lib/llm/message.rb +8 -0
- data/lib/llm/mime.rb +0 -0
- data/lib/llm/multipart.rb +0 -0
- data/lib/llm/object/builder.rb +0 -0
- data/lib/llm/object/kernel.rb +8 -0
- data/lib/llm/object.rb +7 -0
- data/lib/llm/provider.rb +9 -11
- data/lib/llm/providers/anthropic/error_handler.rb +0 -0
- data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
- data/lib/llm/providers/anthropic/format.rb +0 -0
- data/lib/llm/providers/anthropic/models.rb +2 -7
- data/lib/llm/providers/anthropic/response/completion.rb +39 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
- data/lib/llm/providers/anthropic.rb +3 -24
- data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
- data/lib/llm/providers/deepseek/format.rb +0 -0
- data/lib/llm/providers/deepseek.rb +6 -0
- data/lib/llm/providers/gemini/audio.rb +6 -10
- data/lib/llm/providers/gemini/error_handler.rb +0 -0
- data/lib/llm/providers/gemini/files.rb +11 -14
- data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
- data/lib/llm/providers/gemini/format.rb +0 -0
- data/lib/llm/providers/gemini/images.rb +8 -7
- data/lib/llm/providers/gemini/models.rb +2 -8
- data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
- data/lib/llm/providers/gemini/response/embedding.rb +8 -0
- data/lib/llm/providers/gemini/response/file.rb +11 -0
- data/lib/llm/providers/gemini/response/image.rb +26 -0
- data/lib/llm/providers/gemini/stream_parser.rb +0 -0
- data/lib/llm/providers/gemini.rb +5 -8
- data/lib/llm/providers/llamacpp.rb +6 -0
- data/lib/llm/providers/ollama/error_handler.rb +0 -0
- data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
- data/lib/llm/providers/ollama/format.rb +0 -0
- data/lib/llm/providers/ollama/models.rb +2 -8
- data/lib/llm/providers/ollama/response/completion.rb +28 -0
- data/lib/llm/providers/ollama/response/embedding.rb +10 -0
- data/lib/llm/providers/ollama/stream_parser.rb +0 -0
- data/lib/llm/providers/ollama.rb +5 -8
- data/lib/llm/providers/openai/audio.rb +6 -6
- data/lib/llm/providers/openai/error_handler.rb +0 -0
- data/lib/llm/providers/openai/files.rb +14 -15
- data/lib/llm/providers/openai/format/completion_format.rb +11 -4
- data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
- data/lib/llm/providers/openai/format/respond_format.rb +7 -4
- data/lib/llm/providers/openai/format.rb +0 -0
- data/lib/llm/providers/openai/images.rb +8 -7
- data/lib/llm/providers/openai/models.rb +2 -7
- data/lib/llm/providers/openai/moderations.rb +9 -11
- data/lib/llm/providers/openai/response/audio.rb +7 -0
- data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
- data/lib/llm/providers/openai/response/embedding.rb +9 -0
- data/lib/llm/providers/openai/response/file.rb +7 -0
- data/lib/llm/providers/openai/response/image.rb +16 -0
- data/lib/llm/providers/openai/response/moderations.rb +34 -0
- data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
- data/lib/llm/providers/openai/responses.rb +10 -9
- data/lib/llm/providers/openai/stream_parser.rb +0 -0
- data/lib/llm/providers/openai/vector_stores.rb +106 -0
- data/lib/llm/providers/openai.rb +14 -8
- data/lib/llm/response.rb +37 -13
- data/lib/llm/utils.rb +0 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +2 -12
- data/llm.gemspec +1 -1
- metadata +18 -29
- data/lib/llm/model.rb +0 -32
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
- data/lib/llm/providers/anthropic/response_parser.rb +0 -24
- data/lib/llm/providers/gemini/response_parser.rb +0 -46
- data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
- data/lib/llm/providers/ollama/response_parser.rb +0 -30
- data/lib/llm/providers/openai/response_parser.rb +0 -65
- data/lib/llm/providers/voyageai/error_handler.rb +0 -32
- data/lib/llm/providers/voyageai/response_parser.rb +0 -13
- data/lib/llm/providers/voyageai.rb +0 -44
- data/lib/llm/response/audio.rb +0 -13
- data/lib/llm/response/audio_transcription.rb +0 -14
- data/lib/llm/response/audio_translation.rb +0 -14
- data/lib/llm/response/completion.rb +0 -51
- data/lib/llm/response/download_file.rb +0 -15
- data/lib/llm/response/embedding.rb +0 -23
- data/lib/llm/response/file.rb +0 -42
- data/lib/llm/response/filelist.rb +0 -18
- data/lib/llm/response/image.rb +0 -29
- data/lib/llm/response/modellist.rb +0 -18
- data/lib/llm/response/moderationlist/moderation.rb +0 -47
- data/lib/llm/response/moderationlist.rb +0 -51
- data/lib/llm/response/respond.rb +0 -56
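Taken together, the file list shows three structural changes: response parsing moves out of the per-provider ResponseParser mixins and the generic LLM::Response::* classes (all deleted below) into per-provider response/ classes, the VoyageAI provider is removed entirely, and OpenAI gains a vector stores interface. A rough sketch of the 0.10.0 flow that is going away, pieced together from the deleted sources below — the wiring here is illustrative, not code from either release:

    # 0.10.0-era flow, reconstructed from the deleted files in this diff:
    res = LLM::Response::Completion.new(http_response) # generic response class (deleted)
    res.extend(LLM::OpenAI::ResponseParser)            # mixin supplies parse_completion (deleted)
    res.choices       # first access parses the body via parse_completion, memoized in @parsed
    res.total_tokens  # prompt_tokens + completion_tokens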
data/lib/llm/providers/ollama/response_parser/completion_parser.rb
DELETED
@@ -1,42 +0,0 @@
-# frozen_string_literal: true
-
-module LLM::Ollama::ResponseParser
-  ##
-  # @private
-  class CompletionParser
-    def initialize(body)
-      @body = LLM::Object.from_hash(body)
-    end
-
-    def format(response)
-      {
-        model:,
-        choices: [format_choices(response)],
-        prompt_tokens:,
-        completion_tokens:
-      }
-    end
-
-    private
-
-    def format_choices(response)
-      role, content, calls = message.to_h.values_at(:role, :content, :tool_calls)
-      extra = {response:, tool_calls: format_tool_calls(calls)}
-      LLM::Message.new(role, content, extra)
-    end
-
-    def format_tool_calls(tools)
-      return [] unless tools
-      tools.filter_map do |tool|
-        next unless tool["function"]
-        LLM::Object.new(tool["function"])
-      end
-    end
-
-    def body = @body
-    def model = body.model
-    def prompt_tokens = body.prompt_eval_count
-    def completion_tokens = body.eval_count
-    def message = body.message
-  end
-end
data/lib/llm/providers/ollama/response_parser.rb
DELETED
@@ -1,30 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::Ollama
-  ##
-  # @private
-  module ResponseParser
-    require_relative "response_parser/completion_parser"
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_completion(body)
-      CompletionParser.new(body).format(self)
-    end
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_embedding(body)
-      {
-        model: body["model"],
-        embeddings: body["data"].map { _1["embedding"] },
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
-    end
-  end
-end
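For reference, the deleted parser normalized a raw Ollama chat body into the hash consumed by the (also deleted) LLM::Response::Completion. An illustrative example — the body below is made up, but shape-accurate for Ollama's chat API:

    body = {
      "model" => "llama3",
      "message" => {"role" => "assistant", "content" => "Hello!"},
      "prompt_eval_count" => 12,
      "eval_count" => 4
    }
    parser = LLM::Ollama::ResponseParser::CompletionParser.new(body)
    parser.format(response) # response is the enclosing LLM::Response object
    # => {model: "llama3", choices: [#<LLM::Message ...>], prompt_tokens: 12, completion_tokens: 4}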
data/lib/llm/providers/openai/response_parser.rb
DELETED
@@ -1,65 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::OpenAI
-  ##
-  # @private
-  module ResponseParser
-    require_relative "response_parser/completion_parser"
-    require_relative "response_parser/respond_parser"
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_completion(body)
-      CompletionParser.new(body).format(self)
-    end
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_respond_response(body)
-      RespondParser.new(body).format(self)
-    end
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_moderation_list(body)
-      {
-        id: body["id"],
-        model: body["model"],
-        moderations: body["results"].map { LLM::Response::ModerationList::Moderation.new(_1) }
-      }
-    end
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_embedding(body)
-      {
-        model: body["model"],
-        embeddings: body["data"].map { _1["embedding"] },
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
-    end
-
-    ##
-    # @param [Hash] body
-    #  The response body from the LLM provider
-    # @return [Hash]
-    def parse_image(body)
-      {
-        urls: body["data"].filter_map { _1["url"] },
-        images: body["data"].filter_map do
-          next unless _1["b64_json"]
-          StringIO.new(_1["b64_json"].unpack1("m0"))
-        end
-      }
-    end
-  end
-end
data/lib/llm/providers/voyageai/error_handler.rb
DELETED
@@ -1,32 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::VoyageAI
-  class ErrorHandler
-    ##
-    # @return [Net::HTTPResponse]
-    #  Non-2XX response from the server
-    attr_reader :res
-
-    ##
-    # @param [Net::HTTPResponse] res
-    #  The response from the server
-    # @return [LLM::OpenAI::ErrorHandler]
-    def initialize(res)
-      @res = res
-    end
-
-    ##
-    # @raise [LLM::Error]
-    #  Raises a subclass of {LLM::Error LLM::Error}
-    def raise_error!
-      case res
-      when Net::HTTPUnauthorized
-        raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
-      when Net::HTTPTooManyRequests
-        raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
-      else
-        raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
-      end
-    end
-  end
-end
data/lib/llm/providers/voyageai/response_parser.rb
DELETED
@@ -1,13 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::VoyageAI
-  module ResponseParser
-    def parse_embedding(body)
-      {
-        model: body["model"],
-        embeddings: body["data"].map { _1["embedding"] },
-        total_tokens: body.dig("usage", "total_tokens")
-      }
-    end
-  end
-end
data/lib/llm/providers/voyageai.rb
DELETED
@@ -1,44 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  class VoyageAI < Provider
-    require_relative "voyageai/error_handler"
-    require_relative "voyageai/response_parser"
-    HOST = "api.voyageai.com"
-
-    ##
-    # @param key (see LLM::Provider#initialize)
-    def initialize(**)
-      super(host: HOST, **)
-    end
-
-    ##
-    # Provides an embedding via VoyageAI per
-    # [Anthropic's recommendation](https://docs.anthropic.com/en/docs/build-with-claude/embeddings)
-    # @param input (see LLM::Provider#embed)
-    # @return (see LLM::Provider#embed)
-    def embed(input, model: "voyage-2", **params)
-      req = Net::HTTP::Post.new("/v1/embeddings", headers)
-      req.body = JSON.dump({input:, model:}.merge!(params))
-      res = execute(request: req)
-      Response::Embedding.new(res).extend(response_parser)
-    end
-
-    private
-
-    def headers
-      {
-        "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@key}"
-      }
-    end
-
-    def response_parser
-      LLM::VoyageAI::ResponseParser
-    end
-
-    def error_handler
-      LLM::VoyageAI::ErrorHandler
-    end
-  end
-end
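Before its removal the VoyageAI provider was embedding-only, added per the Anthropic recommendation noted in the comment above. Assuming the usual LLM.voyageai entry point from 0.10.0 (the entry point itself is not shown in this diff), usage looked roughly like:

    # Hedged example; the constructor name and key argument are assumptions
    # based on the gem's other providers:
    llm = LLM.voyageai(key: ENV["VOYAGEAI_SECRET"])
    res = llm.embed("Hello, world")   # POST /v1/embeddings, model defaults to "voyage-2"
    res.embeddings.first              # => an array of floats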
data/lib/llm/response/audio.rb
DELETED
@@ -1,13 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::Audio LLM::Response::Audio} class represents an
-  # audio file that has been returned by a provider. It wraps an IO object
-  # that can be used to read the contents of an audio stream (as binary data).
-  class Response::Audio < Response
-    ##
-    # @return [StringIO]
-    attr_accessor :audio
-  end
-end
data/lib/llm/response/audio_transcription.rb
DELETED
@@ -1,14 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::AudioTranscription LLM::Response::AudioTranscription}
-  # class represents an audio transcription that has been returned by
-  # a provider (eg OpenAI, Gemini, etc)
-  class Response::AudioTranscription < Response
-    ##
-    # Returns the text of the transcription
-    # @return [String]
-    attr_accessor :text
-  end
-end
data/lib/llm/response/audio_translation.rb
DELETED
@@ -1,14 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::AudioTranslation LLM::Response::AudioTranslation}
-  # class represents an audio translation that has been returned by
-  # a provider (eg OpenAI, Gemini, etc)
-  class Response::AudioTranslation < Response
-    ##
-    # Returns the text of the translation
-    # @return [String]
-    attr_accessor :text
-  end
-end
data/lib/llm/response/completion.rb
DELETED
@@ -1,51 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  class Response::Completion < Response
-    ##
-    # @return [String]
-    #  Returns the model name used for the completion
-    def model
-      parsed[:model]
-    end
-
-    ##
-    # @return [Array<LLM::Message>]
-    #  Returns an array of messages
-    def choices
-      parsed[:choices]
-    end
-    alias_method :messages, :choices
-
-    ##
-    # @return [Integer]
-    #  Returns the count of prompt tokens
-    def prompt_tokens
-      parsed[:prompt_tokens]
-    end
-
-    ##
-    # @return [Integer]
-    #  Returns the count of completion tokens
-    def completion_tokens
-      parsed[:completion_tokens]
-    end
-
-    ##
-    # @return [Integer]
-    #  Returns the total count of tokens
-    def total_tokens
-      prompt_tokens + completion_tokens
-    end
-
-    private
-
-    ##
-    # @private
-    # @return [Hash]
-    #  Returns the parsed completion response from the provider
-    def parsed
-      @parsed ||= parse_completion(body)
-    end
-  end
-end
data/lib/llm/response/download_file.rb
DELETED
@@ -1,15 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::DownloadFile LLM::Response::DownloadFile} class
-  # represents the contents of a file that has been returned by a
-  # provider. It wraps an IO object that can be used to read the file
-  # contents.
-  class Response::DownloadFile < Response
-    ##
-    # Returns a StringIO object
-    # @return [StringIO]
-    attr_accessor :file
-  end
-end
data/lib/llm/response/embedding.rb
DELETED
@@ -1,23 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  class Response::Embedding < Response
-    def model
-      parsed[:model]
-    end
-
-    def embeddings
-      parsed[:embeddings]
-    end
-
-    def total_tokens
-      parsed[:total_tokens]
-    end
-
-    private
-
-    def parsed
-      @parsed ||= parse_embedding(body)
-    end
-  end
-end
data/lib/llm/response/file.rb
DELETED
@@ -1,42 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::File LLM::Response::File} class represents a file
-  # that has been uploaded to a provider. Its properties are delegated
-  # to the underlying response body, and vary by provider.
-  class Response::File < Response
-    ##
-    # Returns a normalized response body
-    # @return [Hash]
-    def body
-      @_body ||= if super["file"]
-        super["file"].transform_keys { snakecase(_1) }
-      else
-        super.transform_keys { snakecase(_1) }
-      end
-    end
-
-    ##
-    # @return [String]
-    def inspect
-      "#<#{self.class}:0x#{object_id.to_s(16)} body=#{body}>"
-    end
-
-    private
-
-    include LLM::Utils
-
-    def respond_to_missing?(m, _)
-      body.key?(m.to_s) || super
-    end
-
-    def method_missing(m, *args, &block)
-      if body.key?(m.to_s)
-        body[m.to_s]
-      else
-        super
-      end
-    end
-  end
-end
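The deleted Response::File normalized its body (snake_casing keys and unwrapping the "file" envelope some providers nest uploads under), then delegated unknown methods to it via method_missing, so provider-specific keys read like attributes. Illustrative only; the actual keys vary by provider:

    file = LLM::Response::File.new(http_response)
    file.body       # => {"id" => "file-abc123", "filename" => "report.pdf", ...}
    file.filename   # method_missing -> body["filename"] => "report.pdf"
    file.id         # => "file-abc123"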
data/lib/llm/response/filelist.rb
DELETED
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::FileList LLM::Response::FileList} class represents a
-  # list of file objects that are returned by a provider. It is an Enumerable
-  # object, and can be used to iterate over the file objects in a way that is
-  # similar to an array. Each element is an instance of LLM::Object.
-  class Response::FileList < Response
-    include Enumerable
-
-    attr_accessor :files
-
-    def each(&)
-      @files.each(&)
-    end
-  end
-end
data/lib/llm/response/image.rb
DELETED
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::Image LLM::Response::Image} class represents
-  # an image response. An image response might encapsulate one or more
-  # URLs, or a base64 encoded image -- depending on the provider.
-  class Response::Image < Response
-    ##
-    # Returns one or more image objects, or nil
-    # @return [Array<LLM::Object>, nil]
-    def images
-      parsed[:images].any? ? parsed[:images] : nil
-    end
-
-    ##
-    # Returns one or more image URLs, or nil
-    # @return [Array<String>, nil]
-    def urls
-      parsed[:urls].any? ? parsed[:urls] : nil
-    end
-
-    private
-
-    def parsed
-      @parsed ||= parse_image(body)
-    end
-  end
-end
data/lib/llm/response/modellist.rb
DELETED
@@ -1,18 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
-  # list of model objects that are returned by a provider. It is an Enumerable
-  # object, and can be used to iterate over the model objects in a way that is
-  # similar to an array. Each element is an instance of LLM::Object.
-  class Response::ModelList < Response
-    include Enumerable
-
-    attr_accessor :models
-
-    def each(&)
-      @models.each(&)
-    end
-  end
-end
data/lib/llm/response/moderationlist/moderation.rb
DELETED
@@ -1,47 +0,0 @@
-# frozen_string_literal: true
-
-class LLM::Response::ModerationList
-  ##
-  # The {LLM::Response::ModerationList::Moderation Moderation}
-  # class represents a moderation object that is returned by
-  # the moderations interface.
-  # @see LLM::Response::ModerationList
-  # @see LLM::OpenAI::Moderations
-  class Moderation
-    ##
-    # @param [Hash] moderation
-    # @return [LLM::Response::ModerationList::Moderation]
-    def initialize(moderation)
-      @moderation = moderation
-    end
-
-    ##
-    # Returns true if the moderation is flagged
-    # @return [Boolean]
-    def flagged?
-      @moderation["flagged"]
-    end
-
-    ##
-    # Returns the moderation categories
-    # @return [Array<String>]
-    def categories
-      @moderation["categories"].filter_map { _2 ? _1 : nil }
-    end
-
-    ##
-    # Returns the moderation scores
-    # @return [Hash]
-    def scores
-      @moderation["category_scores"].select { categories.include?(_1) }
-    end
-
-    ##
-    # @return [String]
-    def inspect
-      "#<#{self.class}:0x#{object_id.to_s(16)} " \
-      "categories=#{categories} " \
-      "scores=#{scores}>"
-    end
-  end
-end
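The numbered block parameters above are easy to misread: categories keeps only the names whose value is true (_1 is the key, _2 the value), and scores then selects the scores for exactly those names. A worked example with a made-up payload:

    moderation = LLM::Response::ModerationList::Moderation.new(
      "flagged" => true,
      "categories" => {"hate" => true, "violence" => false},
      "category_scores" => {"hate" => 0.91, "violence" => 0.02}
    )
    moderation.categories # => ["hate"]
    moderation.scores     # => {"hate" => 0.91}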
data/lib/llm/response/moderationlist.rb
DELETED
@@ -1,51 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # The {LLM::Response::ModerationList LLM::Response::ModerationList} class
-  # represents a response from the moderations API. It is an Enumerable that
-  # yields an instance of {LLM::Response::ModerationList::Moderation LLM::Response::ModerationList::Moderation},
-  # and each moderation object contains the categories and scores for a given
-  # input.
-  # @see LLM::OpenAI::Moderations LLM::OpenAI::Moderations
-  class Response::ModerationList < Response
-    require_relative "moderationlist/moderation"
-    include Enumerable
-
-    ##
-    # Returns the moderation ID
-    # @return [String]
-    def id
-      parsed[:id]
-    end
-
-    ##
-    # Returns the moderation model
-    # @return [String]
-    def model
-      parsed[:model]
-    end
-
-    ##
-    # Yields each moderation object
-    # @yieldparam [OpenStruct] moderation
-    # @yieldreturn [void]
-    # @return [void]
-    def each(&)
-      moderations.each(&)
-    end
-
-    private
-
-    def parsed
-      @parsed ||= parse_moderation_list(body)
-    end
-
-    ##
-    # Returns an array of moderation objects
-    # @return [Array<OpenStruct>]
-    def moderations
-      parsed[:moderations]
-    end
-  end
-end
data/lib/llm/response/respond.rb
DELETED
@@ -1,56 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  class Response::Respond < Response
-    ##
-    # @return [String]
-    #  Returns the id of the response
-    def id
-      parsed[:id]
-    end
-
-    ##
-    # @return [String]
-    #  Returns the model name
-    def model
-      parsed[:model]
-    end
-
-    ##
-    # @return [Array<LLM::Message>]
-    def outputs
-      parsed[:outputs]
-    end
-
-    ##
-    # @return [Integer]
-    #  Returns the input token count
-    def input_tokens
-      parsed[:input_tokens]
-    end
-
-    ##
-    # @return [Integer]
-    #  Returns the output token count
-    def output_tokens
-      parsed[:output_tokens]
-    end
-
-    ##
-    # @return [Integer]
-    #  Returns the total count of tokens
-    def total_tokens
-      parsed[:total_tokens]
-    end
-
-    private
-
-    ##
-    # @private
-    # @return [Hash]
-    #  Returns the parsed response from the provider
-    def parsed
      @parsed ||= parse_respond_response(body)
-    end
-  end
-end