llm.rb 0.7.2 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries. In the hunks below, removed text that the diff view did not capture is shown as `…`.
- checksums.yaml +4 -4
- data/README.md +93 -63
- data/lib/llm/{chat → bot}/builder.rb +1 -1
- data/lib/llm/bot/conversable.rb +31 -0
- data/lib/llm/{chat → bot}/prompt/completion.rb +14 -4
- data/lib/llm/{chat → bot}/prompt/respond.rb +16 -5
- data/lib/llm/{chat.rb → bot.rb} +48 -66
- data/lib/llm/buffer.rb +2 -2
- data/lib/llm/error.rb +24 -16
- data/lib/llm/event_handler.rb +44 -0
- data/lib/llm/eventstream/event.rb +69 -0
- data/lib/llm/eventstream/parser.rb +88 -0
- data/lib/llm/eventstream.rb +8 -0
- data/lib/llm/function.rb +9 -12
- data/lib/{json → llm/json}/schema/array.rb +1 -1
- data/lib/llm/message.rb +1 -1
- data/lib/llm/model.rb +1 -1
- data/lib/llm/object/builder.rb +38 -0
- data/lib/llm/object/kernel.rb +45 -0
- data/lib/llm/object.rb +77 -0
- data/lib/llm/provider.rb +68 -26
- data/lib/llm/providers/anthropic/error_handler.rb +3 -3
- data/lib/llm/providers/anthropic/models.rb +3 -7
- data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/anthropic/response_parser.rb +1 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +66 -0
- data/lib/llm/providers/anthropic.rb +9 -4
- data/lib/llm/providers/deepseek/format/completion_format.rb +68 -0
- data/lib/llm/providers/deepseek/format.rb +28 -0
- data/lib/llm/providers/deepseek.rb +60 -0
- data/lib/llm/providers/gemini/error_handler.rb +4 -4
- data/lib/llm/providers/gemini/files.rb +13 -16
- data/lib/llm/providers/gemini/images.rb +4 -8
- data/lib/llm/providers/gemini/models.rb +3 -7
- data/lib/llm/providers/gemini/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/gemini/stream_parser.rb +69 -0
- data/lib/llm/providers/gemini.rb +19 -11
- data/lib/llm/providers/llamacpp.rb +16 -2
- data/lib/llm/providers/ollama/error_handler.rb +3 -3
- data/lib/llm/providers/ollama/format/completion_format.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +3 -7
- data/lib/llm/providers/ollama/response_parser/completion_parser.rb +2 -2
- data/lib/llm/providers/ollama/stream_parser.rb +44 -0
- data/lib/llm/providers/ollama.rb +16 -9
- data/lib/llm/providers/openai/audio.rb +5 -9
- data/lib/llm/providers/openai/error_handler.rb +3 -3
- data/lib/llm/providers/openai/files.rb +15 -18
- data/lib/llm/providers/openai/format/moderation_format.rb +35 -0
- data/lib/llm/providers/openai/format.rb +3 -3
- data/lib/llm/providers/openai/images.rb +8 -11
- data/lib/llm/providers/openai/models.rb +3 -7
- data/lib/llm/providers/openai/moderations.rb +67 -0
- data/lib/llm/providers/openai/response_parser/completion_parser.rb +5 -5
- data/lib/llm/providers/openai/response_parser/respond_parser.rb +2 -2
- data/lib/llm/providers/openai/response_parser.rb +15 -0
- data/lib/llm/providers/openai/responses.rb +14 -16
- data/lib/llm/providers/openai/stream_parser.rb +77 -0
- data/lib/llm/providers/openai.rb +22 -7
- data/lib/llm/providers/voyageai/error_handler.rb +3 -3
- data/lib/llm/providers/voyageai.rb +1 -1
- data/lib/llm/response/filelist.rb +1 -1
- data/lib/llm/response/image.rb +1 -1
- data/lib/llm/response/modellist.rb +1 -1
- data/lib/llm/response/moderationlist/moderation.rb +47 -0
- data/lib/llm/response/moderationlist.rb +51 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +13 -4
- data/llm.gemspec +2 -2
- metadata +42 -28
- data/lib/llm/chat/conversable.rb +0 -53
- data/lib/llm/core_ext/ostruct.rb +0 -43
- /data/lib/{json → llm/json}/schema/boolean.rb +0 -0
- /data/lib/{json → llm/json}/schema/integer.rb +0 -0
- /data/lib/{json → llm/json}/schema/leaf.rb +0 -0
- /data/lib/{json → llm/json}/schema/null.rb +0 -0
- /data/lib/{json → llm/json}/schema/number.rb +0 -0
- /data/lib/{json → llm/json}/schema/object.rb +0 -0
- /data/lib/{json → llm/json}/schema/string.rb +0 -0
- /data/lib/{json → llm/json}/schema/version.rb +0 -0
- /data/lib/{json → llm/json}/schema.rb +0 -0
data/lib/llm/error.rb
CHANGED

```diff
@@ -8,26 +8,34 @@ module LLM
       block_given? ? yield(self) : nil
       super
     end
+  end
 
+  ##
+  # The superclass of all HTTP protocol errors
+  class ResponseError < Error
     ##
-    # …
-    …
-    …
-    …
-    …
-    …
+    # @return [Net::HTTPResponse]
+    # Returns the response associated with an error
+    attr_accessor :response
+
+    def message
+      [super, response.body].join("\n")
+    end
   end
+  end
 
-  …
-  …
-  …
+  ##
+  # HTTPUnauthorized
+  UnauthorizedError = Class.new(ResponseError)
 
-  …
-  …
-  …
+  ##
+  # HTTPTooManyRequests
+  RateLimitError = Class.new(ResponseError)
 
-  …
-  …
-  …
-  …
+  ##
+  # When given an input object that is not understood
+  FormatError = Class.new(Error)
+
+  ##
+  # When given a prompt object that is not understood
+  PromptError = Class.new(FormatError)
 end
```
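Since `UnauthorizedError` and `RateLimitError` both inherit from the new `ResponseError`, HTTP failures can be rescued selectively, and each error carries the `Net::HTTPResponse` that produced it. A minimal sketch (the `complete` call and key name are illustrative):

```ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
begin
  llm.complete "Hello, world"
rescue LLM::RateLimitError => e
  # ResponseError#message appends the response body to the error text,
  # and #response exposes the underlying Net::HTTPResponse.
  warn "rate limited (HTTP #{e.response.code}), retry later"
rescue LLM::ResponseError => e
  warn e.message
end
```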
data/lib/llm/event_handler.rb
ADDED

```diff
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # @private
+  class EventHandler
+    ##
+    # @param [#parse!] parser
+    # @return [LLM::EventHandler]
+    def initialize(parser)
+      @parser = parser
+    end
+
+    ##
+    # "data:" event callback
+    # @param [LLM::EventStream::Event] event
+    # @return [void]
+    def on_data(event)
+      return if event.end?
+      chunk = JSON.parse(event.value)
+      @parser.parse!(chunk)
+    rescue JSON::ParserError
+    end
+
+    ##
+    # Callback for when *any* chunk of data
+    # is received, regardless of whether it has
+    # a field name or not. Primarily for ollama,
+    # which does not emit Server-Sent Events (SSE).
+    # @param [LLM::EventStream::Event] event
+    # @return [void]
+    def on_chunk(event)
+      return if event.end?
+      chunk = JSON.parse(event.chunk)
+      @parser.parse!(chunk)
+    rescue JSON::ParserError
+    end
+
+    ##
+    # Returns a fully constructed response body
+    # @return [LLM::Object]
+    def body = @parser.body
+  end
+end
```
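`EventHandler` glues the generic SSE machinery to a provider's stream parser: anything that responds to `parse!` and `body` fits the `parser` slot. A runnable sketch with a stand-in parser (`StubParser` below is hypothetical, not part of the gem):

```ruby
require "llm"
require "json"

# Stand-in for a provider-specific stream parser: the only
# contract EventHandler relies on is #parse! and #body.
StubParser = Struct.new(:chunks) do
  def parse!(chunk) = chunks << chunk
  def body = chunks
end

handler = LLM::EventHandler.new(StubParser.new([]))
parser  = LLM::EventStream::Parser.new
parser.register(handler)
parser << %(data: {"text": "hello"}\n)
parser << "data: [DONE]\n" # ignored: on_data skips the terminator
handler.body # => [{"text" => "hello"}]
```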
data/lib/llm/eventstream/event.rb
ADDED

```diff
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+module LLM::EventStream
+  ##
+  # @private
+  class Event
+    FIELD_REGEXP = /[^:]+/
+    VALUE_REGEXP = /(?<=: ).+/
+
+    ##
+    # Returns the field name
+    # @return [Symbol]
+    attr_reader :field
+
+    ##
+    # Returns the field value
+    # @return [String]
+    attr_reader :value
+
+    ##
+    # Returns the full chunk
+    # @return [String]
+    attr_reader :chunk
+
+    ##
+    # @param [String] chunk
+    # @return [LLM::EventStream::Event]
+    def initialize(chunk)
+      @field = chunk[FIELD_REGEXP]
+      @value = chunk[VALUE_REGEXP]
+      @chunk = chunk
+    end
+
+    ##
+    # Returns true when the event represents an "id" chunk
+    # @return [Boolean]
+    def id?
+      @field == "id"
+    end
+
+    ##
+    # Returns true when the event represents a "data" chunk
+    # @return [Boolean]
+    def data?
+      @field == "data"
+    end
+
+    ##
+    # Returns true when the event represents an "event" chunk
+    # @return [Boolean]
+    def event?
+      @field == "event"
+    end
+
+    ##
+    # Returns true when the event represents a "retry" chunk
+    # @return [Boolean]
+    def retry?
+      @field == "retry"
+    end
+
+    ##
+    # Returns true when a chunk represents the end of the stream
+    # @return [Boolean]
+    def end?
+      @value == "[DONE]"
+    end
+  end
+end
```
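An `Event` is cut from a single SSE line: `FIELD_REGEXP` grabs everything before the first colon, `VALUE_REGEXP` everything after the `": "` separator, and `end?` recognizes the conventional `[DONE]` terminator. For instance:

```ruby
event = LLM::EventStream::Event.new(%(data: {"choices": []}))
event.field # => "data"
event.value # => '{"choices": []}'
event.data? # => true
event.end?  # => false

LLM::EventStream::Event.new("data: [DONE]").end? # => true
```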
data/lib/llm/eventstream/parser.rb
ADDED

```diff
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+module LLM::EventStream
+  ##
+  # @private
+  class Parser
+    ##
+    # @return [LLM::EventStream::Parser]
+    def initialize
+      @buffer = StringIO.new
+      @events = Hash.new { |h, k| h[k] = [] }
+      @offset = 0
+      @visitors = []
+    end
+
+    ##
+    # Register a visitor
+    # @param [#on_data] visitor
+    # @return [void]
+    def register(visitor)
+      @visitors << visitor
+    end
+
+    ##
+    # Subscribe to an event
+    # @param [Symbol] evtname
+    # @param [Proc] block
+    # @return [void]
+    def on(evtname, &block)
+      @events[evtname.to_s] << block
+    end
+
+    ##
+    # Append an event to the internal buffer
+    # @return [void]
+    def <<(event)
+      io = StringIO.new(event)
+      IO.copy_stream io, @buffer
+      each_line { parse!(_1) }
+    end
+
+    ##
+    # Returns the internal buffer
+    # @return [String]
+    def body
+      @buffer.string
+    end
+
+    ##
+    # Free the internal buffer
+    # @return [void]
+    def free
+      @buffer.truncate(0)
+      @buffer.rewind
+    end
+
+    private
+
+    def parse!(event)
+      event = Event.new(event)
+      dispatch(event)
+    end
+
+    def dispatch(event)
+      @visitors.each { dispatch_visitor(_1, event) }
+      @events[event.field].each { _1.call(event) }
+    end
+
+    def dispatch_visitor(visitor, event)
+      method = "on_#{event.field}"
+      if visitor.respond_to?(method)
+        visitor.public_send(method, event)
+      elsif visitor.respond_to?("on_chunk")
+        visitor.on_chunk(event)
+      end
+    end
+
+    def each_line
+      string.each_line.with_index do
+        next if _2 < @offset
+        yield(_1)
+        @offset += 1
+      end
+    end
+
+    def string = @buffer.string
+  end
+end
```
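Besides registered visitors, the parser supports ad-hoc subscriptions through `on`. Subscribers receive every event with a matching field, including the `[DONE]` terminator, so blocks typically filter with `end?`:

```ruby
parser = LLM::EventStream::Parser.new
parser.on(:data) { |event| puts event.value unless event.end? }
parser << "data: one\ndata: two\n" # prints "one" and "two"
parser << "data: [DONE]\n"
parser.body # => "data: one\ndata: two\ndata: [DONE]\n"
parser.free # empty the internal buffer
```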
data/lib/llm/function.rb
CHANGED

```diff
@@ -1,32 +1,29 @@
 # frozen_string_literal: true
 
 ##
-# The {LLM::Function LLM::Function} class represents a
-# … be called by an LLM.
-# … or a Class-based function.
+# The {LLM::Function LLM::Function} class represents a
+# local function that can be called by an LLM.
 #
-# @example
-#   # Proc-based
+# @example example #1
 #   LLM.function(:system) do |fn|
-#     fn.description "Runs system commands
+#     fn.description "Runs system commands"
 #     fn.params do |schema|
 #       schema.object(command: schema.string.required)
 #     end
 #     fn.define do |params|
-#       Kernel.system(params.command)
+#       {success: Kernel.system(params.command)}
 #     end
 #   end
 #
-# @example
-#   # Class-based
+# @example example #2
 #   class System
 #     def call(params)
-#       Kernel.system(params.command)
+#       {success: Kernel.system(params.command)}
 #     end
 #   end
 #
 #   LLM.function(:system) do |fn|
-#     fn.description "Runs system commands
+#     fn.description "Runs system commands"
 #     fn.params do |schema|
 #       schema.object(command: schema.string.required)
 #     end
@@ -99,7 +96,7 @@ class LLM::Function
   #  Returns a value that communicates that the function call was cancelled
   # @example
   #   llm = LLM.openai(key: ENV["KEY"])
-  #   bot = LLM::…
+  #   bot = LLM::Bot.new(llm, tools: [fn1, fn2])
   #   bot.chat "I want to run the functions"
   #   bot.chat bot.functions.map(&:cancel)
   # @return [LLM::Function::Return]
```
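The updated examples return a Hash from the handler rather than a bare boolean, which serializes cleanly when the result is sent back to the provider. End to end it might look like this (a sketch: `bot.functions.map(&:call)` mirrors the documented `&:cancel` pattern and assumes `LLM::Function#call` executes the pending call):

```ruby
fn = LLM.function(:system) do |fn|
  fn.description "Runs system commands"
  fn.params do |schema|
    schema.object(command: schema.string.required)
  end
  fn.define do |params|
    {success: Kernel.system(params.command)}
  end
end

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Bot.new(llm, tools: [fn])
bot.chat "Run the 'date' command"
bot.chat bot.functions.map(&:call) # or map(&:cancel) to decline
```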
data/lib/llm/message.rb
CHANGED
data/lib/llm/model.rb
CHANGED

```diff
@@ -4,7 +4,7 @@
 # The {LLM::Model LLM::Model} class represents an LLM model that
 # is available to use. Its properties are delegated to the underlying
 # response body, and vary by provider.
-class LLM::Model < OpenStruct
+class LLM::Model < LLM::Object
   ##
   # Returns a subclass of {LLM::Provider LLM::Provider}
   # @return [LLM::Provider]
```
data/lib/llm/object/builder.rb
ADDED

```diff
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+class LLM::Object
+  ##
+  # @private
+  module Builder
+    ##
+    # @example
+    #   obj = LLM::Object.from_hash(person: {name: 'John'})
+    #   obj.person.name # => 'John'
+    #   obj.person.class # => LLM::Object
+    # @param [Hash, LLM::Object, Array] obj
+    #  A Hash object
+    # @return [LLM::Object]
+    #  An LLM::Object object initialized by visiting `obj` with recursion
+    def from_hash(obj)
+      case obj
+      when self then from_hash(obj.to_h)
+      when Array then obj.map { |v| from_hash(v) }
+      else
+        visited = {}
+        obj.each { visited[_1] = visit(_2) }
+        new(visited)
+      end
+    end
+
+    private
+
+    def visit(value)
+      case value
+      when self then from_hash(value.to_h)
+      when Hash then from_hash(value)
+      when Array then value.map { |v| visit(v) }
+      else value
+      end
+    end
+  end
+end
```
data/lib/llm/object/kernel.rb
ADDED

```diff
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+class LLM::Object
+  ##
+  # @private
+  module Kernel
+    def tap(...)
+      ::Kernel.instance_method(:tap).bind(self).call(...)
+    end
+
+    def instance_of?(...)
+      ::Kernel.instance_method(:instance_of?).bind(self).call(...)
+    end
+
+    def method(...)
+      ::Kernel.instance_method(:method).bind(self).call(...)
+    end
+
+    def kind_of?(...)
+      ::Kernel.instance_method(:kind_of?).bind(self).call(...)
+    end
+    alias_method :is_a?, :kind_of?
+
+    def respond_to?(m, include_private = false)
+      @h.key?(m.to_sym) || self.class.instance_methods.include?(m) || super
+    end
+
+    def respond_to_missing?(m, include_private = false)
+      @h.key?(m.to_sym) || super
+    end
+
+    def object_id
+      ::Kernel.instance_method(:object_id).bind(self).call
+    end
+
+    def class
+      ::Kernel.instance_method(:class).bind(self).call
+    end
+
+    def inspect
+      "#<#{self.class}:0x#{object_id.to_s(16)} properties=#{to_h.inspect}>"
+    end
+    alias_method :to_s, :inspect
+  end
+end
```
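Because `LLM::Object` descends from `BasicObject`, ordinary reflection would otherwise be missing; this module rebinds the relevant `::Kernel` methods so the object still introspects normally:

```ruby
obj = LLM::Object.new(name: "Ruby")
obj.class               # => LLM::Object
obj.is_a?(LLM::Object)  # => true
obj.respond_to?(:name)  # => true
obj.inspect             # => "#<LLM::Object:0x... properties={...}>"
```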
data/lib/llm/object.rb
ADDED

```diff
@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+##
+# The {LLM::Object LLM::Object} class encapsulates a Hash object, and it
+# allows a consumer to get and set Hash keys via regular methods. It is
+# similar in spirit to OpenStruct, and it was introduced after OpenStruct
+# became a bundled gem (and not a default gem) in Ruby 3.5.
+class LLM::Object < BasicObject
+  require_relative "object/builder"
+  require_relative "object/kernel"
+
+  extend Builder
+  include Kernel
+  include ::Enumerable
+  defined?(::PP) ? include(::PP::ObjectMixin) : nil
+
+  ##
+  # @param [Hash] h
+  # @return [LLM::Object]
+  def initialize(h = {})
+    @h = h.transform_keys(&:to_sym) || h
+  end
+
+  ##
+  # Yields a key|value pair to a block.
+  # @yieldparam [Symbol] k
+  # @yieldparam [Object] v
+  # @return [void]
+  def each(&)
+    @h.each(&)
+  end
+
+  ##
+  # @param [Symbol, #to_sym] k
+  # @return [Object]
+  def [](k)
+    @h[k.to_sym]
+  end
+
+  ##
+  # @param [Symbol, #to_sym] k
+  # @param [Object] v
+  # @return [void]
+  def []=(k, v)
+    @h[k.to_sym] = v
+  end
+
+  ##
+  # @return [String]
+  def to_json(...)
+    to_h.to_json(...)
+  end
+
+  ##
+  # @return [Boolean]
+  def empty?
+    @h.empty?
+  end
+
+  ##
+  # @return [Hash]
+  def to_h
+    @h
+  end
+
+  private
+
+  def method_missing(m, *args, &b)
+    if m.to_s.end_with?("=")
+      @h[m[0..-2].to_sym] = args.first
+    elsif @h.key?(m)
+      @h[m]
+    else
+      nil
+    end
+  end
+end
```
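`from_hash` converts nested hashes and arrays recursively, keys are normalized to symbols, and unknown keys return `nil` via `method_missing` instead of raising. A short sketch of the OpenStruct-style behavior:

```ruby
obj = LLM::Object.from_hash(person: {name: "John", langs: [{name: "Ruby"}]})
obj.person.name          # => "John"
obj.person.langs[0].name # => "Ruby"
obj["person"]            # same as obj[:person]: #[] symbolizes keys
obj.age                  # => nil (missing keys do not raise)
obj.age = 30             # setter handled by method_missing
obj.to_json              # serializes the underlying Hash
```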
data/lib/llm/provider.rb
CHANGED

```diff
@@ -21,10 +21,9 @@ class LLM::Provider
   #  Whether to use SSL for the connection
   def initialize(key:, host:, port: 443, timeout: 60, ssl: true)
     @key = key
-    @…
-    …
-    …
-    end
+    @client = Net::HTTP.new(host, port)
+    @client.use_ssl = ssl
+    @client.read_timeout = timeout
   end
 
   ##
@@ -78,55 +77,55 @@ class LLM::Provider
   # Starts a new lazy chat powered by the chat completions API
   # @note
   #   This method creates a lazy version of a
-  #   {LLM::Chat LLM::Chat} object.
+  #   {LLM::Bot LLM::Bot} object.
   # @param prompt (see LLM::Provider#complete)
   # @param params (see LLM::Provider#complete)
-  # @return [LLM::Chat]
+  # @return [LLM::Bot]
   def chat(prompt, params = {})
     role = params.delete(:role)
-    LLM::Chat…
+    LLM::Bot.new(self, params).chat(prompt, role:)
   end
 
   ##
   # Starts a new chat powered by the chat completions API
   # @note
   #   This method creates a non-lazy version of a
-  #   {LLM::Chat LLM::Chat} object.
+  #   {LLM::Bot LLM::Bot} object.
   # @param prompt (see LLM::Provider#complete)
   # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
-  # @return [LLM::Chat]
+  # @return [LLM::Bot]
   def chat!(prompt, params = {})
     role = params.delete(:role)
-    LLM::Chat…
+    LLM::Bot.new(self, params).chat(prompt, role:)
   end
 
   ##
   # Starts a new lazy chat powered by the responses API
   # @note
   #   This method creates a lazy variant of a
-  #   {LLM::Chat LLM::Chat} object.
+  #   {LLM::Bot LLM::Bot} object.
   # @param prompt (see LLM::Provider#complete)
   # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
-  # @return [LLM::Chat]
+  # @return [LLM::Bot]
   def respond(prompt, params = {})
     role = params.delete(:role)
-    LLM::Chat…
+    LLM::Bot.new(self, params).respond(prompt, role:)
   end
 
   ##
   # Starts a new chat powered by the responses API
   # @note
   #   This method creates a non-lazy variant of a
-  #   {LLM::Chat LLM::Chat} object.
+  #   {LLM::Bot LLM::Bot} object.
   # @param prompt (see LLM::Provider#complete)
   # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
-  # @return [LLM::Chat]
+  # @return [LLM::Bot]
   def respond!(prompt, params = {})
     role = params.delete(:role)
-    LLM::Chat…
+    LLM::Bot.new(self, params).respond(prompt, role:)
   end
 
   ##
@@ -168,6 +167,13 @@ class LLM::Provider
     raise NotImplementedError
   end
 
+  ##
+  # @return [LLM::OpenAI::Moderations]
+  #  Returns an interface to the moderations API
+  def moderations
+    raise NotImplementedError
+  end
+
   ##
   # @return [String]
   # Returns the role of the assistant in the conversation.
@@ -187,10 +193,7 @@ class LLM::Provider
   # Returns an object that can generate a JSON schema
   # @return [JSON::Schema]
   def schema
-    @schema ||= begin
-      require_relative "../json/schema"
-      JSON::Schema.new
-    end
+    @schema ||= JSON::Schema.new
   end
 
   ##
@@ -209,6 +212,8 @@ class LLM::Provider
 
   private
 
+  attr_reader :client
+
   ##
   # The headers to include with a request
   # @raise [NotImplementedError]
@@ -236,10 +241,21 @@ class LLM::Provider
   end
 
   ##
-  # …
-  …
-  …
-  …
+  # @return [Class]
+  def event_handler
+    LLM::EventHandler
+  end
+
+  ##
+  # @return [Class]
+  # Returns the provider-specific Server-Sent Events (SSE) parser
+  def stream_parser
+    raise NotImplementedError
+  end
+
+  ##
+  # Executes an HTTP request
+  # @param [Net::HTTPRequest] request
   #  The request to send
   # @param [Proc] b
   #  A block to yield the response to (optional)
@@ -253,8 +269,34 @@ class LLM::Provider
   #  When any other unsuccessful status code is returned
   # @raise [SystemCallError]
   #  When there is a network error at the operating system level
-  …
-  …
+  # @return [Net::HTTPResponse]
+  def execute(request:, stream: nil, &b)
+    res = if stream
+      client.request(request) do |res|
+        handler = event_handler.new stream_parser.new(stream)
+        parser = LLM::EventStream::Parser.new
+        parser.register(handler)
+        res.read_body(parser)
+        # If the handler body is empty, it means the
+        # response was most likely not streamed or
+        # parsing has failed. In that case, we fall back
+        # on the original response body.
+        res.body = handler.body.empty? ? parser.body.dup : handler.body
+      ensure
+        parser&.free
+      end
+    else
+      client.request(request, &b)
+    end
+    handle_response(res)
+  end
+
+  ##
+  # Handles the response from a request
+  # @param [Net::HTTPResponse] res
+  #  The response to handle
+  # @return [Net::HTTPResponse]
+  def handle_response(res)
     case res
     when Net::HTTPOK then res
     else error_handler.new(res).raise_error!
```
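For a streamed request, `execute` wires `Net::HTTP` body chunks into `EventStream::Parser`, which dispatches to an `EventHandler` wrapping the provider's stream parser, and falls back to the raw body when nothing was parsed. The public entry point is not part of this hunk; a hedged sketch, assuming a `stream:` option is threaded down to `#execute` and accepts an IO (as `stream_parser.new(stream)` suggests):

```ruby
llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Bot.new(llm, stream: $stdout) # :stream here is an assumption
bot.chat "Write a haiku about Ruby"      # tokens print as they arrive
```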
data/lib/llm/providers/anthropic/error_handler.rb
CHANGED

```diff
@@ -23,11 +23,11 @@ class LLM::Anthropic
   def raise_error!
     case res
     when Net::HTTPUnauthorized
-      raise LLM::…
+      raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
     when Net::HTTPTooManyRequests
-      raise LLM::…
+      raise LLM::RateLimitError.new { _1.response = res }, "Too many requests"
     else
-      raise LLM::…
+      raise LLM::ResponseError.new { _1.response = res }, "Unexpected response"
     end
   end
 end
```