llm.rb 4.15.0 → 4.16.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +22 -0
- data/README.md +34 -12
- data/data/anthropic.json +218 -198
- data/data/deepseek.json +1 -1
- data/data/google.json +481 -429
- data/data/openai.json +742 -704
- data/data/xai.json +277 -277
- data/data/zai.json +160 -126
- data/lib/llm/active_record/acts_as_llm.rb +238 -0
- data/lib/llm/active_record.rb +3 -0
- data/lib/llm/provider.rb +16 -1
- data/lib/llm/providers/openai/audio.rb +4 -4
- data/lib/llm/providers/openai/files.rb +6 -6
- data/lib/llm/providers/openai/images.rb +4 -4
- data/lib/llm/providers/openai/models.rb +2 -2
- data/lib/llm/providers/openai/moderations.rb +2 -2
- data/lib/llm/providers/openai/responses.rb +4 -4
- data/lib/llm/providers/openai/vector_stores.rb +12 -12
- data/lib/llm/providers/openai.rb +4 -4
- data/lib/llm/version.rb +1 -1
- metadata +3 -1
data/lib/llm/provider.rb
CHANGED

@@ -22,15 +22,18 @@ class LLM::Provider
   # The number of seconds to wait for a response
   # @param [Boolean] ssl
   # Whether to use SSL for the connection
+  # @param [String] base_path
+  # Optional base path prefix for HTTP API routes.
   # @param [Boolean] persistent
   # Whether to use a persistent connection.
   # Requires the net-http-persistent gem.
-  def initialize(key:, host:, port: 443, timeout: 60, ssl: true, persistent: false)
+  def initialize(key:, host:, port: 443, timeout: 60, ssl: true, base_path: "", persistent: false)
     @key = key
     @host = host
     @port = port
     @timeout = timeout
     @ssl = ssl
+    @base_path = normalize_base_path(base_path)
     @base_uri = URI("#{ssl ? "https" : "http"}://#{host}:#{port}/")
     @headers = {"User-Agent" => "llm.rb v#{LLM::VERSION}"}
     @transport = Transport::HTTP.new(host:, port:, timeout:, ssl:, persistent:)

@@ -330,6 +333,18 @@ class LLM::Provider

   private

+  def path(suffix)
+    return suffix if @base_path.empty?
+    "#{@base_path}#{suffix}"
+  end
+
+  def normalize_base_path(path)
+    path = path.to_s.strip
+    return "" if path.empty? || path == "/"
+    path = "/#{path}" unless path.start_with?("/")
+    path.sub(%r{/+\z}, "")
+  end
+
   attr_reader :base_uri, :host, :port, :timeout, :ssl, :transport

   ##
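The two new private helpers are the core of this release: normalize_base_path canonicalizes the configured prefix once, and path prepends it to every route suffix. A standalone sketch of the same logic, with illustrative inputs and outputs (the example calls are ours, not from the gem):

    # Reproduction of the new LLM::Provider helpers, for illustration.
    def normalize_base_path(path)
      path = path.to_s.strip
      return "" if path.empty? || path == "/"
      path = "/#{path}" unless path.start_with?("/")
      path.sub(%r{/+\z}, "") # strip trailing slashes
    end

    normalize_base_path(nil)    #=> ""
    normalize_base_path("/")    #=> ""
    normalize_base_path("v1")   #=> "/v1"
    normalize_base_path("/v1/") #=> "/v1"

So with @base_path set to "/v1", path("/files") returns "/v1/files", and with an empty base path the suffix passes through untouched.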
data/lib/llm/providers/openai/audio.rb
CHANGED

@@ -32,7 +32,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response]
     def create_speech(input:, voice: "alloy", model: "gpt-4o-mini-tts", response_format: "mp3", **params)
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/audio/speech"), headers)
       req.body = LLM.json.dump({input:, voice:, model:, response_format:}.merge!(params))
       io = StringIO.new("".b)
       res, span, tracer = execute(request: req, operation: "request") { _1.read_body { |chunk| io << chunk } }

@@ -55,7 +55,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def create_transcription(file:, model: "whisper-1", **params)
       multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/audio/transcriptions"), headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res, span, tracer = execute(request: req, operation: "request")

@@ -79,7 +79,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def create_translation(file:, model: "whisper-1", **params)
       multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/audio/translations"), headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res, span, tracer = execute(request: req, operation: "request")

@@ -90,7 +90,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream].each do |m|
+    [:path, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
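Note that the delegator list in each API module now includes :path, so modules like Audio resolve routes through the owning provider. The idiom is plain Ruby metaprogramming: for each symbol, define an instance method that relays positional arguments, keyword arguments, and block to the wrapped provider. A reduced sketch of the pattern (class names invented for illustration):

    class Facade
      def initialize(provider)
        @provider = provider
      end

      # Forward each named method to the wrapped object, preserving
      # positional args, keyword args, and the block.
      [:path, :headers, :execute].each do |m|
        define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
      end
    end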
data/lib/llm/providers/openai/files.rb
CHANGED

@@ -40,7 +40,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def all(**params)
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/files?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :enumerable)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -60,7 +60,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def create(file:, purpose: "assistants", **params)
       multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), purpose:))
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/files"), headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res, span, tracer = execute(request: req, operation: "request")

@@ -83,7 +83,7 @@ class LLM::OpenAI
     def get(file:, **params)
       file_id = file.respond_to?(:id) ? file.id : file
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/files/#{file_id}?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :file)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -105,7 +105,7 @@ class LLM::OpenAI
     def download(file:, **params)
       query = URI.encode_www_form(params)
       file_id = file.respond_to?(:id) ? file.id : file
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/files/#{file_id}/content?#{query}"), headers)
       io = StringIO.new("".b)
       res, span, tracer = execute(request: req, operation: "request") { |res| res.read_body { |chunk| io << chunk } }
       res = LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }

@@ -125,7 +125,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def delete(file:)
       file_id = file.respond_to?(:id) ? file.id : file
-      req = Net::HTTP::Delete.new("/
+      req = Net::HTTP::Delete.new(path("/files/#{file_id}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -134,7 +134,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream].each do |m|
+    [:path, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
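The download method above streams the response body into a binary StringIO rather than buffering the whole file in memory through the JSON pipeline. The underlying Net::HTTP idiom, reduced to a self-contained sketch (host and path are placeholders):

    require "net/http"
    require "stringio"

    io = StringIO.new("".b) # binary buffer; chunks are raw bytes
    Net::HTTP.start("example.com", 443, use_ssl: true) do |http|
      http.request(Net::HTTP::Get.new("/files/file_123/content")) do |res|
        res.read_body { |chunk| io << chunk } # stream chunk by chunk
      end
    end
    io.string # the downloaded bytes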
data/lib/llm/providers/openai/images.rb
CHANGED

@@ -50,7 +50,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response]
     def create(prompt:, model: "dall-e-3", response_format: "b64_json", **params)
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/images/generations"), headers)
       req.body = LLM.json.dump({prompt:, n: 1, model:, response_format:}.merge!(params))
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :image)

@@ -76,7 +76,7 @@ class LLM::OpenAI
     def create_variation(image:, model: "dall-e-2", response_format: "b64_json", **params)
       image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, model:, response_format:))
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/images/variations"), headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res, span, tracer = execute(request: req, operation: "request")

@@ -102,7 +102,7 @@ class LLM::OpenAI
     def edit(image:, prompt:, model: "dall-e-2", response_format: "b64_json", **params)
       image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:, response_format:))
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/images/edits"), headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res, span, tracer = execute(request: req, operation: "request")

@@ -113,7 +113,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream].each do |m|
+    [:path, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai/models.rb
CHANGED

@@ -39,7 +39,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def all(**params)
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/models?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :models)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -48,7 +48,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream].each do |m|
+    [:path, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai/moderations.rb
CHANGED

@@ -47,7 +47,7 @@ class LLM::OpenAI
     # @param [String, LLM::Model] model The model to use
     # @return [LLM::Response]
     def create(input:, model: "omni-moderation-latest", **params)
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/moderations"), headers)
       input = RequestAdapter::Moderation.new(input).adapt
       req.body = LLM.json.dump({input:, model:}.merge!(params))
       res, span, tracer = execute(request: req, operation: "request")

@@ -58,7 +58,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute].each do |m|
+    [:path, :headers, :execute].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai/responses.rb
CHANGED

@@ -40,7 +40,7 @@ class LLM::OpenAI
       params = [params, adapt_schema(params), adapt_tools(tools)].inject({}, &:merge!).compact
       role, stream = params.delete(:role), params.delete(:stream)
       params[:stream] = true if @provider.streamable?(stream) || stream == true
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/responses"), headers)
       messages = build_complete_messages(prompt, params, role)
       @provider.tracer.set_request_metadata(user_input: extract_user_input(messages, fallback: prompt))
       body = LLM.json.dump({input: [adapt(messages, mode: :response)].flatten}.merge!(params))

@@ -61,7 +61,7 @@ class LLM::OpenAI
     def get(response, **params)
       response_id = response.respond_to?(:id) ? response.id : response
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/responses/#{response_id}?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :responds)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -76,7 +76,7 @@ class LLM::OpenAI
     # @return [LLM::Object] Response body
     def delete(response)
       response_id = response.respond_to?(:id) ? response.id : response
-      req = Net::HTTP::Delete.new("/
+      req = Net::HTTP::Delete.new(path("/responses/#{response_id}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -85,7 +85,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream, :resolve_tools].each do |m|
+    [:path, :headers, :execute, :set_body_stream, :resolve_tools].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
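The GET endpoints above compose their query string with URI.encode_www_form before handing the suffix to path. For example (parameter values and the response id are invented):

    require "uri"

    query = URI.encode_www_form(limit: 20, order: "desc")
    #=> "limit=20&order=desc"
    # path("/responses/resp_abc?#{query}") then yields
    # "/v1/responses/resp_abc?limit=20&order=desc" under the default base path.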
data/lib/llm/providers/openai/vector_stores.rb
CHANGED

@@ -31,7 +31,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     def all(**params)
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/vector_stores?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :enumerable)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -47,7 +47,7 @@ class LLM::OpenAI
     # @return [LLM::Response]
     # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
     def create(name:, file_ids: nil, **params)
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/vector_stores"), headers)
       req.body = LLM.json.dump(params.merge({name:, file_ids:}).compact)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)

@@ -72,7 +72,7 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/vector_stores/retrieve OpenAI docs
     def get(vector:)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/vector_stores/#{vector_id}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -89,7 +89,7 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/vector_stores/modify OpenAI docs
     def modify(vector:, name: nil, **params)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/vector_stores/#{vector_id}"), headers)
       req.body = LLM.json.dump(params.merge({name:}).compact)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)

@@ -105,7 +105,7 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/vector_stores/delete OpenAI docs
     def delete(vector:)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
-      req = Net::HTTP::Delete.new("/
+      req = Net::HTTP::Delete.new(path("/vector_stores/#{vector_id}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -122,7 +122,7 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/vector_stores/search OpenAI docs
     def search(vector:, query:, **params)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/vector_stores/#{vector_id}/search"), headers)
       req.body = LLM.json.dump(params.merge({query:}).compact)
       res, span, tracer = execute(request: req, operation: "retrieval")
       res = ResponseAdapter.adapt(res, type: :enumerable)

@@ -140,7 +140,7 @@ class LLM::OpenAI
     def all_files(vector:, **params)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/vector_stores/#{vector_id}/files?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = ResponseAdapter.adapt(res, type: :enumerable)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -159,7 +159,7 @@ class LLM::OpenAI
     def add_file(vector:, file:, attributes: nil, **params)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
       file_id = file.respond_to?(:id) ? file.id : file
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/vector_stores/#{vector_id}/files"), headers)
       req.body = LLM.json.dump(params.merge({file_id:, attributes:}).compact)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)

@@ -190,7 +190,7 @@ class LLM::OpenAI
     def update_file(vector:, file:, attributes:, **params)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
       file_id = file.respond_to?(:id) ? file.id : file
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/vector_stores/#{vector_id}/files/#{file_id}"), headers)
       req.body = LLM.json.dump(params.merge({attributes:}).compact)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)

@@ -209,7 +209,7 @@ class LLM::OpenAI
       vector_id = vector.respond_to?(:id) ? vector.id : vector
       file_id = file.respond_to?(:id) ? file.id : file
       query = URI.encode_www_form(params)
-      req = Net::HTTP::Get.new("/
+      req = Net::HTTP::Get.new(path("/vector_stores/#{vector_id}/files/#{file_id}?#{query}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -226,7 +226,7 @@ class LLM::OpenAI
     def delete_file(vector:, file:)
       vector_id = vector.respond_to?(:id) ? vector.id : vector
       file_id = file.respond_to?(:id) ? file.id : file
-      req = Net::HTTP::Delete.new("/
+      req = Net::HTTP::Delete.new(path("/vector_stores/#{vector_id}/files/#{file_id}"), headers)
       res, span, tracer = execute(request: req, operation: "request")
       res = LLM::Response.new(res)
       tracer.on_request_finish(operation: "request", res:, span:)

@@ -259,7 +259,7 @@ class LLM::OpenAI

     private

-    [:headers, :execute, :set_body_stream].each do |m|
+    [:path, :headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai.rb
CHANGED

@@ -32,8 +32,8 @@ module LLM

     ##
     # @param key (see LLM::Provider#initialize)
-    def initialize(**)
-      super(host: HOST, **)
+    def initialize(base_path: "/v1", **)
+      super(host: HOST, base_path:, **)
     end

     ##
@@ -52,7 +52,7 @@ module LLM
     # @raise (see LLM::Provider#request)
     # @return (see LLM::Provider#embed)
     def embed(input, model: "text-embedding-3-small", **params)
-      req = Net::HTTP::Post.new("/
+      req = Net::HTTP::Post.new(path("/embeddings"), headers)
      req.body = LLM.json.dump({input:, model:}.merge!(params))
      res, span, tracer = execute(request: req, operation: "embeddings", model:)
      res = ResponseAdapter.adapt(res, type: :embedding)

@@ -187,7 +187,7 @@ module LLM
     private

     def completions_path
-      "/
+      path("/chat/completions")
     end

     def headers
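Because the OpenAI provider now defaults base_path to "/v1", existing callers keep hitting the same routes as before, while OpenAI-compatible servers mounted under a different prefix can be targeted by overriding it. A usage sketch (key names and the gateway prefix are placeholders; LLM.openai is the gem's documented constructor):

    require "llm"

    # Default: requests go to /v1/chat/completions, /v1/embeddings, ...
    llm = LLM.openai(key: ENV["OPENAI_SECRET_KEY"])

    # Hypothetical OpenAI-compatible gateway mounted under /openai/v1
    compat = LLM.openai(key: ENV["GATEWAY_KEY"], base_path: "/openai/v1")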
data/lib/llm/version.rb
CHANGED
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 4.15.0
+  version: 4.16.0
 platform: ruby
 authors:
 - Antar Azri

@@ -228,6 +228,8 @@ files:
 - data/xai.json
 - data/zai.json
 - lib/llm.rb
+- lib/llm/active_record.rb
+- lib/llm/active_record/acts_as_llm.rb
 - lib/llm/agent.rb
 - lib/llm/bot.rb
 - lib/llm/buffer.rb
|