llm.rb 0.11.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +0 -0
- data/README.md +51 -14
- data/lib/llm/bot/builder.rb +0 -0
- data/lib/llm/bot/conversable.rb +0 -0
- data/lib/llm/bot/prompt/completion.rb +0 -0
- data/lib/llm/bot/prompt/respond.rb +0 -0
- data/lib/llm/bot.rb +10 -17
- data/lib/llm/buffer.rb +7 -0
- data/lib/llm/error.rb +0 -0
- data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
- data/lib/llm/eventstream/event.rb +0 -0
- data/lib/llm/eventstream/parser.rb +0 -0
- data/lib/llm/eventstream.rb +0 -0
- data/lib/llm/file.rb +4 -3
- data/lib/llm/function.rb +2 -2
- data/lib/llm/json/schema/array.rb +0 -0
- data/lib/llm/json/schema/boolean.rb +0 -0
- data/lib/llm/json/schema/integer.rb +0 -0
- data/lib/llm/json/schema/leaf.rb +0 -0
- data/lib/llm/json/schema/null.rb +0 -0
- data/lib/llm/json/schema/number.rb +0 -0
- data/lib/llm/json/schema/object.rb +0 -0
- data/lib/llm/json/schema/string.rb +0 -0
- data/lib/llm/json/schema/version.rb +0 -0
- data/lib/llm/json/schema.rb +0 -0
- data/lib/llm/message.rb +0 -0
- data/lib/llm/mime.rb +0 -0
- data/lib/llm/multipart.rb +0 -1
- data/lib/llm/object/builder.rb +0 -0
- data/lib/llm/object/kernel.rb +0 -0
- data/lib/llm/object.rb +2 -3
- data/lib/llm/provider.rb +1 -1
- data/lib/llm/providers/anthropic/error_handler.rb +0 -0
- data/lib/llm/providers/anthropic/format/completion_format.rb +0 -0
- data/lib/llm/providers/anthropic/format.rb +0 -0
- data/lib/llm/providers/anthropic/models.rb +2 -2
- data/lib/llm/providers/anthropic/response/completion.rb +0 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
- data/lib/llm/providers/anthropic.rb +10 -1
- data/lib/llm/providers/deepseek/format/completion_format.rb +0 -0
- data/lib/llm/providers/deepseek/format.rb +0 -0
- data/lib/llm/providers/deepseek.rb +10 -1
- data/lib/llm/providers/gemini/audio.rb +3 -3
- data/lib/llm/providers/gemini/error_handler.rb +0 -0
- data/lib/llm/providers/gemini/files.rb +8 -20
- data/lib/llm/providers/gemini/format/completion_format.rb +2 -2
- data/lib/llm/providers/gemini/format.rb +0 -0
- data/lib/llm/providers/gemini/images.rb +4 -4
- data/lib/llm/providers/gemini/models.rb +2 -2
- data/lib/llm/providers/gemini/response/completion.rb +0 -0
- data/lib/llm/providers/gemini/response/embedding.rb +1 -1
- data/lib/llm/providers/gemini/response/file.rb +0 -0
- data/lib/llm/providers/gemini/response/image.rb +0 -0
- data/lib/llm/providers/gemini/stream_parser.rb +0 -0
- data/lib/llm/providers/gemini.rb +13 -21
- data/lib/llm/providers/llamacpp.rb +12 -1
- data/lib/llm/providers/ollama/error_handler.rb +0 -0
- data/lib/llm/providers/ollama/format/completion_format.rb +0 -0
- data/lib/llm/providers/ollama/format.rb +0 -0
- data/lib/llm/providers/ollama/models.rb +0 -0
- data/lib/llm/providers/ollama/response/completion.rb +0 -0
- data/lib/llm/providers/ollama/response/embedding.rb +1 -2
- data/lib/llm/providers/ollama/stream_parser.rb +0 -0
- data/lib/llm/providers/ollama.rb +8 -11
- data/lib/llm/providers/openai/audio.rb +4 -4
- data/lib/llm/providers/openai/error_handler.rb +0 -0
- data/lib/llm/providers/openai/files.rb +8 -19
- data/lib/llm/providers/openai/format/completion_format.rb +0 -0
- data/lib/llm/providers/openai/format/moderation_format.rb +0 -0
- data/lib/llm/providers/openai/format/respond_format.rb +0 -0
- data/lib/llm/providers/openai/format.rb +0 -0
- data/lib/llm/providers/openai/images.rb +10 -10
- data/lib/llm/providers/openai/models.rb +2 -2
- data/lib/llm/providers/openai/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/audio.rb +0 -0
- data/lib/llm/providers/openai/response/completion.rb +2 -2
- data/lib/llm/providers/openai/response/embedding.rb +3 -3
- data/lib/llm/providers/openai/response/file.rb +0 -0
- data/lib/llm/providers/openai/response/image.rb +0 -0
- data/lib/llm/providers/openai/response/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/responds.rb +0 -1
- data/lib/llm/providers/openai/responses.rb +6 -25
- data/lib/llm/providers/openai/stream_parser.rb +1 -0
- data/lib/llm/providers/openai/vector_stores.rb +85 -3
- data/lib/llm/providers/openai.rb +10 -1
- data/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/response.rb +5 -0
- data/lib/llm/utils.rb +0 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +10 -1
- data/llm.gemspec +4 -4
- metadata +12 -10
@@ -0,0 +1,72 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require_relative "openai" unless defined?(LLM::OpenAI)
|
4
|
+
|
5
|
+
module LLM
|
6
|
+
##
|
7
|
+
# The XAI class implements a provider for [xAI](https://docs.x.ai).
|
8
|
+
#
|
9
|
+
# @example
|
10
|
+
# #!/usr/bin/env ruby
|
11
|
+
# require "llm"
|
12
|
+
#
|
13
|
+
# llm = LLM.xai(key: ENV["KEY"])
|
14
|
+
# bot = LLM::Bot.new(llm)
|
15
|
+
# bot.chat ["Tell me about this photo", File.open("/images/crow.jpg", "rb")]
|
16
|
+
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
17
|
+
class XAI < OpenAI
|
18
|
+
require_relative "xai/images"
|
19
|
+
|
20
|
+
##
|
21
|
+
# @param [String] host A regional host or the default ("api.x.ai")
|
22
|
+
# @param key (see LLM::Provider#initialize)
|
23
|
+
# @see https://docs.x.ai/docs/key-information/regions Regional endpoints
|
24
|
+
def initialize(host: "api.x.ai", **)
|
25
|
+
super
|
26
|
+
end
|
27
|
+
|
28
|
+
##
|
29
|
+
# @raise [NotImplementedError]
|
30
|
+
def files
|
31
|
+
raise NotImplementedError
|
32
|
+
end
|
33
|
+
|
34
|
+
##
|
35
|
+
# @return [LLM::XAI::Images]
|
36
|
+
def images
|
37
|
+
LLM::XAI::Images.new(self)
|
38
|
+
end
|
39
|
+
|
40
|
+
##
|
41
|
+
# @raise [NotImplementedError]
|
42
|
+
def audio
|
43
|
+
raise NotImplementedError
|
44
|
+
end
|
45
|
+
|
46
|
+
##
|
47
|
+
# @raise [NotImplementedError]
|
48
|
+
def moderations
|
49
|
+
raise NotImplementedError
|
50
|
+
end
|
51
|
+
|
52
|
+
##
|
53
|
+
# @raise [NotImplementedError]
|
54
|
+
def responses
|
55
|
+
raise NotImplementedError
|
56
|
+
end
|
57
|
+
|
58
|
+
##
|
59
|
+
# @raise [NotImplementedError]
|
60
|
+
def vector_stores
|
61
|
+
raise NotImplementedError
|
62
|
+
end
|
63
|
+
|
64
|
+
##
|
65
|
+
# Returns the default model for chat completions
|
66
|
+
# @see https://docs.x.ai/docs/models grok-4-0709
|
67
|
+
# @return [String]
|
68
|
+
def default_model
|
69
|
+
"grok-4-0709"
|
70
|
+
end
|
71
|
+
end
|
72
|
+
end
|
data/lib/llm/response.rb
CHANGED
@@ -1,6 +1,11 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
module LLM
|
4
|
+
##
|
5
|
+
# {LLM::Response LLM::Response} encapsulates a response
|
6
|
+
# from an LLM provider. It is returned by all methods
|
7
|
+
# that make requests to a provider, and sometimes extended
|
8
|
+
# with provider-specific functionality.
|
4
9
|
class Response
|
5
10
|
require "json"
|
6
11
|
|
data/lib/llm/utils.rb
CHANGED
File without changes
|
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -17,7 +17,7 @@ module LLM
|
|
17
17
|
require_relative "llm/buffer"
|
18
18
|
require_relative "llm/function"
|
19
19
|
require_relative "llm/eventstream"
|
20
|
-
require_relative "llm/event_handler"
|
20
|
+
require_relative "llm/eventhandler"
|
21
21
|
|
22
22
|
module_function
|
23
23
|
|
@@ -69,6 +69,15 @@ module LLM
|
|
69
69
|
LLM::OpenAI.new(**)
|
70
70
|
end
|
71
71
|
|
72
|
+
##
|
73
|
+
# @param key (see LLM::XAI#initialize)
|
74
|
+
# @param host (see LLM::XAI#initialize)
|
75
|
+
# @return (see LLM::XAI#initialize)
|
76
|
+
def xai(**)
|
77
|
+
require_relative "llm/providers/xai" unless defined?(LLM::XAI)
|
78
|
+
LLM::XAI.new(**)
|
79
|
+
end
|
80
|
+
|
72
81
|
##
|
73
82
|
# Define a function
|
74
83
|
# @example
|
data/llm.gemspec
CHANGED
@@ -10,9 +10,9 @@ Gem::Specification.new do |spec|
|
|
10
10
|
|
11
11
|
spec.summary = <<~SUMMARY
|
12
12
|
llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
|
13
|
-
includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The
|
14
|
-
toolkit includes full support for chat, streaming, tool calling,
|
15
|
-
images, files, and JSON Schema generation.
|
13
|
+
includes OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and
|
14
|
+
LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
|
15
|
+
audio, images, files, and JSON Schema generation.
|
16
16
|
SUMMARY
|
17
17
|
|
18
18
|
spec.description = spec.summary
|
@@ -37,7 +37,7 @@ Gem::Specification.new do |spec|
|
|
37
37
|
spec.add_development_dependency "test-cmd.rb", "~> 0.12.0"
|
38
38
|
spec.add_development_dependency "rake", "~> 13.0"
|
39
39
|
spec.add_development_dependency "rspec", "~> 3.0"
|
40
|
-
spec.add_development_dependency "standard", "~> 1.
|
40
|
+
spec.add_development_dependency "standard", "~> 1.50"
|
41
41
|
spec.add_development_dependency "vcr", "~> 6.0"
|
42
42
|
spec.add_development_dependency "dotenv", "~> 2.8"
|
43
43
|
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llm.rb
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.11.0
|
4
|
+
version: 0.12.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Antar Azri
|
@@ -114,14 +114,14 @@ dependencies:
|
|
114
114
|
requirements:
|
115
115
|
- - "~>"
|
116
116
|
- !ruby/object:Gem::Version
|
117
|
-
version: '1.
|
117
|
+
version: '1.50'
|
118
118
|
type: :development
|
119
119
|
prerelease: false
|
120
120
|
version_requirements: !ruby/object:Gem::Requirement
|
121
121
|
requirements:
|
122
122
|
- - "~>"
|
123
123
|
- !ruby/object:Gem::Version
|
124
|
-
version: '1.
|
124
|
+
version: '1.50'
|
125
125
|
- !ruby/object:Gem::Dependency
|
126
126
|
name: vcr
|
127
127
|
requirement: !ruby/object:Gem::Requirement
|
@@ -151,9 +151,9 @@ dependencies:
|
|
151
151
|
- !ruby/object:Gem::Version
|
152
152
|
version: '2.8'
|
153
153
|
description: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
|
154
|
-
includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit
|
155
|
-
includes full support for chat, streaming, tool calling, audio, images,
|
156
|
-
files, and JSON Schema generation.
|
154
|
+
includes OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and LlamaCpp.
|
155
|
+
The toolkit includes full support for chat, streaming, tool calling, audio, images,
|
156
|
+
files, and JSON Schema generation.
|
157
157
|
email:
|
158
158
|
- azantar@proton.me
|
159
159
|
- 0x1eef@proton.me
|
@@ -171,7 +171,7 @@ files:
|
|
171
171
|
- lib/llm/bot/prompt/respond.rb
|
172
172
|
- lib/llm/buffer.rb
|
173
173
|
- lib/llm/error.rb
|
174
|
-
- lib/llm/event_handler.rb
|
174
|
+
- lib/llm/eventhandler.rb
|
175
175
|
- lib/llm/eventstream.rb
|
176
176
|
- lib/llm/eventstream/event.rb
|
177
177
|
- lib/llm/eventstream/parser.rb
|
@@ -247,6 +247,8 @@ files:
|
|
247
247
|
- lib/llm/providers/openai/responses.rb
|
248
248
|
- lib/llm/providers/openai/stream_parser.rb
|
249
249
|
- lib/llm/providers/openai/vector_stores.rb
|
250
|
+
- lib/llm/providers/xai.rb
|
251
|
+
- lib/llm/providers/xai/images.rb
|
250
252
|
- lib/llm/response.rb
|
251
253
|
- lib/llm/utils.rb
|
252
254
|
- lib/llm/version.rb
|
@@ -274,7 +276,7 @@ requirements: []
|
|
274
276
|
rubygems_version: 3.6.9
|
275
277
|
specification_version: 4
|
276
278
|
summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
|
277
|
-
OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit
|
278
|
-
full support for chat, streaming, tool calling, audio, images, files, and
|
279
|
-
generation.
|
279
|
+
OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and LlamaCpp. The toolkit
|
280
|
+
includes full support for chat, streaming, tool calling, audio, images, files, and
|
281
|
+
JSON Schema generation.
|
280
282
|
test_files: []
|