llm.rb 0.11.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LICENSE +0 -0
- data/README.md +51 -14
- data/lib/llm/bot/builder.rb +0 -0
- data/lib/llm/bot/conversable.rb +0 -0
- data/lib/llm/bot/prompt/completion.rb +0 -0
- data/lib/llm/bot/prompt/respond.rb +0 -0
- data/lib/llm/bot.rb +10 -17
- data/lib/llm/buffer.rb +7 -0
- data/lib/llm/error.rb +0 -0
- data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
- data/lib/llm/eventstream/event.rb +0 -0
- data/lib/llm/eventstream/parser.rb +0 -0
- data/lib/llm/eventstream.rb +0 -0
- data/lib/llm/file.rb +4 -3
- data/lib/llm/function.rb +2 -2
- data/lib/llm/json/schema/array.rb +0 -0
- data/lib/llm/json/schema/boolean.rb +0 -0
- data/lib/llm/json/schema/integer.rb +0 -0
- data/lib/llm/json/schema/leaf.rb +0 -0
- data/lib/llm/json/schema/null.rb +0 -0
- data/lib/llm/json/schema/number.rb +0 -0
- data/lib/llm/json/schema/object.rb +0 -0
- data/lib/llm/json/schema/string.rb +0 -0
- data/lib/llm/json/schema/version.rb +0 -0
- data/lib/llm/json/schema.rb +0 -0
- data/lib/llm/message.rb +0 -0
- data/lib/llm/mime.rb +0 -0
- data/lib/llm/multipart.rb +0 -1
- data/lib/llm/object/builder.rb +0 -0
- data/lib/llm/object/kernel.rb +0 -0
- data/lib/llm/object.rb +2 -3
- data/lib/llm/provider.rb +1 -1
- data/lib/llm/providers/anthropic/error_handler.rb +0 -0
- data/lib/llm/providers/anthropic/format/completion_format.rb +0 -0
- data/lib/llm/providers/anthropic/format.rb +0 -0
- data/lib/llm/providers/anthropic/models.rb +2 -2
- data/lib/llm/providers/anthropic/response/completion.rb +0 -0
- data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
- data/lib/llm/providers/anthropic.rb +10 -1
- data/lib/llm/providers/deepseek/format/completion_format.rb +0 -0
- data/lib/llm/providers/deepseek/format.rb +0 -0
- data/lib/llm/providers/deepseek.rb +10 -1
- data/lib/llm/providers/gemini/audio.rb +3 -3
- data/lib/llm/providers/gemini/error_handler.rb +0 -0
- data/lib/llm/providers/gemini/files.rb +8 -20
- data/lib/llm/providers/gemini/format/completion_format.rb +2 -2
- data/lib/llm/providers/gemini/format.rb +0 -0
- data/lib/llm/providers/gemini/images.rb +4 -4
- data/lib/llm/providers/gemini/models.rb +2 -2
- data/lib/llm/providers/gemini/response/completion.rb +0 -0
- data/lib/llm/providers/gemini/response/embedding.rb +1 -1
- data/lib/llm/providers/gemini/response/file.rb +0 -0
- data/lib/llm/providers/gemini/response/image.rb +0 -0
- data/lib/llm/providers/gemini/stream_parser.rb +0 -0
- data/lib/llm/providers/gemini.rb +13 -21
- data/lib/llm/providers/llamacpp.rb +12 -1
- data/lib/llm/providers/ollama/error_handler.rb +0 -0
- data/lib/llm/providers/ollama/format/completion_format.rb +0 -0
- data/lib/llm/providers/ollama/format.rb +0 -0
- data/lib/llm/providers/ollama/models.rb +0 -0
- data/lib/llm/providers/ollama/response/completion.rb +0 -0
- data/lib/llm/providers/ollama/response/embedding.rb +1 -2
- data/lib/llm/providers/ollama/stream_parser.rb +0 -0
- data/lib/llm/providers/ollama.rb +8 -11
- data/lib/llm/providers/openai/audio.rb +4 -4
- data/lib/llm/providers/openai/error_handler.rb +0 -0
- data/lib/llm/providers/openai/files.rb +8 -19
- data/lib/llm/providers/openai/format/completion_format.rb +0 -0
- data/lib/llm/providers/openai/format/moderation_format.rb +0 -0
- data/lib/llm/providers/openai/format/respond_format.rb +0 -0
- data/lib/llm/providers/openai/format.rb +0 -0
- data/lib/llm/providers/openai/images.rb +10 -10
- data/lib/llm/providers/openai/models.rb +2 -2
- data/lib/llm/providers/openai/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/audio.rb +0 -0
- data/lib/llm/providers/openai/response/completion.rb +2 -2
- data/lib/llm/providers/openai/response/embedding.rb +3 -3
- data/lib/llm/providers/openai/response/file.rb +0 -0
- data/lib/llm/providers/openai/response/image.rb +0 -0
- data/lib/llm/providers/openai/response/moderations.rb +0 -0
- data/lib/llm/providers/openai/response/responds.rb +0 -1
- data/lib/llm/providers/openai/responses.rb +6 -25
- data/lib/llm/providers/openai/stream_parser.rb +1 -0
- data/lib/llm/providers/openai/vector_stores.rb +85 -3
- data/lib/llm/providers/openai.rb +10 -1
- data/lib/llm/providers/xai/images.rb +58 -0
- data/lib/llm/providers/xai.rb +72 -0
- data/lib/llm/response.rb +5 -0
- data/lib/llm/utils.rb +0 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +10 -1
- data/llm.gemspec +4 -4
- metadata +12 -10
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 2cd935a4ccd3b911e92b5ff54335cfc143247cbb5fe55214fd563551f7349da4
|
4
|
+
data.tar.gz: c76b36f2877c0cec7fdde54471a81ae19a4ec044158077742eba9acd26cd1483
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: f654d042a6f44cba15b2dc0049d3933aa442f631293be446486e524773ff01bfc0f13f89ecbf09659a175c3ff9f7c6512ae8bde5716f6935c8d3d05528d3e4e9
|
7
|
+
data.tar.gz: 73945113e01d89188301a1a7db921c8c07ac19a847fa71528bc2e62eb5162ac656aca02a5f17e02bc4918dc973c298f0021afbb14a4cb38e7f81705950c4ed5b
|
data/LICENSE
CHANGED
File without changes
|
data/README.md
CHANGED
@@ -1,9 +1,9 @@
|
|
1
1
|
## About
|
2
2
|
|
3
3
|
llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
|
4
|
-
includes OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and
|
5
|
-
toolkit includes full support for chat, streaming, tool calling,
|
6
|
-
images, files, and JSON Schema generation.
|
4
|
+
includes OpenAI, Gemini, Anthropic, xAI (Grok), DeepSeek, Ollama, and
|
5
|
+
LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
|
6
|
+
audio, images, files, and JSON Schema generation.
|
7
7
|
|
8
8
|
## Features
|
9
9
|
|
@@ -24,10 +24,39 @@ images, files, and JSON Schema generation.
|
|
24
24
|
- 📎 File uploads and prompt-aware file interaction
|
25
25
|
- 💡 Multimodal prompts (text, images, PDFs, URLs, files)
|
26
26
|
|
27
|
-
####
|
27
|
+
#### Embeddings
|
28
28
|
- 🧮 Text embeddings and vector support
|
29
|
-
-
|
30
|
-
|
29
|
+
- 🧱 Includes support for OpenAI's vector stores API
|
30
|
+
|
31
|
+
#### Miscellaneous
|
32
|
+
- 📜 Model management and selection
|
33
|
+
- 🔧 Includes support for OpenAI's responses, moderations, and vector stores APIs
|
34
|
+
|
35
|
+
## Matrix
|
36
|
+
|
37
|
+
While the Features section above gives you the high-level picture, the table below
|
38
|
+
breaks things down by provider, so you can see exactly what’s supported where.
|
39
|
+
|
40
|
+
|
41
|
+
| Feature / Provider | OpenAI | Anthropic | Gemini | DeepSeek | xAI (Grok) | Ollama | LlamaCpp |
|
42
|
+
|--------------------------------------|:------:|:---------:|:------:|:--------:|:----------:|:------:|:--------:|
|
43
|
+
| **Chat Completions** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
44
|
+
| **Streaming** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
45
|
+
| **Tool Calling** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
46
|
+
| **JSON Schema / Structured Output** | ✅ | ❌ | ✅ | ❌ | ✅ | ✅* | ✅* |
|
47
|
+
| **Audio (TTS / Transcribe / Translate)** | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
|
48
|
+
| **Image Generation & Editing** | ✅ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ |
|
49
|
+
| **File Uploads** | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
|
50
|
+
| **Multimodal Prompts** *(text+image)* | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
51
|
+
| **Embeddings** | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
|
52
|
+
| **Models API** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
|
53
|
+
| **Local Model Support** | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
|
54
|
+
| **Vector Stores (RAG)** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
|
55
|
+
| **Responses** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
|
56
|
+
| **Moderations** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
|
57
|
+
|
58
|
+
\* JSON Schema support in Ollama/LlamaCpp depends on the model, not the API.
|
59
|
+
|
31
60
|
|
32
61
|
## Examples
|
33
62
|
|
@@ -49,6 +78,7 @@ require "llm"
|
|
49
78
|
llm = LLM.openai(key: "yourapikey")
|
50
79
|
llm = LLM.gemini(key: "yourapikey")
|
51
80
|
llm = LLM.anthropic(key: "yourapikey")
|
81
|
+
llm = LLM.xai(key: "yourapikey")
|
52
82
|
llm = LLM.deepseek(key: "yourapikey")
|
53
83
|
|
54
84
|
##
|
@@ -78,13 +108,13 @@ is made before sending a request to the LLM:
|
|
78
108
|
#!/usr/bin/env ruby
|
79
109
|
require "llm"
|
80
110
|
|
81
|
-
llm = LLM.openai(key: ENV["
|
111
|
+
llm = LLM.openai(key: ENV["KEY"])
|
82
112
|
bot = LLM::Bot.new(llm)
|
83
113
|
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
|
84
114
|
msgs = bot.chat do |prompt|
|
85
115
|
prompt.system "Your task is to answer all user queries"
|
86
116
|
prompt.user ["Tell me about this URL", URI(url)]
|
87
|
-
prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "
|
117
|
+
prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "rb")]
|
88
118
|
prompt.user "Is the URL and PDF similar to each other?"
|
89
119
|
end
|
90
120
|
|
@@ -110,13 +140,13 @@ to process a response in the same way:
|
|
110
140
|
#!/usr/bin/env ruby
|
111
141
|
require "llm"
|
112
142
|
|
113
|
-
llm = LLM.openai(key: ENV["
|
143
|
+
llm = LLM.openai(key: ENV["KEY"])
|
114
144
|
bot = LLM::Bot.new(llm)
|
115
145
|
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
|
116
146
|
bot.chat(stream: $stdout) do |prompt|
|
117
147
|
prompt.system "Your task is to answer all user queries"
|
118
148
|
prompt.user ["Tell me about this URL", URI(url)]
|
119
|
-
prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "
|
149
|
+
prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "rb")]
|
120
150
|
prompt.user "Is the URL and PDF similar to each other?"
|
121
151
|
end.to_a
|
122
152
|
```
|
@@ -137,7 +167,7 @@ require "llm"
|
|
137
167
|
##
|
138
168
|
# Objects
|
139
169
|
llm = LLM.openai(key: ENV["KEY"])
|
140
|
-
schema = llm.schema.object(
|
170
|
+
schema = llm.schema.object(probability: llm.schema.integer.required)
|
141
171
|
bot = LLM::Bot.new(llm, schema:)
|
142
172
|
bot.chat "Does the earth orbit the sun?", role: :user
|
143
173
|
bot.messages.find(&:assistant?).content! # => {probability: 1}
|
@@ -245,7 +275,7 @@ to represent a file stored with the LLM, and so on. These are objects you
|
|
245
275
|
can throw at the prompt and have them be understood automatically.
|
246
276
|
|
247
277
|
A prompt can also have multiple parts, and in that case, an array is given
|
248
|
-
as a prompt. Each element is considered to part of the prompt:
|
278
|
+
as a prompt. Each element is considered to be part of the prompt:
|
249
279
|
|
250
280
|
```ruby
|
251
281
|
#!/usr/bin/env ruby
|
@@ -463,8 +493,15 @@ over or doesn't cover at all. The API reference is available at
|
|
463
493
|
|
464
494
|
### Guides
|
465
495
|
|
466
|
-
|
467
|
-
|
496
|
+
* [An introduction to RAG with llm.rb](https://0x1eef.github.io/posts/an-introduction-to-rag-with-llm.rb/) –
|
497
|
+
a blog post that implements the RAG pattern in 32 lines of Ruby code
|
498
|
+
* [docs/](docs/) – the docs directory contains additional guides
|
499
|
+
|
500
|
+
|
501
|
+
## See also
|
502
|
+
|
503
|
+
* [llm-shell](https://github.com/llmrb/llm-shell) – a shell that uses llm.rb to
|
504
|
+
provide a command-line interface to LLMs.
|
468
505
|
|
469
506
|
## Install
|
470
507
|
|
data/lib/llm/bot/builder.rb
CHANGED
File without changes
|
data/lib/llm/bot/conversable.rb
CHANGED
File without changes
|
File without changes
|
File without changes
|
data/lib/llm/bot.rb
CHANGED
@@ -3,33 +3,26 @@
|
|
3
3
|
module LLM
|
4
4
|
##
|
5
5
|
# {LLM::Bot LLM::Bot} provides an object that can maintain a
|
6
|
-
#
|
6
|
+
# conversation. A conversation can use the chat completions API
|
7
7
|
# that all LLM providers support or the responses API that currently
|
8
8
|
# only OpenAI supports.
|
9
9
|
#
|
10
|
-
# @example
|
10
|
+
# @example
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
#
|
14
|
-
# llm = LLM.openai(ENV["KEY"])
|
14
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
15
15
|
# bot = LLM::Bot.new(llm)
|
16
|
+
# url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
|
16
17
|
# msgs = bot.chat do |prompt|
|
17
|
-
# prompt.
|
18
|
-
# prompt.user "
|
19
|
-
# prompt.user "
|
18
|
+
# prompt.system "Your task is to answer all user queries"
|
19
|
+
# prompt.user ["Tell me about this URL", URI(url)]
|
20
|
+
# prompt.user ["Tell me about this pdf", File.open("freebsd_book.pdf", "rb")]
|
21
|
+
# prompt.user "Is the URL and PDF similar to each other?"
|
20
22
|
# end
|
21
|
-
# msgs.each { print "[#{_1.role}]", _1.content, "\n" }
|
22
23
|
#
|
23
|
-
#
|
24
|
-
#
|
25
|
-
# require "llm"
|
26
|
-
#
|
27
|
-
# llm = LLM.openai(ENV["KEY"])
|
28
|
-
# bot = LLM::Bot.new(llm)
|
29
|
-
# bot.chat "What programming language should I learn next ?", role: :user
|
30
|
-
# bot.chat "Can you recommend a good book ?", role: :user
|
31
|
-
# bot.chat "Can you suggest a fun project to practice ?", role: :user
|
32
|
-
# bot.messages.each { print "[#{_1.role}]", _1.content, "\n" }
|
24
|
+
# # At this point, we execute a single request
|
25
|
+
# msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
|
33
26
|
class Bot
|
34
27
|
require_relative "bot/prompt/completion"
|
35
28
|
require_relative "bot/prompt/respond"
|
data/lib/llm/buffer.rb
CHANGED
data/lib/llm/error.rb
CHANGED
File without changes
|
File without changes
|
File without changes
|
File without changes
|
data/lib/llm/eventstream.rb
CHANGED
File without changes
|
data/lib/llm/file.rb
CHANGED
@@ -1,9 +1,10 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
##
|
4
|
-
#
|
5
|
-
#
|
6
|
-
# and as an input with certain methods
|
4
|
+
# {LLM::File LLM::File} represents a local file. It can be used
|
5
|
+
# as a prompt with certain providers (eg: Ollama, Gemini),
|
6
|
+
# and as an input with certain methods. It is usually not necessary
|
7
|
+
# to create an instance of LLM::File directly.
|
7
8
|
class LLM::File
|
8
9
|
##
|
9
10
|
# @return [String]
|
data/lib/llm/function.rb
CHANGED
@@ -1,8 +1,8 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
##
|
4
|
-
# The {LLM::Function LLM::Function} class represents a
|
5
|
-
#
|
4
|
+
# The {LLM::Function LLM::Function} class represents a local
|
5
|
+
# function that can be called by an LLM.
|
6
6
|
#
|
7
7
|
# @example example #1
|
8
8
|
# LLM.function(:system) do |fn|
|
File without changes
|
File without changes
|
File without changes
|
data/lib/llm/json/schema/leaf.rb
CHANGED
File without changes
|
data/lib/llm/json/schema/null.rb
CHANGED
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
data/lib/llm/json/schema.rb
CHANGED
File without changes
|
data/lib/llm/message.rb
CHANGED
File without changes
|
data/lib/llm/mime.rb
CHANGED
File without changes
|
data/lib/llm/multipart.rb
CHANGED
data/lib/llm/object/builder.rb
CHANGED
File without changes
|
data/lib/llm/object/kernel.rb
CHANGED
File without changes
|
data/lib/llm/object.rb
CHANGED
@@ -1,10 +1,9 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
##
|
4
|
-
# The {LLM::Object LLM::Object} class encapsulates a Hash object
|
5
|
-
# allows a consumer to get and set Hash keys via regular methods. It is
|
4
|
+
# The {LLM::Object LLM::Object} class encapsulates a Hash object. It is
|
6
5
|
# similar in spirit to OpenStruct, and it was introduced after OpenStruct
|
7
|
-
# became a bundled gem
|
6
|
+
# became a bundled gem rather than a default gem in Ruby 3.5.
|
8
7
|
class LLM::Object < BasicObject
|
9
8
|
require_relative "object/builder"
|
10
9
|
require_relative "object/kernel"
|
data/lib/llm/provider.rb
CHANGED
@@ -52,7 +52,7 @@ class LLM::Provider
|
|
52
52
|
##
|
53
53
|
# Provides an interface to the chat completions API
|
54
54
|
# @example
|
55
|
-
# llm = LLM.openai(ENV["KEY"])
|
55
|
+
# llm = LLM.openai(key: ENV["KEY"])
|
56
56
|
# messages = [{role: "system", content: "Your task is to answer all of my questions"}]
|
57
57
|
# res = llm.complete("5 + 2 ?", messages:)
|
58
58
|
# print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
|
File without changes
|
File without changes
|
File without changes
|
@@ -11,7 +11,7 @@ class LLM::Anthropic
|
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
#
|
14
|
-
# llm = LLM.anthropic(ENV["KEY"])
|
14
|
+
# llm = LLM.anthropic(key: ENV["KEY"])
|
15
15
|
# res = llm.models.all
|
16
16
|
# res.each do |model|
|
17
17
|
# print "id: ", model.id, "\n"
|
@@ -28,7 +28,7 @@ class LLM::Anthropic
|
|
28
28
|
##
|
29
29
|
# List all models
|
30
30
|
# @example
|
31
|
-
# llm = LLM.anthropic(ENV["KEY"])
|
31
|
+
# llm = LLM.anthropic(key: ENV["KEY"])
|
32
32
|
# res = llm.models.all
|
33
33
|
# res.each do |model|
|
34
34
|
# print "id: ", model.id, "\n"
|
File without changes
|
File without changes
|
@@ -3,7 +3,16 @@
|
|
3
3
|
module LLM
|
4
4
|
##
|
5
5
|
# The Anthropic class implements a provider for
|
6
|
-
# [Anthropic](https://www.anthropic.com)
|
6
|
+
# [Anthropic](https://www.anthropic.com).
|
7
|
+
#
|
8
|
+
# @example
|
9
|
+
# #!/usr/bin/env ruby
|
10
|
+
# require "llm"
|
11
|
+
#
|
12
|
+
# llm = LLM.anthropic(key: ENV["KEY"])
|
13
|
+
# bot = LLM::Bot.new(llm)
|
14
|
+
# bot.chat ["Tell me about this photo", File.open("/images/dog.jpg", "rb")]
|
15
|
+
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
7
16
|
class Anthropic < Provider
|
8
17
|
require_relative "anthropic/response/completion"
|
9
18
|
require_relative "anthropic/format"
|
File without changes
|
File without changes
|
@@ -6,8 +6,17 @@ module LLM
|
|
6
6
|
##
|
7
7
|
# The DeepSeek class implements a provider for
|
8
8
|
# [DeepSeek](https://deepseek.com)
|
9
|
-
# through its OpenAI-compatible API
|
9
|
+
# through its OpenAI-compatible API available via
|
10
10
|
# their [web platform](https://platform.deepseek.com).
|
11
|
+
#
|
12
|
+
# @example
|
13
|
+
# #!/usr/bin/env ruby
|
14
|
+
# require "llm"
|
15
|
+
#
|
16
|
+
# llm = LLM.deepseek(key: ENV["KEY"])
|
17
|
+
# bot = LLM::Bot.new(llm)
|
18
|
+
# bot.chat ["Tell me about this photo", File.open("/images/cat.jpg", "rb")]
|
19
|
+
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
11
20
|
class DeepSeek < OpenAI
|
12
21
|
require_relative "deepseek/format"
|
13
22
|
include DeepSeek::Format
|
@@ -8,7 +8,7 @@ class LLM::Gemini
|
|
8
8
|
# #!/usr/bin/env ruby
|
9
9
|
# require "llm"
|
10
10
|
#
|
11
|
-
# llm = LLM.gemini(ENV["KEY"])
|
11
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
12
12
|
# res = llm.audio.create_transcription(input: "/audio/rocket.mp3")
|
13
13
|
# res.text # => "A dog on a rocket to the moon"
|
14
14
|
class Audio
|
@@ -30,7 +30,7 @@ class LLM::Gemini
|
|
30
30
|
##
|
31
31
|
# Create an audio transcription
|
32
32
|
# @example
|
33
|
-
# llm = LLM.gemini(ENV["KEY"])
|
33
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
34
34
|
# res = llm.audio.create_transcription(file: "/audio/rocket.mp3")
|
35
35
|
# res.text # => "A dog on a rocket to the moon"
|
36
36
|
# @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
|
@@ -52,7 +52,7 @@ class LLM::Gemini
|
|
52
52
|
# Create an audio translation (in English)
|
53
53
|
# @example
|
54
54
|
# # Arabic => English
|
55
|
-
# llm = LLM.gemini(ENV["KEY"])
|
55
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
56
56
|
# res = llm.audio.create_translation(file: "/audio/bismillah.mp3")
|
57
57
|
# res.text # => "In the name of Allah, the Beneficent, the Merciful."
|
58
58
|
# @see https://ai.google.dev/gemini-api/docs/audio Gemini docs
|
File without changes
|
@@ -17,22 +17,10 @@ class LLM::Gemini
|
|
17
17
|
# #!/usr/bin/env ruby
|
18
18
|
# require "llm"
|
19
19
|
#
|
20
|
-
# llm = LLM.gemini(ENV["KEY"])
|
20
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
21
21
|
# bot = LLM::Bot.new(llm)
|
22
|
-
# file = llm.files.create
|
23
|
-
# bot.chat
|
24
|
-
# bot.chat("Describe the audio file I sent to you")
|
25
|
-
# bot.chat("The audio file is the first message I sent to you.")
|
26
|
-
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
27
|
-
#
|
28
|
-
# @example example #2
|
29
|
-
# #!/usr/bin/env ruby
|
30
|
-
# require "llm"
|
31
|
-
#
|
32
|
-
# llm = LLM.gemini(ENV["KEY"])
|
33
|
-
# bot = LLM::Bot.new(llm)
|
34
|
-
# file = llm.files.create file: "/audio/haiku.mp3"
|
35
|
-
# bot.chat(["Describe the audio file I sent to you", file])
|
22
|
+
# file = llm.files.create(file: "/audio/haiku.mp3")
|
23
|
+
# bot.chat ["Tell me about this file", file]
|
36
24
|
# bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
|
37
25
|
class Files
|
38
26
|
require_relative "response/file"
|
@@ -48,7 +36,7 @@ class LLM::Gemini
|
|
48
36
|
##
|
49
37
|
# List all files
|
50
38
|
# @example
|
51
|
-
# llm = LLM.gemini(ENV["KEY"])
|
39
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
52
40
|
# res = llm.files.all
|
53
41
|
# res.each do |file|
|
54
42
|
# print "name: ", file.name, "\n"
|
@@ -67,8 +55,8 @@ class LLM::Gemini
|
|
67
55
|
##
|
68
56
|
# Create a file
|
69
57
|
# @example
|
70
|
-
# llm = LLM.gemini(ENV["KEY"])
|
71
|
-
# res = llm.files.create
|
58
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
59
|
+
# res = llm.files.create(file: "/audio/haiku.mp3")
|
72
60
|
# @see https://ai.google.dev/gemini-api/docs/files Gemini docs
|
73
61
|
# @param [String, LLM::File] file The file
|
74
62
|
# @param [Hash] params Other parameters (see Gemini docs)
|
@@ -90,7 +78,7 @@ class LLM::Gemini
|
|
90
78
|
##
|
91
79
|
# Get a file
|
92
80
|
# @example
|
93
|
-
# llm = LLM.gemini(ENV["KEY"])
|
81
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
94
82
|
# res = llm.files.get(file: "files/1234567890")
|
95
83
|
# print "name: ", res.name, "\n"
|
96
84
|
# @see https://ai.google.dev/gemini-api/docs/files Gemini docs
|
@@ -109,7 +97,7 @@ class LLM::Gemini
|
|
109
97
|
##
|
110
98
|
# Delete a file
|
111
99
|
# @example
|
112
|
-
# llm = LLM.gemini(ENV["KEY"])
|
100
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
113
101
|
# res = llm.files.delete(file: "files/1234567890")
|
114
102
|
# @see https://ai.google.dev/gemini-api/docs/files Gemini docs
|
115
103
|
# @param [#name, String] file The file to delete
|
@@ -59,8 +59,8 @@ module LLM::Gemini::Format
|
|
59
59
|
end
|
60
60
|
|
61
61
|
def prompt_error!(object)
|
62
|
-
|
63
|
-
|
62
|
+
raise LLM::PromptError, "The given object (an instance of #{object.class}) " \
|
63
|
+
"is not supported by the Gemini API"
|
64
64
|
end
|
65
65
|
|
66
66
|
def message = @message
|
File without changes
|
@@ -11,7 +11,7 @@ class LLM::Gemini
|
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
#
|
14
|
-
# llm = LLM.gemini(ENV["KEY"])
|
14
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
15
15
|
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
16
16
|
# IO.copy_stream res.images[0], "rocket.png"
|
17
17
|
class Images
|
@@ -29,7 +29,7 @@ class LLM::Gemini
|
|
29
29
|
##
|
30
30
|
# Create an image
|
31
31
|
# @example
|
32
|
-
# llm = LLM.gemini(ENV["KEY"])
|
32
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
33
33
|
# res = llm.images.create prompt: "A dog on a rocket to the moon"
|
34
34
|
# IO.copy_stream res.images[0], "rocket.png"
|
35
35
|
# @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
|
@@ -55,8 +55,8 @@ class LLM::Gemini
|
|
55
55
|
##
|
56
56
|
# Edit an image
|
57
57
|
# @example
|
58
|
-
# llm = LLM.gemini(ENV["KEY"])
|
59
|
-
# res = llm.images.edit image:
|
58
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
59
|
+
# res = llm.images.edit image: "cat.png", prompt: "Add a hat to the cat"
|
60
60
|
# IO.copy_stream res.images[0], "hatoncat.png"
|
61
61
|
# @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
|
62
62
|
# @param [String, LLM::File] image The image to edit
|
@@ -11,7 +11,7 @@ class LLM::Gemini
|
|
11
11
|
# #!/usr/bin/env ruby
|
12
12
|
# require "llm"
|
13
13
|
#
|
14
|
-
# llm = LLM.gemini(ENV["KEY"])
|
14
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
15
15
|
# res = llm.models.all
|
16
16
|
# res.each do |model|
|
17
17
|
# print "id: ", model.id, "\n"
|
@@ -30,7 +30,7 @@ class LLM::Gemini
|
|
30
30
|
##
|
31
31
|
# List all models
|
32
32
|
# @example
|
33
|
-
# llm = LLM.gemini(ENV["KEY"])
|
33
|
+
# llm = LLM.gemini(key: ENV["KEY"])
|
34
34
|
# res = llm.models.all
|
35
35
|
# res.each do |model|
|
36
36
|
# print "id: ", model.id, "\n"
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|