llm.rb 0.12.0 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +27 -19
  3. data/lib/llm/bot/conversable.rb +12 -4
  4. data/lib/llm/bot/prompt/completion.rb +18 -0
  5. data/lib/llm/bot/prompt/respond.rb +9 -0
  6. data/lib/llm/bot.rb +3 -3
  7. data/lib/llm/buffer.rb +31 -7
  8. data/lib/llm/error.rb +4 -0
  9. data/lib/llm/file.rb +1 -1
  10. data/lib/llm/function.rb +2 -2
  11. data/lib/llm/mime.rb +92 -6
  12. data/lib/llm/provider.rb +4 -3
  13. data/lib/llm/providers/anthropic/error_handler.rb +2 -0
  14. data/lib/llm/providers/anthropic/files.rb +155 -0
  15. data/lib/llm/providers/anthropic/format/completion_format.rb +12 -2
  16. data/lib/llm/providers/anthropic/models.rb +2 -1
  17. data/lib/llm/providers/anthropic/response/enumerable.rb +11 -0
  18. data/lib/llm/providers/anthropic/response/file.rb +23 -0
  19. data/lib/llm/providers/anthropic.rb +11 -1
  20. data/lib/llm/providers/gemini/error_handler.rb +2 -0
  21. data/lib/llm/providers/gemini/files.rb +2 -1
  22. data/lib/llm/providers/gemini/models.rb +2 -1
  23. data/lib/llm/providers/gemini/response/completion.rb +2 -0
  24. data/lib/llm/providers/gemini/response/files.rb +15 -0
  25. data/lib/llm/providers/gemini/response/models.rb +15 -0
  26. data/lib/llm/providers/ollama/error_handler.rb +2 -0
  27. data/lib/llm/providers/openai/error_handler.rb +13 -1
  28. data/lib/llm/providers/openai/files.rb +2 -1
  29. data/lib/llm/providers/openai/models.rb +3 -1
  30. data/lib/llm/providers/openai/response/enumerable.rb +11 -0
  31. data/lib/llm/providers/openai/vector_stores.rb +5 -3
  32. data/lib/llm/providers/xai/images.rb +1 -1
  33. data/lib/llm/{json/schema → schema}/array.rb +3 -3
  34. data/lib/llm/{json/schema → schema}/boolean.rb +3 -3
  35. data/lib/llm/{json/schema → schema}/integer.rb +6 -6
  36. data/lib/llm/{json/schema → schema}/leaf.rb +9 -9
  37. data/lib/llm/{json/schema → schema}/null.rb +3 -3
  38. data/lib/llm/{json/schema → schema}/number.rb +6 -6
  39. data/lib/llm/{json/schema → schema}/object.rb +3 -3
  40. data/lib/llm/{json/schema → schema}/string.rb +5 -5
  41. data/lib/llm/{json/schema → schema}/version.rb +1 -1
  42. data/lib/llm/{json/schema.rb → schema.rb} +10 -13
  43. data/lib/llm/version.rb +1 -1
  44. data/lib/llm.rb +1 -1
  45. data/llm.gemspec +1 -1
  46. metadata +19 -13
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 2cd935a4ccd3b911e92b5ff54335cfc143247cbb5fe55214fd563551f7349da4
4
- data.tar.gz: c76b36f2877c0cec7fdde54471a81ae19a4ec044158077742eba9acd26cd1483
3
+ metadata.gz: 587a0b7425102e44f79107bd5f0471ba2cb8f88d95fe28af5e1d334093a539e3
4
+ data.tar.gz: 73242cb4daa8890a1f16d578c0e6ff50d54651162d11132ce06d9fb571fec062
5
5
  SHA512:
6
- metadata.gz: f654d042a6f44cba15b2dc0049d3933aa442f631293be446486e524773ff01bfc0f13f89ecbf09659a175c3ff9f7c6512ae8bde5716f6935c8d3d05528d3e4e9
7
- data.tar.gz: 73945113e01d89188301a1a7db921c8c07ac19a847fa71528bc2e62eb5162ac656aca02a5f17e02bc4918dc973c298f0021afbb14a4cb38e7f81705950c4ed5b
6
+ metadata.gz: 1f62948774f2cc389ec8b1d53886785667c95d4bd98d3f648307c07342b004c080feb40ae8baa9bda0cb6cf0d3cbeec33f084e9cd5a0a22622a07519a772c0b4
7
+ data.tar.gz: 92ce35a290004371ed6ac8ef11d4a41344d85170f398d69ebf2d5251a1a73fc9db79e3065ab5e8d32945070854362f415276a00ac56a4d4e916cd5752f12b5d4
data/README.md CHANGED
@@ -3,14 +3,18 @@
3
3
  llm.rb is a zero-dependency Ruby toolkit for Large Language Models that
4
4
  includes OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and
5
5
  LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
6
- audio, images, files, and JSON Schema generation.
6
+ audio, images, files, and structured outputs (JSON Schema).
7
+
8
+ The library provides a common, uniform interface for all the providers and
9
+ features it supports, in addition to provider-specific features as well. Keep
10
+ reading to find out more.
7
11
 
8
12
  ## Features
9
13
 
10
14
  #### General
11
15
  - ✅ A single unified interface for multiple providers
12
16
  - 📦 Zero dependencies outside Ruby's standard library
13
- - 🚀 Efficient API design that minimizes the number of requests made
17
+ - 🚀 Smart API design that minimizes the number of requests made
14
18
 
15
19
  #### Chat, Agents
16
20
  - 🧠 Stateless and stateful chat via completions and responses API
@@ -22,7 +26,7 @@ audio, images, files, and JSON Schema generation.
22
26
  - 🗣️ Text-to-speech, transcription, and translation
23
27
  - 🖼️ Image generation, editing, and variation support
24
28
  - 📎 File uploads and prompt-aware file interaction
25
- - 💡 Multimodal prompts (text, images, PDFs, URLs, files)
29
+ - 💡 Multimodal prompts (text, documents, audio, images, videos, URLs, etc)
26
30
 
27
31
  #### Embeddings
28
32
  - 🧮 Text embeddings and vector support
@@ -44,12 +48,12 @@ breaks things down by provider, so you can see exactly what’s supported where.
44
48
  | **Streaming** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
45
49
  | **Tool Calling** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
46
50
  | **JSON Schema / Structured Output** | ✅ | ❌ | ✅ | ❌ | ✅ | ✅* | ✅* |
51
+ | **Embeddings** | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
52
+ | **Multimodal Prompts** *(text, documents, audio, images, videos, URLs, etc)* | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
53
+ | **Files API** | ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ❌ |
54
+ | **Models API** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
47
55
  | **Audio (TTS / Transcribe / Translate)** | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
48
56
  | **Image Generation & Editing** | ✅ | ❌ | ✅ | ❌ | ✅ | ❌ | ❌ |
49
- | **File Uploads** | ✅ | ❌ | ✅ | ❌ | ❌ | ❌ | ❌ |
50
- | **Multimodal Prompts** *(text+image)* | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
51
- | **Embeddings** | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
52
- | **Models API** | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
53
57
  | **Local Model Support** | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | ✅ |
54
58
  | **Vector Stores (RAG)** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
55
59
  | **Responses** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ |
@@ -110,12 +114,12 @@ require "llm"
110
114
 
111
115
  llm = LLM.openai(key: ENV["KEY"])
112
116
  bot = LLM::Bot.new(llm)
113
- url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
117
+ url = "https://en.wikipedia.org/wiki/Special:FilePath/Cognac_glass.jpg"
114
118
  msgs = bot.chat do |prompt|
115
119
  prompt.system "Your task is to answer all user queries"
116
120
  prompt.user ["Tell me about this URL", URI(url)]
117
- prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "rb")]
118
- prompt.user "Is the URL and PDF similar to each other?"
121
+ prompt.user ["Tell me about this PDF", File.open("handbook.pdf", "rb")]
122
+ prompt.user "Are the URL and PDF similar to each other?"
119
123
  end
120
124
 
121
125
  # At this point, we execute a single request
@@ -142,12 +146,12 @@ require "llm"
142
146
 
143
147
  llm = LLM.openai(key: ENV["KEY"])
144
148
  bot = LLM::Bot.new(llm)
145
- url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
149
+ url = "https://en.wikipedia.org/wiki/Special:FilePath/Cognac_glass.jpg"
146
150
  bot.chat(stream: $stdout) do |prompt|
147
151
  prompt.system "Your task is to answer all user queries"
148
152
  prompt.user ["Tell me about this URL", URI(url)]
149
- prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "rb")]
150
- prompt.user "Is the URL and PDF similar to each other?"
153
+ prompt.user ["Tell me about this PDF", File.open("handbook.pdf", "rb")]
154
+ prompt.user "Are the URL and PDF similar to each other?"
151
155
  end.to_a
152
156
  ```
153
157
 
@@ -167,10 +171,10 @@ require "llm"
167
171
  ##
168
172
  # Objects
169
173
  llm = LLM.openai(key: ENV["KEY"])
170
- schema = llm.schema.object(probability: llm.schema.integer.required)
174
+ schema = llm.schema.object(probability: llm.schema.number.required)
171
175
  bot = LLM::Bot.new(llm, schema:)
172
176
  bot.chat "Does the earth orbit the sun?", role: :user
173
- bot.messages.find(&:assistant?).content! # => {probability: 1}
177
+ bot.messages.find(&:assistant?).content! # => {probability: 1.0}
174
178
 
175
179
  ##
176
180
  # Enums
@@ -259,7 +263,7 @@ require "llm"
259
263
  llm = LLM.openai(key: ENV["KEY"])
260
264
  bot = LLM::Bot.new(llm)
261
265
  file = llm.files.create(file: "/books/goodread.pdf")
262
- bot.chat(["Tell me about this file", file])
266
+ bot.chat ["Tell me about this file", file]
263
267
  bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
264
268
  ```
265
269
 
@@ -477,7 +481,7 @@ end
477
481
  ##
478
482
  # Select a model
479
483
  model = llm.models.all.find { |m| m.id == "gpt-3.5-turbo" }
480
- bot = LLM::Bot.new(llm, model:)
484
+ bot = LLM::Bot.new(llm, model: model.id)
481
485
  bot.chat "Hello #{model.id} :)"
482
486
  bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
483
487
  ```
@@ -493,8 +497,12 @@ over or doesn't cover at all. The API reference is available at
493
497
 
494
498
  ### Guides
495
499
 
496
- * [An introduction to RAG with llm.rb](https://0x1eef.github.io/posts/an-introduction-to-rag-with-llm.rb/) –
497
- a blog post that implements the RAG pattern in 32 lines of Ruby code
500
+ * [An introduction to RAG](https://0x1eef.github.io/posts/an-introduction-to-rag-with-llm.rb/) –
501
+ a blog post that implements the RAG pattern
502
+ * [How to estimate the age of a person in a photo](https://0x1eef.github.io/posts/age-estimation-with-llm.rb/) –
503
+ a blog post that implements an age estimation tool
504
+ * [How to edit an image with Gemini](https://0x1eef.github.io/posts/how-to-edit-images-with-gemini/) –
505
+ a blog post that implements image editing with Gemini
498
506
  * [docs/](docs/) – the docs directory contains additional guides
499
507
 
500
508
 
@@ -12,8 +12,12 @@ class LLM::Bot
12
12
  # @param [Hash] params
13
13
  # @return [void]
14
14
  def async_response(prompt, params = {})
15
- role = params.delete(:role)
16
- @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
15
+ if Array === prompt and prompt.empty?
16
+ @messages
17
+ else
18
+ role = params.delete(:role)
19
+ @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
20
+ end
17
21
  end
18
22
 
19
23
  ##
@@ -22,8 +26,12 @@ class LLM::Bot
22
26
  # @param [Hash] params
23
27
  # @return [void]
24
28
  def async_completion(prompt, params = {})
25
- role = params.delete(:role)
26
- @messages.push [LLM::Message.new(role, prompt), @params.merge(params), :complete]
29
+ if Array === prompt and prompt.empty?
30
+ @messages
31
+ else
32
+ role = params.delete(:role)
33
+ @messages << [LLM::Message.new(role, prompt), @params.merge(params), :complete]
34
+ end
27
35
  end
28
36
  end
29
37
  end
@@ -27,5 +27,23 @@ module LLM::Bot::Prompt
27
27
  params = defaults.merge(params)
28
28
  bot.chat prompt, params.merge(role: :user)
29
29
  end
30
+
31
+ ##
32
+ # @param [String] prompt
33
+ # @param [Hash] params (see LLM::Provider#complete)
34
+ # @return [LLM::Bot]
35
+ def assistant(prompt, params = {})
36
+ params = defaults.merge(params)
37
+ bot.chat prompt, params.merge(role: :assistant)
38
+ end
39
+
40
+ ##
41
+ # @param [String] prompt
42
+ # @param [Hash] params (see LLM::Provider#complete)
43
+ # @return [LLM::Bot]
44
+ def model(prompt, params = {})
45
+ params = defaults.merge(params)
46
+ bot.chat prompt, params.merge(role: :model)
47
+ end
30
48
  end
31
49
  end
@@ -36,5 +36,14 @@ module LLM::Bot::Prompt
36
36
  params = defaults.merge(params)
37
37
  bot.respond prompt, params.merge(role: :user)
38
38
  end
39
+
40
+ ##
41
+ # @param [String] prompt
42
+ # @param [Hash] params (see LLM::Provider#complete)
43
+ # @return [LLM::Bot]
44
+ def assistant(prompt, params = {})
45
+ params = defaults.merge(params)
46
+ bot.chat prompt, params.merge(role: :assistant)
47
+ end
39
48
  end
40
49
  end
data/lib/llm/bot.rb CHANGED
@@ -13,12 +13,12 @@ module LLM
13
13
  #
14
14
  # llm = LLM.openai(key: ENV["KEY"])
15
15
  # bot = LLM::Bot.new(llm)
16
- # url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
16
+ # url = "https://en.wikipedia.org/wiki/Special:FilePath/Cognac_glass.jpg"
17
17
  # msgs = bot.chat do |prompt|
18
18
  # prompt.system "Your task is to answer all user queries"
19
19
  # prompt.user ["Tell me about this URL", URI(url)]
20
- # prompt.user ["Tell me about this pdf", File.open("freebsd_book.pdf", "rb")]
21
- # prompt.user "Is the URL and PDF similar to each other?"
20
+ # prompt.user ["Tell me about this PDF", File.open("handbook.pdf", "rb")]
21
+ # prompt.user "Are the URL and PDF similar to each other?"
22
22
  # end
23
23
  #
24
24
  # # At this point, we execute a single request
data/lib/llm/buffer.rb CHANGED
@@ -55,7 +55,7 @@ module LLM
55
55
  end
56
56
 
57
57
  ##
58
- # @param [[LLM::Message, Hash]] item
58
+ # @param [[LLM::Message, Hash, Symbol]] item
59
59
  # A message and its parameters
60
60
  # @return [void]
61
61
  def <<(item)
@@ -80,6 +80,27 @@ module LLM
80
80
  "completed_count=#{@completed.size} pending_count=#{@pending.size}>"
81
81
  end
82
82
 
83
+ ##
84
+ # Returns true when the buffer is empty
85
+ # @return [Boolean]
86
+ def empty?
87
+ @pending.empty? and @completed.empty?
88
+ end
89
+
90
+ ##
91
+ # @example
92
+ # llm = LLM.openai(key: ENV["KEY"])
93
+ # bot = LLM::Bot.new(llm, stream: $stdout)
94
+ # bot.chat "Hello", role: :user
95
+ # bot.messages.drain
96
+ # @note
97
+ # This method is especially useful when using the streaming API.
98
+ # Drains the buffer and returns all messages as an array
99
+ # @return [Array<LLM::Message>]
100
+ def drain
101
+ to_a
102
+ end
103
+
83
104
  private
84
105
 
85
106
  def empty!
@@ -94,23 +115,26 @@ module LLM
94
115
  end
95
116
 
96
117
  def complete!(message, params)
97
- pendings = @pending.map { _1[0] }
98
- messages = [*@completed, *pendings]
118
+ oldparams = @pending.map { _1[1] }
119
+ pendings = @pending.map { _1[0] }
120
+ messages = [*@completed, *pendings]
99
121
  role = message.role
100
122
  completion = @provider.complete(
101
123
  message.content,
102
- params.merge(role:, messages:)
124
+ [*oldparams, params.merge(role:, messages:)].inject({}, &:merge!)
103
125
  )
104
126
  @completed.concat([*pendings, message, *completion.choices[0]])
105
127
  @pending.clear
106
128
  end
107
129
 
108
130
  def respond!(message, params)
109
- pendings = @pending.map { _1[0] }
110
- input = [*pendings]
131
+ oldparams = @pending.map { _1[1] }
132
+ pendings = @pending.map { _1[0] }
133
+ messages = [*pendings]
111
134
  role = message.role
112
135
  params = [
113
- params.merge(input:),
136
+ *oldparams,
137
+ params.merge(input: messages),
114
138
  @response ? {previous_response_id: @response.id} : {}
115
139
  ].inject({}, &:merge!)
116
140
  @response = @provider.responses.create(message.content, params.merge(role:))
data/lib/llm/error.rb CHANGED
@@ -31,6 +31,10 @@ module LLM
31
31
  # HTTPTooManyRequests
32
32
  RateLimitError = Class.new(ResponseError)
33
33
 
34
+ ##
35
+ # HTTPServerError
36
+ ServerError = Class.new(ResponseError)
37
+
34
38
  ##
35
39
  # When an given an input object that is not understood
36
40
  FormatError = Class.new(Error)
data/lib/llm/file.rb CHANGED
@@ -26,7 +26,7 @@ class LLM::File
26
26
  # @return [String]
27
27
  # Returns the MIME type of the file
28
28
  def mime_type
29
- LLM::Mime[File.extname(path)]
29
+ LLM::Mime[path]
30
30
  end
31
31
 
32
32
  ##
data/lib/llm/function.rb CHANGED
@@ -53,7 +53,7 @@ class LLM::Function
53
53
  # @yieldparam [LLM::Function] self The function object
54
54
  def initialize(name, &b)
55
55
  @name = name
56
- @schema = JSON::Schema.new
56
+ @schema = LLM::Schema.new
57
57
  @called = false
58
58
  @cancelled = false
59
59
  yield(self)
@@ -72,7 +72,7 @@ class LLM::Function
72
72
  end
73
73
 
74
74
  ##
75
- # @yieldparam [JSON::Schema] schema The schema object
75
+ # @yieldparam [LLM::Schema] schema The schema object
76
76
  # @return [void]
77
77
  def params
78
78
  @params = yield(@schema)
data/lib/llm/mime.rb CHANGED
@@ -3,15 +3,16 @@
3
3
  ##
4
4
  # @private
5
5
  class LLM::Mime
6
+ EXTNAME = /\A\.[a-zA-Z0-9]+\z/
7
+ private_constant :EXTNAME
8
+
6
9
  ##
7
10
  # Lookup a mime type
8
11
  # @return [String, nil]
9
12
  def self.[](key)
10
- if key.respond_to?(:path)
11
- types[File.extname(key.path)]
12
- else
13
- types[key]
14
- end
13
+ key = key.respond_to?(:path) ? key.path : key
14
+ extname = (key =~ EXTNAME) ? key : File.extname(key)
15
+ types[extname] || "application/octet-stream"
15
16
  end
16
17
 
17
18
  ##
@@ -24,6 +25,15 @@ class LLM::Mime
24
25
  ".jpg" => "image/jpeg",
25
26
  ".jpeg" => "image/jpeg",
26
27
  ".webp" => "image/webp",
28
+ ".gif" => "image/gif",
29
+ ".bmp" => "image/bmp",
30
+ ".tif" => "image/tiff",
31
+ ".tiff" => "image/tiff",
32
+ ".svg" => "image/svg+xml",
33
+ ".ico" => "image/x-icon",
34
+ ".apng" => "image/apng",
35
+ ".jfif" => "image/jpeg",
36
+ ".heic" => "image/heic",
27
37
 
28
38
  # Videos
29
39
  ".flv" => "video/x-flv",
@@ -34,6 +44,12 @@ class LLM::Mime
34
44
  ".webm" => "video/webm",
35
45
  ".wmv" => "video/x-ms-wmv",
36
46
  ".3gp" => "video/3gpp",
47
+ ".avi" => "video/x-msvideo",
48
+ ".mkv" => "video/x-matroska",
49
+ ".ogv" => "video/ogg",
50
+ ".m4v" => "video/x-m4v",
51
+ ".m2ts" => "video/mp2t",
52
+ ".mts" => "video/mp2t",
37
53
 
38
54
  # Audio
39
55
  ".aac" => "audio/aac",
@@ -45,10 +61,80 @@ class LLM::Mime
45
61
  ".pcm" => "audio/L16",
46
62
  ".wav" => "audio/wav",
47
63
  ".weba" => "audio/webm",
64
+ ".oga" => "audio/ogg",
65
+ ".ogg" => "audio/ogg",
66
+ ".mid" => "audio/midi",
67
+ ".midi" => "audio/midi",
68
+ ".aiff" => "audio/aiff",
69
+ ".aif" => "audio/aiff",
70
+ ".amr" => "audio/amr",
71
+ ".mka" => "audio/x-matroska",
72
+ ".caf" => "audio/x-caf",
48
73
 
49
74
  # Documents
50
75
  ".pdf" => "application/pdf",
51
- ".txt" => "text/plain"
76
+ ".txt" => "text/plain",
77
+ ".md" => "text/markdown",
78
+ ".markdown" => "text/markdown",
79
+ ".mkd" => "text/markdown",
80
+ ".doc" => "application/msword",
81
+ ".docx" => "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
82
+ ".xls" => "application/vnd.ms-excel",
83
+ ".xlsx" => "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
84
+ ".ppt" => "application/vnd.ms-powerpoint",
85
+ ".pptx" => "application/vnd.openxmlformats-officedocument.presentationml.presentation",
86
+ ".csv" => "text/csv",
87
+ ".json" => "application/json",
88
+ ".xml" => "application/xml",
89
+ ".html" => "text/html",
90
+ ".htm" => "text/html",
91
+ ".odt" => "application/vnd.oasis.opendocument.text",
92
+ ".odp" => "application/vnd.oasis.opendocument.presentation",
93
+ ".ods" => "application/vnd.oasis.opendocument.spreadsheet",
94
+ ".rtf" => "application/rtf",
95
+ ".epub" => "application/epub+zip",
96
+
97
+ # Code
98
+ ".js" => "application/javascript",
99
+ ".jsx" => "text/jsx",
100
+ ".ts" => "application/typescript",
101
+ ".tsx" => "text/tsx",
102
+ ".css" => "text/css",
103
+ ".c" => "text/x-c",
104
+ ".cpp" => "text/x-c++",
105
+ ".h" => "text/x-c",
106
+ ".rb" => "text/x-ruby",
107
+ ".py" => "text/x-python",
108
+ ".java" => "text/x-java-source",
109
+ ".sh" => "application/x-sh",
110
+ ".php" => "application/x-httpd-php",
111
+ ".yml" => "text/yaml",
112
+ ".yaml" => "text/yaml",
113
+ ".go" => "text/x-go",
114
+ ".rs" => "text/rust",
115
+
116
+ # Fonts
117
+ ".woff" => "font/woff",
118
+ ".woff2" => "font/woff2",
119
+ ".ttf" => "font/ttf",
120
+ ".otf" => "font/otf",
121
+
122
+ # Archives
123
+ ".zip" => "application/zip",
124
+ ".tar" => "application/x-tar",
125
+ ".gz" => "application/gzip",
126
+ ".bz2" => "application/x-bzip2",
127
+ ".xz" => "application/x-xz",
128
+ ".rar" => "application/vnd.rar",
129
+ ".7z" => "application/x-7z-compressed",
130
+ ".tar.gz" => "application/gzip",
131
+ ".tar.bz2" => "application/x-bzip2",
132
+ ".apk" => "application/vnd.android.package-archive",
133
+ ".exe" => "application/x-msdownload",
134
+
135
+ # Others
136
+ ".ics" => "text/calendar",
137
+ ".vcf" => "text/vcard"
52
138
  }
53
139
  end
54
140
  end
data/lib/llm/provider.rb CHANGED
@@ -198,9 +198,9 @@ class LLM::Provider
198
198
 
199
199
  ##
200
200
  # Returns an object that can generate a JSON schema
201
- # @return [JSON::Schema]
201
+ # @return [LLM::Schema]
202
202
  def schema
203
- @schema ||= JSON::Schema.new
203
+ @schema ||= LLM::Schema.new
204
204
  end
205
205
 
206
206
  ##
@@ -284,7 +284,8 @@ class LLM::Provider
284
284
  parser&.free
285
285
  end
286
286
  else
287
- client.request(request, &b)
287
+ b ? client.request(request) { (Net::HTTPSuccess === _1) ? b.call(_1) : _1 } :
288
+ client.request(request)
288
289
  end
289
290
  handle_response(res)
290
291
  end
@@ -22,6 +22,8 @@ class LLM::Anthropic
22
22
  # Raises a subclass of {LLM::Error LLM::Error}
23
23
  def raise_error!
24
24
  case res
25
+ when Net::HTTPServerError
26
+ raise LLM::ServerError.new { _1.response = res }, "Server error"
25
27
  when Net::HTTPUnauthorized
26
28
  raise LLM::UnauthorizedError.new { _1.response = res }, "Authentication error"
27
29
  when Net::HTTPTooManyRequests
@@ -0,0 +1,155 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Anthropic
4
+ ##
5
+ # The {LLM::Anthropic::Files LLM::Anthropic::Files} class provides a files
6
+ # object for interacting with [Anthropic's Files API](https://docs.anthropic.com/en/docs/build-with-claude/files).
7
+ #
8
+ # @example
9
+ # #!/usr/bin/env ruby
10
+ # require "llm"
11
+ #
12
+ # llm = LLM.anthropic(key: ENV["KEY"])
13
+ # bot = LLM::Bot.new(llm)
14
+ # file = llm.files.create file: "/books/goodread.pdf"
15
+ # bot.chat ["Tell me about this PDF", file]
16
+ # bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
17
+ class Files
18
+ require_relative "response/file"
19
+ ##
20
+ # Returns a new Files object
21
+ # @param provider [LLM::Provider]
22
+ # @return [LLM::Anthropic::Files]
23
+ def initialize(provider)
24
+ @provider = provider
25
+ end
26
+
27
+ ##
28
+ # List all files
29
+ # @example
30
+ # llm = LLM.anthropic(key: ENV["KEY"])
31
+ # res = llm.files.all
32
+ # res.each do |file|
33
+ # print "id: ", file.id, "\n"
34
+ # end
35
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files Anthropic docs
36
+ # @param [Hash] params Other parameters (see Anthropic docs)
37
+ # @raise (see LLM::Provider#request)
38
+ # @return [LLM::Response]
39
+ def all(**params)
40
+ query = URI.encode_www_form(params)
41
+ req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
42
+ res = execute(request: req)
43
+ LLM::Response.new(res).extend(LLM::Anthropic::Response::Enumerable)
44
+ end
45
+
46
+ ##
47
+ # Create a file
48
+ # @example
49
+ # llm = LLM.anthropic(key: ENV["KEY"])
50
+ # res = llm.files.create file: "/documents/haiku.txt"
51
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files Anthropic docs
52
+ # @param [File, LLM::File, String] file The file
53
+ # @param [Hash] params Other parameters (see Anthropic docs)
54
+ # @raise (see LLM::Provider#request)
55
+ # @return [LLM::Response]
56
+ def create(file:, **params)
57
+ multi = LLM::Multipart.new(params.merge!(file: LLM.File(file)))
58
+ req = Net::HTTP::Post.new("/v1/files", headers)
59
+ req["content-type"] = multi.content_type
60
+ set_body_stream(req, multi.body)
61
+ res = execute(request: req)
62
+ LLM::Response.new(res).extend(LLM::Anthropic::Response::File)
63
+ end
64
+
65
+ ##
66
+ # Get a file
67
+ # @example
68
+ # llm = LLM.anthropic(key: ENV["KEY"])
69
+ # res = llm.files.get(file: "file-1234567890")
70
+ # print "id: ", res.id, "\n"
71
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files Anthropic docs
72
+ # @param [#id, #to_s] file The file ID
73
+ # @param [Hash] params Other parameters - if any (see Anthropic docs)
74
+ # @raise (see LLM::Provider#request)
75
+ # @return [LLM::Response]
76
+ def get(file:, **params)
77
+ file_id = file.respond_to?(:id) ? file.id : file
78
+ query = URI.encode_www_form(params)
79
+ req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
80
+ res = execute(request: req)
81
+ LLM::Response.new(res).extend(LLM::Anthropic::Response::File)
82
+ end
83
+
84
+ ##
85
+ # Retrieve file metadata
86
+ # @example
87
+ # llm = LLM.anthropic(key: ENV["KEY"])
88
+ # res = llm.files.get_metadata(file: "file-1234567890")
89
+ # print "id: ", res.id, "\n"
90
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files
91
+ # @param [#id, #to_s] file The file ID
92
+ # @param [Hash] params Other parameters - if any (see Anthropic docs)
93
+ # @raise (see LLM::Provider#request)
94
+ # @return [LLM::Response]
95
+ def get_metadata(file:, **params)
96
+ query = URI.encode_www_form(params)
97
+ file_id = file.respond_to?(:id) ? file.id : file
98
+ req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
99
+ res = execute(request: req)
100
+ LLM::Response.new(res).extend(LLM::Anthropic::Response::File)
101
+ end
102
+ alias_method :retrieve_metadata, :get_metadata
103
+
104
+ ##
105
+ # Delete a file
106
+ # @example
107
+ # llm = LLM.anthropic(key: ENV["KEY"])
108
+ # res = llm.files.delete(file: "file-1234567890")
109
+ # print res.deleted, "\n"
110
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files Anthropic docs
111
+ # @param [#id, #to_s] file The file ID
112
+ # @raise (see LLM::Provider#request)
113
+ # @return [LLM::Response]
114
+ def delete(file:)
115
+ file_id = file.respond_to?(:id) ? file.id : file
116
+ req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
117
+ res = execute(request: req)
118
+ LLM::Response.new(res)
119
+ end
120
+
121
+ ##
122
+ # Download the contents of a file
123
+ # @note
124
+ # You can only download files that were created by the code
125
+ # execution tool. Files that you uploaded cannot be downloaded.
126
+ # @example
127
+ # llm = LLM.anthropic(key: ENV["KEY"])
128
+ # res = llm.files.download(file: "file-1234567890")
129
+ # File.binwrite "program.c", res.file.read
130
+ # print res.file.read, "\n"
131
+ # @see https://docs.anthropic.com/en/docs/build-with-claude/files Anthropic docs
132
+ # @param [#id, #to_s] file The file ID
133
+ # @param [Hash] params Other parameters (see Anthropic docs)
134
+ # @raise (see LLM::Provider#request)
135
+ # @return [LLM::Response]
136
+ def download(file:, **params)
137
+ query = URI.encode_www_form(params)
138
+ file_id = file.respond_to?(:id) ? file.id : file
139
+ req = Net::HTTP::Get.new("/v1/files/#{file_id}/content?#{query}", headers)
140
+ io = StringIO.new("".b)
141
+ res = execute(request: req) { |res| res.read_body { |chunk| io << chunk } }
142
+ LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }
143
+ end
144
+
145
+ private
146
+
147
+ def key
148
+ @provider.instance_variable_get(:@key)
149
+ end
150
+
151
+ [:headers, :execute, :set_body_stream].each do |m|
152
+ define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
153
+ end
154
+ end
155
+ end