llm.rb 0.10.1 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/LICENSE +0 -0
  3. data/README.md +81 -117
  4. data/lib/llm/bot/builder.rb +2 -2
  5. data/lib/llm/bot/conversable.rb +0 -0
  6. data/lib/llm/bot/prompt/completion.rb +0 -0
  7. data/lib/llm/bot/prompt/respond.rb +0 -0
  8. data/lib/llm/bot.rb +9 -11
  9. data/lib/llm/buffer.rb +0 -0
  10. data/lib/llm/error.rb +0 -0
  11. data/lib/llm/event_handler.rb +0 -0
  12. data/lib/llm/eventstream/event.rb +0 -0
  13. data/lib/llm/eventstream/parser.rb +0 -0
  14. data/lib/llm/eventstream.rb +0 -0
  15. data/lib/llm/file.rb +18 -9
  16. data/lib/llm/function.rb +6 -5
  17. data/lib/llm/json/schema/array.rb +0 -0
  18. data/lib/llm/json/schema/boolean.rb +0 -0
  19. data/lib/llm/json/schema/integer.rb +0 -0
  20. data/lib/llm/json/schema/leaf.rb +0 -0
  21. data/lib/llm/json/schema/null.rb +0 -0
  22. data/lib/llm/json/schema/number.rb +0 -0
  23. data/lib/llm/json/schema/object.rb +0 -0
  24. data/lib/llm/json/schema/string.rb +0 -0
  25. data/lib/llm/json/schema/version.rb +0 -0
  26. data/lib/llm/json/schema.rb +0 -0
  27. data/lib/llm/message.rb +8 -0
  28. data/lib/llm/mime.rb +0 -0
  29. data/lib/llm/multipart.rb +0 -0
  30. data/lib/llm/object/builder.rb +0 -0
  31. data/lib/llm/object/kernel.rb +8 -0
  32. data/lib/llm/object.rb +7 -0
  33. data/lib/llm/provider.rb +9 -11
  34. data/lib/llm/providers/anthropic/error_handler.rb +0 -0
  35. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  36. data/lib/llm/providers/anthropic/format.rb +0 -0
  37. data/lib/llm/providers/anthropic/models.rb +2 -7
  38. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  39. data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
  40. data/lib/llm/providers/anthropic.rb +3 -24
  41. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  42. data/lib/llm/providers/deepseek/format.rb +0 -0
  43. data/lib/llm/providers/deepseek.rb +6 -0
  44. data/lib/llm/providers/gemini/audio.rb +6 -10
  45. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  46. data/lib/llm/providers/gemini/files.rb +11 -14
  47. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  48. data/lib/llm/providers/gemini/format.rb +0 -0
  49. data/lib/llm/providers/gemini/images.rb +8 -7
  50. data/lib/llm/providers/gemini/models.rb +2 -8
  51. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  52. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  53. data/lib/llm/providers/gemini/response/file.rb +11 -0
  54. data/lib/llm/providers/gemini/response/image.rb +26 -0
  55. data/lib/llm/providers/gemini/stream_parser.rb +0 -0
  56. data/lib/llm/providers/gemini.rb +5 -8
  57. data/lib/llm/providers/llamacpp.rb +6 -0
  58. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  59. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  60. data/lib/llm/providers/ollama/format.rb +0 -0
  61. data/lib/llm/providers/ollama/models.rb +2 -8
  62. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  63. data/lib/llm/providers/ollama/response/embedding.rb +10 -0
  64. data/lib/llm/providers/ollama/stream_parser.rb +0 -0
  65. data/lib/llm/providers/ollama.rb +5 -8
  66. data/lib/llm/providers/openai/audio.rb +6 -6
  67. data/lib/llm/providers/openai/error_handler.rb +0 -0
  68. data/lib/llm/providers/openai/files.rb +14 -15
  69. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  70. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  71. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  72. data/lib/llm/providers/openai/format.rb +0 -0
  73. data/lib/llm/providers/openai/images.rb +8 -7
  74. data/lib/llm/providers/openai/models.rb +2 -7
  75. data/lib/llm/providers/openai/moderations.rb +9 -11
  76. data/lib/llm/providers/openai/response/audio.rb +7 -0
  77. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
  78. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  79. data/lib/llm/providers/openai/response/file.rb +7 -0
  80. data/lib/llm/providers/openai/response/image.rb +16 -0
  81. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  82. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
  83. data/lib/llm/providers/openai/responses.rb +10 -9
  84. data/lib/llm/providers/openai/stream_parser.rb +0 -0
  85. data/lib/llm/providers/openai/vector_stores.rb +106 -0
  86. data/lib/llm/providers/openai.rb +14 -8
  87. data/lib/llm/response.rb +37 -13
  88. data/lib/llm/utils.rb +0 -0
  89. data/lib/llm/version.rb +1 -1
  90. data/lib/llm.rb +2 -12
  91. data/llm.gemspec +1 -1
  92. metadata +18 -29
  93. data/lib/llm/model.rb +0 -32
  94. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  95. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  96. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  97. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  98. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  99. data/lib/llm/providers/openai/response_parser.rb +0 -65
  100. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  101. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  102. data/lib/llm/providers/voyageai.rb +0 -44
  103. data/lib/llm/response/audio.rb +0 -13
  104. data/lib/llm/response/audio_transcription.rb +0 -14
  105. data/lib/llm/response/audio_translation.rb +0 -14
  106. data/lib/llm/response/completion.rb +0 -51
  107. data/lib/llm/response/download_file.rb +0 -15
  108. data/lib/llm/response/embedding.rb +0 -23
  109. data/lib/llm/response/file.rb +0 -42
  110. data/lib/llm/response/filelist.rb +0 -18
  111. data/lib/llm/response/image.rb +0 -29
  112. data/lib/llm/response/modellist.rb +0 -18
  113. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  114. data/lib/llm/response/moderationlist.rb +0 -51
  115. data/lib/llm/response/respond.rb +0 -56
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 39e538d8185cf5c8c5a36da0e1bf5b0b9e0055945a02570cd00fefc805b288d0
-  data.tar.gz: 7fc0d3a4422fe10bb3058c7b1b5b9bc80693ccc0dbf6b62bda46d42fb7c2830c
+  metadata.gz: ce6a4e56ce25f397337733009249edb930cade1641bd84d7939ef0d349c6e92a
+  data.tar.gz: b9506c346dd19af655f342e9b6a64aed25cd3f4ecffb6bb32cb36e1221e71c6a
 SHA512:
-  metadata.gz: a2b3de69ce317d856ec593074e22883ec2b96ddcdc2637cb2b4c555885c1c771b2ffd447b255cdc16a2c7f1c2b72362ab1b6e29ec0fdf775e977292b03fd3e34
-  data.tar.gz: c2ba0f853b7eaac4ca8fab15f497a3fa375e054b7da928b2f0798e393909baff20d2381afd48e793fd878261a0d838474e4be3be5b5232e480cf162af57dbe2e
+  metadata.gz: a4a53b8e5aeaae2cd26f5303366ce1974ecfdb4176a76c5aa01f46cb9997d10a402de782b0b14f97bf3c65dc4c200b976c1279a037ce29ae44ab64fd1799ffd9
+  data.tar.gz: 96128a756147d8cad13e85f91247fdb43b6bf5c469348318c27f27518670c61b6466546d65bf1f553b086b2a42b15a5225e0ac5355b680ba46214b32bc20d754
data/LICENSE CHANGED
File without changes
data/README.md CHANGED
@@ -10,7 +10,7 @@ images, files, and JSON Schema generation.
 #### General
 - ✅ A single unified interface for multiple providers
 - 📦 Zero dependencies outside Ruby's standard library
-- 🚀 Efficient API design that minimizes the request count
+- 🚀 Efficient API design that minimizes the number of requests made
 
 #### Chat, Agents
 - 🧠 Stateless and stateful chat via completions and responses API
@@ -27,24 +27,7 @@ images, files, and JSON Schema generation.
 #### Miscellaneous
 - 🧮 Text embeddings and vector support
 - 🔌 Retrieve models dynamically for introspection and selection
-
-## Demos
-
-> The
-> [llmrb/llm-shell](https://github.com/llmrb/llm-shell)
-> project is built with llm.rb and its demos have been
-> included to provide a better idea of what llm.rb
-> is capable of.
-
-<details>
-<summary><b>1. An introduction to tool calls</b></summary>
-<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls_v2.gif">
-</details>
-
-<details>
-<summary><b>2. Add files as conversation context</b></summary>
-<img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime_v2.gif">
-</details>
+- 🧱 Includes support for OpenAI's responses, moderations, and vector stores APIs
 
 ## Examples
 
@@ -67,7 +50,6 @@ llm = LLM.openai(key: "yourapikey")
 llm = LLM.gemini(key: "yourapikey")
 llm = LLM.anthropic(key: "yourapikey")
 llm = LLM.deepseek(key: "yourapikey")
-llm = LLM.voyageai(key: "yourapikey")
 
 ##
 # local providers
@@ -86,24 +68,24 @@ llm = LLM.llamacpp(key: nil)
 
 The following example creates an instance of
 [LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
-by entering into a conversation where messages are buffered and
-sent to the provider on-demand. This is the default behavior
-because it can reduce the number of requests sent to a provider,
-and avoids unneccessary requests until an attempt to iterate over
+and enters into a conversation where messages are buffered and
+sent to the provider on-demand. The implementation is designed to
+buffer messages by waiting until an attempt to iterate over
 [LLM::Bot#messages](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#messages-instance_method)
-is made:
+is made before sending a request to the LLM:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(key: ENV["KEY"])
+llm = LLM.openai(key: ENV["OPENAI_SECRET"])
 bot = LLM::Bot.new(llm)
+url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
 msgs = bot.chat do |prompt|
-  prompt.system File.read("./share/llm/prompts/system.txt")
-  prompt.user "Tell me the answer to 5 + 15"
-  prompt.user "Tell me the answer to (5 + 15) * 2"
-  prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+  prompt.system "Your task is to answer all user queries"
+  prompt.user ["Tell me about this URL", URI(url)]
+  prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+  prompt.user "Is the URL and PDF similar to each other?"
 end
 
 # At this point, we execute a single request
@@ -118,27 +100,24 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
 > [docs/](docs/STREAMING.md#scopes) for more details.
 
 The following example streams the messages in a conversation
-as they are generated in real-time. This feature can be useful
-when you want to stream a conversation in real time, or when you
-want to avoid potential read timeouts during the generation of a
-response.
-
-The `stream` option can be set to an IO object, or the value `true`
-to enable streaming &ndash; and at the end of the request, `bot.chat`
-returns the same response as the non-streaming version which allows
-you to process a response in the same way:
+as they are generated in real-time. The `stream` option can
+be set to an IO object, or the value `true` to enable streaming
+&ndash; and at the end of the request, `bot.chat` returns the
+same response as the non-streaming version which allows you
+to process a response in the same way:
 
 ```ruby
 #!/usr/bin/env ruby
 require "llm"
 
-llm = LLM.openai(key: ENV["KEY"])
+llm = LLM.openai(key: ENV["OPENAI_SECRET"])
 bot = LLM::Bot.new(llm)
+url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
 bot.chat(stream: $stdout) do |prompt|
-  prompt.system "You are my math assistant."
-  prompt.user "Tell me the answer to 5 + 15"
-  prompt.user "Tell me the answer to (5 + 15) * 2"
-  prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+  prompt.system "Your task is to answer all user queries"
+  prompt.user ["Tell me about this URL", URI(url)]
+  prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+  prompt.user "Is the URL and PDF similar to each other?"
 end.to_a
 ```
 
@@ -197,11 +176,7 @@ The
 method returns an array of functions that can be called after sending a message and
 it will only be populated if the LLM detects a function should be called. Each function
 corresponds to an element in the "tools" array. The array is emptied after a function call,
-and potentially repopulated on the next message.
-
-The following example defines an agent that can run system commands based on natural language,
-and it is only intended to be a fun demo of tool calling - it is not recommended to run
-arbitrary commands from a LLM without sanitizing the input first :) Without further ado:
+and potentially repopulated on the next message:
 
 ```ruby
 #!/usr/bin/env ruby
@@ -213,10 +188,10 @@ tool = LLM.function(:system) do |fn|
   fn.params do |schema|
     schema.object(command: schema.string.required)
   end
-  fn.define do |params|
+  fn.define do |command:|
     ro, wo = IO.pipe
     re, we = IO.pipe
-    Process.wait Process.spawn(params.command, out: wo, err: we)
+    Process.wait Process.spawn(command, out: wo, err: we)
     [wo,we].each(&:close)
     {stderr: re.read, stdout: ro.read}
   end
@@ -236,6 +211,60 @@ bot.chat bot.functions.map(&:call) # report return value to the LLM
 # {stderr: "", stdout: "FreeBSD"}
 ```
 
+### Files
+
+#### Create
+
+The OpenAI and Gemini providers offer a Files API where a client can upload files
+that can be referenced from a prompt, and used with other APIs as well. The following
+example uses the OpenAI provider to describe the contents of a PDF file after
+it has been uploaded. The file (a specialized instance of
+[LLM::Response](https://0x1eef.github.io/x/llm.rb/LLM/Response.html))
+is given as part of a prompt that is understood by llm.rb:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm)
+file = llm.files.create(file: "/books/goodread.pdf")
+bot.chat(["Tell me about this file", file])
+bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
+### Prompts
+
+#### Multimodal
+
+It is generally a given that an LLM will understand text, but LLMs can also
+understand and generate other types of media: audio, images, video,
+and even URLs. The object given as a prompt in llm.rb can be a string to
+represent text, a URI object to represent a URL, an LLM::Response object
+to represent a file stored with the LLM, and so on. These are objects you
+can throw at the prompt and have them be understood automatically.
+
+A prompt can also have multiple parts, and in that case, an array is given
+as a prompt. Each element is considered to be part of the prompt:
+
+```ruby
+#!/usr/bin/env ruby
+require "llm"
+
+llm = LLM.openai(key: ENV["KEY"])
+bot = LLM::Bot.new(llm)
+
+bot.chat ["Tell me about this URL", URI("https://example.com/path/to/image.png")]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+
+file = llm.files.create(file: "/books/goodread.pdf")
+bot.chat ["Tell me about this PDF", file]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+
+bot.chat ["Tell me about this image", File.open("/images/nemothefish.png", "r")]
+[bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
+```
+
 ### Audio
 
 #### Speech
@@ -363,71 +392,6 @@ res.urls.each.with_index do |url, index|
 end
 ```
 
-### Files
-
-#### Create
-
-Most LLM providers provide a Files API where you can upload files
-that can be referenced from a prompt and llm.rb has first-class support
-for this feature. The following example uses the OpenAI provider to describe
-the contents of a PDF file after it has been uploaded. The file (an instance
-of [LLM::Response::File](https://0x1eef.github.io/x/llm.rb/LLM/Response/File.html))
-is passed directly to the chat method, and generally any object a prompt supports
-can be given to the chat method:
-
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::Bot.new(llm)
-file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
-bot.chat(file)
-bot.chat("What is this file about?")
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-##
-# [assistant] This file is about OpenBSD, a free and open-source Unix-like operating system
-#             based on the Berkeley Software Distribution (BSD). It is known for its
-#             emphasis on security, code correctness, and code simplicity. The file
-#             contains information about the features, installation, and usage of OpenBSD.
-```
-
-### Prompts
-
-#### Multimodal
-
-Generally all providers accept text prompts but some providers can
-also understand URLs, and various file types (eg images, audio, video,
-etc). The llm.rb approach to multimodal prompts is to let you pass `URI`
-objects to describe links, `LLM::File` | `LLM::Response::File` objects
-to describe files, `String` objects to describe text blobs, or an array
-of the aforementioned objects to describe multiple objects in a single
-prompt. Each object is a first class citizen that can be passed directly
-to a prompt:
-
-```ruby
-#!/usr/bin/env ruby
-require "llm"
-
-llm = LLM.openai(key: ENV["KEY"])
-bot = LLM::Bot.new(llm)
-
-bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
-bot.chat [file, "What is this file about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-bot.chat [LLM.File("/images/puffy.png"), "What is this image about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
-bot.chat [LLM.File("/images/beastie.png"), "What is this image about?"]
-bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-```
-
 ### Embeddings
 
 #### Text
@@ -453,7 +417,7 @@ print res.embeddings.size, "\n"
 print res.embeddings[0].size, "\n"
 
 ##
-# LLM::Response::Embedding
+# LLM::Response
 # 3
 # 1536
 ```
data/lib/llm/bot/builder.rb CHANGED
@@ -9,7 +9,7 @@ class LLM::Bot
   ##
   # @param [String] prompt The prompt
   # @param [Hash] params
-  # @return [LLM::Response::Respond]
+  # @return [LLM::Response]
   def create_response!(prompt, params)
     @provider.responses.create(
       prompt,
@@ -20,7 +20,7 @@ class LLM::Bot
   ##
   # @param [String] prompt The prompt
   # @param [Hash] params
-  # @return [LLM::Response::Completion]
+  # @return [LLM::Response]
   def create_completion!(prompt, params)
     @provider.complete(
       prompt,
data/lib/llm/bot/conversable.rb CHANGED
File without changes
data/lib/llm/bot/prompt/completion.rb CHANGED
File without changes
data/lib/llm/bot/prompt/respond.rb CHANGED
File without changes
data/lib/llm/bot.rb CHANGED
@@ -2,10 +2,10 @@
 
 module LLM
   ##
-  # {LLM::Bot LLM::Bot} provides a bot object that can maintain a
+  # {LLM::Bot LLM::Bot} provides an object that can maintain a
   # a conversation. A conversation can use the chat completions API
-  # that all LLM providers support or the responses API that a select
-  # few LLM providers support.
+  # that all LLM providers support or the responses API that currently
+  # only OpenAI supports.
   #
   # @example example #1
   #   #!/usr/bin/env ruby
@@ -14,10 +14,9 @@ module LLM
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Bot.new(llm)
   #   msgs = bot.chat do |prompt|
-  #     prompt.system "Answer the following questions."
-  #     prompt.user "What is 5 + 7 ?"
-  #     prompt.user "Why is the sky blue ?"
-  #     prompt.user "Why did the chicken cross the road ?"
+  #     prompt.user "What programming language should I learn next ?"
+  #     prompt.user "Can you recommend a good book ?"
+  #     prompt.user "Can you suggest a fun project to practice ?"
  #   end
   #   msgs.each { print "[#{_1.role}]", _1.content, "\n" }
   #
@@ -27,10 +26,9 @@ module LLM
   #
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Bot.new(llm)
-  #   bot.chat "Answer the following questions.", role: :system
-  #   bot.chat "What is 5 + 7 ?", role: :user
-  #   bot.chat "Why is the sky blue ?", role: :user
-  #   bot.chat "Why did the chicken cross the road ?", role: :user
+  #   bot.chat "What programming language should I learn next ?", role: :user
+  #   bot.chat "Can you recommend a good book ?", role: :user
+  #   bot.chat "Can you suggest a fun project to practice ?", role: :user
   #   bot.messages.each { print "[#{_1.role}]", _1.content, "\n" }
   class Bot
     require_relative "bot/prompt/completion"
data/lib/llm/buffer.rb CHANGED
File without changes
data/lib/llm/error.rb CHANGED
File without changes
data/lib/llm/event_handler.rb CHANGED
File without changes
data/lib/llm/eventstream/event.rb CHANGED
File without changes
data/lib/llm/eventstream/parser.rb CHANGED
File without changes
data/lib/llm/eventstream.rb CHANGED
File without changes
data/lib/llm/file.rb CHANGED
@@ -29,12 +29,19 @@ class LLM::File
   end
 
   ##
-  # @return [String]
+  # @return [Boolean]
   #  Returns true if the file is an image
   def image?
     mime_type.start_with?("image/")
   end
 
+  ##
+  # @return [Boolean]
+  #  Returns true if the file is a PDF document
+  def pdf?
+    mime_type == "application/pdf"
+  end
+
   ##
   # @return [Integer]
   #  Returns the size of the file in bytes
@@ -68,14 +75,16 @@ class LLM::File
 end
 
 ##
-# @param [String] path
-#  The path to a file
+# @param [String, File, LLM::Response] obj
+#  The path to the file, or an existing file reference
 # @return [LLM::File]
-def LLM.File(path)
-  case path
-  when LLM::File, LLM::Response::File
-    path
-  else
-    LLM::File.new(path)
+def LLM.File(obj)
+  case obj
+  when File
+    obj.close unless obj.closed?
+    LLM.File(obj.path)
+  when LLM::File, LLM::Response then obj
+  when String then LLM::File.new(obj)
+  else raise TypeError, "don't know how to handle #{obj.class} objects"
  end
 end
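Editor's note: the reworked `LLM.File` coercion above accepts a `String` path, an open `File` (which it closes before re-coercing by path), or an existing `LLM::File` / `LLM::Response`, and anything else raises a `TypeError`. A minimal sketch of the new behavior, using hypothetical paths:

```ruby
#!/usr/bin/env ruby
require "llm"

# A String path is wrapped in an LLM::File
file = LLM.File("/images/puffy.png")

# An open File is closed, then coerced by its path
file = LLM.File(File.open("/images/puffy.png", "r"))

# LLM::File and LLM::Response objects pass through unchanged
file = LLM.File(file)

# Anything else raises a TypeError
begin
  LLM.File(42)
rescue TypeError => ex
  warn ex.message
end
```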
data/lib/llm/function.rb CHANGED
@@ -10,15 +10,15 @@
 #     fn.params do |schema|
 #       schema.object(command: schema.string.required)
 #     end
-#     fn.define do |params|
-#       {success: Kernel.system(params.command)}
+#     fn.define do |command:|
+#       {success: Kernel.system(command)}
 #     end
 #   end
 #
 # @example example #2
 #   class System
-#     def call(params)
-#       {success: Kernel.system(params.command)}
+#     def call(command:)
+#       {success: Kernel.system(command)}
 #     end
 #   end
 #
@@ -91,7 +91,8 @@ class LLM::Function
   # Call the function
   # @return [LLM::Function::Return] The result of the function call
   def call
-    Return.new id, (Class === @runner) ? @runner.new.call(arguments) : @runner.call(arguments)
+    runner = ((Class === @runner) ? @runner.new : @runner)
+    Return.new(id, runner.call(**arguments))
   ensure
     @called = true
   end
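Editor's note: this is a breaking change worth calling out. A function's runner is now invoked with keyword arguments (`runner.call(**arguments)`) rather than a single params object. A before/after sketch based on the README's `system` tool:

```ruby
#!/usr/bin/env ruby
require "llm"

# 0.10.x style (no longer works):
#   fn.define do |params|
#     {success: Kernel.system(params.command)}
#   end

# 0.11.0 style: each schema property arrives as a keyword argument
tool = LLM.function(:system) do |fn|
  fn.description "Run a shell command"
  fn.params do |schema|
    schema.object(command: schema.string.required)
  end
  fn.define do |command:|
    {success: Kernel.system(command)}
  end
end
```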
data/lib/llm/json/schema/array.rb CHANGED
File without changes
data/lib/llm/json/schema/boolean.rb CHANGED
File without changes
data/lib/llm/json/schema/integer.rb CHANGED
File without changes
data/lib/llm/json/schema/leaf.rb CHANGED
File without changes
data/lib/llm/json/schema/null.rb CHANGED
File without changes
data/lib/llm/json/schema/number.rb CHANGED
File without changes
data/lib/llm/json/schema/object.rb CHANGED
File without changes
data/lib/llm/json/schema/string.rb CHANGED
File without changes
data/lib/llm/json/schema/version.rb CHANGED
File without changes
data/lib/llm/json/schema.rb CHANGED
File without changes
data/lib/llm/message.rb CHANGED
@@ -109,6 +109,14 @@ module LLM
       tool_calls.any?
     end
 
+    ##
+    # @return [Boolean]
+    #  Returns true when the message represents a function return
+    def tool_return?
+      LLM::Function::Return === content ||
+        [*content].grep(LLM::Function::Return).any?
+    end
+
     ##
     # Returns a string representation of the message
     # @return [String]
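Editor's note: the new `LLM::Message#tool_return?` predicate complements the existing `tool_call?`; it returns true when a message's content is, or contains, an `LLM::Function::Return`. A sketch of how it might be used, assuming `bot` has already executed a tool call:

```ruby
# Select the messages that carry function return values back to
# the LLM (assumes a prior bot.chat bot.functions.map(&:call)).
returns = bot.messages.select(&:tool_return?)
returns.each { |msg| p msg.content }
```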
data/lib/llm/mime.rb CHANGED
File without changes
data/lib/llm/multipart.rb CHANGED
File without changes
data/lib/llm/object/builder.rb CHANGED
File without changes
data/lib/llm/object/kernel.rb CHANGED
@@ -12,6 +12,10 @@ class LLM::Object
       ::Kernel.instance_method(:instance_of?).bind(self).call(...)
     end
 
+    def extend(...)
+      ::Kernel.instance_method(:extend).bind(self).call(...)
+    end
+
     def method(...)
       ::Kernel.instance_method(:method).bind(self).call(...)
     end
@@ -41,5 +45,9 @@ class LLM::Object
       "#<#{self.class}:0x#{object_id.to_s(16)} properties=#{to_h.inspect}>"
     end
     alias_method :to_s, :inspect
+
+    def pretty_print(q)
+      q.text(inspect)
+    end
   end
 end
data/lib/llm/object.rb CHANGED
@@ -62,6 +62,13 @@ class LLM::Object < BasicObject
   def to_h
     @h
   end
+  alias_method :to_hash, :to_h
+
+  ##
+  # @return [Object, nil]
+  def dig(...)
+    to_h.dig(...)
+  end
 
   private
 
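Editor's note: `LLM::Object` now behaves a little more like a Hash. `to_hash` is aliased to `to_h`, and `dig` delegates to the underlying Hash, which makes it easier to reach into nested response bodies. A sketch, where `res` and its keys are hypothetical:

```ruby
# res is an LLM::Object parsed from a response body (hypothetical keys)
tokens = res.dig("usage", "input_tokens")

# to_hash enables implicit Hash conversion, e.g. double-splatting
def report(**properties) = properties.inspect
report(**res)
```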
data/lib/llm/provider.rb CHANGED
@@ -44,7 +44,7 @@ class LLM::Provider
   #  Other embedding parameters
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
-  # @return [LLM::Response::Embedding]
+  # @return [LLM::Response]
   def embed(input, model: nil, **params)
     raise NotImplementedError
   end
@@ -68,7 +68,7 @@ class LLM::Provider
   # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
   # @raise [NotImplementedError]
   #  When the method is not implemented by a subclass
-  # @return [LLM::Response::Completion]
+  # @return [LLM::Response]
   def complete(prompt, params = {})
     raise NotImplementedError
   end
@@ -174,6 +174,13 @@ class LLM::Provider
     raise NotImplementedError
   end
 
+  ##
+  # @return [LLM::OpenAI::VectorStore]
+  #  Returns an interface to the vector stores API
+  def vector_stores
+    raise NotImplementedError
+  end
+
   ##
   # @return [String]
   #  Returns the role of the assistant in the conversation.
@@ -222,15 +229,6 @@ class LLM::Provider
     raise NotImplementedError
   end
 
-  ##
-  # @return [Module]
-  #  Returns the module responsible for parsing a successful LLM response
-  # @raise [NotImplementedError]
-  #  (see LLM::Provider#complete)
-  def response_parser
-    raise NotImplementedError
-  end
-
   ##
   # @return [Class]
   #  Returns the class responsible for handling an unsuccessful LLM response
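Editor's note: 0.11.0 adds `vector_stores` to the provider interface. Per the file list above, only the OpenAI provider implements it (`openai/vector_stores.rb`, +106 lines), while the base class raises `NotImplementedError`. A minimal sketch of the entry point; the methods on the returned interface are not shown in this diff:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
stores = llm.vector_stores # OpenAI-only in 0.11.0

# Other providers fall through to the base class:
begin
  LLM.anthropic(key: ENV["KEY"]).vector_stores
rescue NotImplementedError
  warn "vector stores are not supported by this provider"
end
```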
data/lib/llm/providers/anthropic/error_handler.rb CHANGED
File without changes
data/lib/llm/providers/anthropic/format/completion_format.rb CHANGED
@@ -47,13 +47,18 @@ module LLM::Anthropic::Format
       content.empty? ? throw(:abort, nil) : content.flat_map { format_content(_1) }
     when URI
       [{type: :image, source: {type: "url", url: content.to_s}}]
+    when File
+      content.close unless content.closed?
+      format_content(LLM.File(content.path))
     when LLM::File
       if content.image?
         [{type: :image, source: {type: "base64", media_type: content.mime_type, data: content.to_b64}}]
+      elsif content.pdf?
+        [{type: :document, source: {type: "base64", media_type: content.mime_type, data: content.to_b64}}]
       else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-          "is not an image, and therefore not supported by the " \
-          "Anthropic API"
+        raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+          "is not an image or PDF, and therefore not supported by the " \
+          "Anthropic API"
       end
     when String
       [{type: :text, text: content}]
@@ -62,8 +67,8 @@ module LLM::Anthropic::Format
     when LLM::Function::Return
       [{type: "tool_result", tool_use_id: content.id, content: [{type: :text, text: JSON.dump(content.value)}]}]
     else
-      raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-        "is not supported by the Anthropic API"
+      raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+        "is not supported by the Anthropic API"
     end
   end
 
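Editor's note: together with the new `LLM::File#pdf?` predicate, this change lets the Anthropic provider send PDFs as base64 `document` parts instead of rejecting them. A sketch with a hypothetical path:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(key: ENV["KEY"])
bot = LLM::Bot.new(llm)
# Encoded as {type: :document, source: {type: "base64", ...}}
bot.chat ["Summarize this document", LLM.File("/books/goodread.pdf")]
bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
```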
data/lib/llm/providers/anthropic/format.rb CHANGED
File without changes
data/lib/llm/providers/anthropic/models.rb CHANGED
@@ -36,17 +36,12 @@ class LLM::Anthropic
     # @see https://docs.anthropic.com/en/api/models-list Anthropic docs
     # @param [Hash] params Other parameters (see Anthropic docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::FileList]
+    # @return [LLM::Response]
    def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
       res = execute(request: req)
-      LLM::Response::ModelList.new(res).tap { |modellist|
-        models = modellist.body["data"].map do |model|
-          LLM::Model.from_hash(model).tap { _1.provider = @provider }
-        end
-        modellist.models = models
-      }
+      LLM::Response.new(res)
     end
 
     private
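Editor's note: model listing now returns a plain `LLM::Response` instead of the removed `LLM::Response::ModelList` (and `LLM::Model` is gone entirely, per the file list). A hedged sketch of reading the result; the `"data"` key follows the shape of Anthropic's list-models API rather than anything shown in this diff:

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.anthropic(key: ENV["KEY"])
res = llm.models.all
# The parsed body keeps the provider's wire format, where
# model entries live under "data" (assumption, see above).
res.body["data"].each { |model| puts model["id"] }
```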