llm.rb 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/LICENSE +0 -0
  3. data/README.md +81 -122
  4. data/lib/llm/bot/builder.rb +2 -2
  5. data/lib/llm/bot/conversable.rb +0 -0
  6. data/lib/llm/bot/prompt/completion.rb +0 -0
  7. data/lib/llm/bot/prompt/respond.rb +0 -0
  8. data/lib/llm/bot.rb +9 -11
  9. data/lib/llm/buffer.rb +0 -0
  10. data/lib/llm/error.rb +0 -0
  11. data/lib/llm/event_handler.rb +0 -0
  12. data/lib/llm/eventstream/event.rb +0 -0
  13. data/lib/llm/eventstream/parser.rb +0 -0
  14. data/lib/llm/eventstream.rb +0 -0
  15. data/lib/llm/file.rb +18 -9
  16. data/lib/llm/function.rb +18 -13
  17. data/lib/llm/json/schema/array.rb +0 -0
  18. data/lib/llm/json/schema/boolean.rb +0 -0
  19. data/lib/llm/json/schema/integer.rb +0 -0
  20. data/lib/llm/json/schema/leaf.rb +0 -0
  21. data/lib/llm/json/schema/null.rb +0 -0
  22. data/lib/llm/json/schema/number.rb +0 -0
  23. data/lib/llm/json/schema/object.rb +0 -0
  24. data/lib/llm/json/schema/string.rb +0 -0
  25. data/lib/llm/json/schema/version.rb +0 -0
  26. data/lib/llm/json/schema.rb +0 -0
  27. data/lib/llm/message.rb +8 -0
  28. data/lib/llm/mime.rb +0 -0
  29. data/lib/llm/multipart.rb +0 -0
  30. data/lib/llm/object/builder.rb +0 -0
  31. data/lib/llm/object/kernel.rb +8 -0
  32. data/lib/llm/object.rb +7 -0
  33. data/lib/llm/provider.rb +9 -11
  34. data/lib/llm/providers/anthropic/error_handler.rb +0 -0
  35. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  36. data/lib/llm/providers/anthropic/format.rb +0 -0
  37. data/lib/llm/providers/anthropic/models.rb +2 -7
  38. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  39. data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
  40. data/lib/llm/providers/anthropic.rb +3 -24
  41. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  42. data/lib/llm/providers/deepseek/format.rb +0 -0
  43. data/lib/llm/providers/deepseek.rb +6 -0
  44. data/lib/llm/providers/gemini/audio.rb +6 -10
  45. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  46. data/lib/llm/providers/gemini/files.rb +11 -14
  47. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  48. data/lib/llm/providers/gemini/format.rb +0 -0
  49. data/lib/llm/providers/gemini/images.rb +8 -7
  50. data/lib/llm/providers/gemini/models.rb +2 -8
  51. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  52. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  53. data/lib/llm/providers/gemini/response/file.rb +11 -0
  54. data/lib/llm/providers/gemini/response/image.rb +26 -0
  55. data/lib/llm/providers/gemini/stream_parser.rb +0 -0
  56. data/lib/llm/providers/gemini.rb +5 -8
  57. data/lib/llm/providers/llamacpp.rb +6 -0
  58. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  59. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  60. data/lib/llm/providers/ollama/format.rb +0 -0
  61. data/lib/llm/providers/ollama/models.rb +2 -8
  62. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  63. data/lib/llm/providers/ollama/response/embedding.rb +10 -0
  64. data/lib/llm/providers/ollama/stream_parser.rb +0 -0
  65. data/lib/llm/providers/ollama.rb +5 -8
  66. data/lib/llm/providers/openai/audio.rb +6 -6
  67. data/lib/llm/providers/openai/error_handler.rb +0 -0
  68. data/lib/llm/providers/openai/files.rb +14 -15
  69. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  70. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  71. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  72. data/lib/llm/providers/openai/format.rb +0 -0
  73. data/lib/llm/providers/openai/images.rb +8 -7
  74. data/lib/llm/providers/openai/models.rb +2 -7
  75. data/lib/llm/providers/openai/moderations.rb +9 -11
  76. data/lib/llm/providers/openai/response/audio.rb +7 -0
  77. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
  78. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  79. data/lib/llm/providers/openai/response/file.rb +7 -0
  80. data/lib/llm/providers/openai/response/image.rb +16 -0
  81. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  82. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
  83. data/lib/llm/providers/openai/responses.rb +10 -9
  84. data/lib/llm/providers/openai/stream_parser.rb +0 -0
  85. data/lib/llm/providers/openai/vector_stores.rb +106 -0
  86. data/lib/llm/providers/openai.rb +14 -8
  87. data/lib/llm/response.rb +37 -13
  88. data/lib/llm/utils.rb +0 -0
  89. data/lib/llm/version.rb +1 -1
  90. data/lib/llm.rb +2 -12
  91. data/llm.gemspec +1 -1
  92. metadata +18 -29
  93. data/lib/llm/model.rb +0 -32
  94. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  95. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  96. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  97. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  98. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  99. data/lib/llm/providers/openai/response_parser.rb +0 -65
  100. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  101. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  102. data/lib/llm/providers/voyageai.rb +0 -44
  103. data/lib/llm/response/audio.rb +0 -13
  104. data/lib/llm/response/audio_transcription.rb +0 -14
  105. data/lib/llm/response/audio_translation.rb +0 -14
  106. data/lib/llm/response/completion.rb +0 -51
  107. data/lib/llm/response/download_file.rb +0 -15
  108. data/lib/llm/response/embedding.rb +0 -23
  109. data/lib/llm/response/file.rb +0 -42
  110. data/lib/llm/response/filelist.rb +0 -18
  111. data/lib/llm/response/image.rb +0 -29
  112. data/lib/llm/response/modellist.rb +0 -18
  113. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  114. data/lib/llm/response/moderationlist.rb +0 -51
  115. data/lib/llm/response/respond.rb +0 -56
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 15ddbff68b600d6a8926e872ed687edaff585ed075136415c8b8921438c5d32f
- data.tar.gz: bf498f154df07de201f1529253559db03e850182afdf84eb6b99efb527920a2f
+ metadata.gz: ce6a4e56ce25f397337733009249edb930cade1641bd84d7939ef0d349c6e92a
+ data.tar.gz: b9506c346dd19af655f342e9b6a64aed25cd3f4ecffb6bb32cb36e1221e71c6a
  SHA512:
- metadata.gz: 6256cf08682c5f9bf5aff9a6065aec57e23e8f68bc54acd302eee4f6c08628feddf438043e58898611be6ffa0d61c07f437da3c6cd16007d256c6fd4adee4218
- data.tar.gz: a7c0510691a9aa08447dc65146a763a37ca1aefb59de9ee10c2eca691d725a971d9dcdc35dc1316ff473b3ff002ef1b5c2963601a8044ec70b2c2302132e4627
+ metadata.gz: a4a53b8e5aeaae2cd26f5303366ce1974ecfdb4176a76c5aa01f46cb9997d10a402de782b0b14f97bf3c65dc4c200b976c1279a037ce29ae44ab64fd1799ffd9
+ data.tar.gz: 96128a756147d8cad13e85f91247fdb43b6bf5c469348318c27f27518670c61b6466546d65bf1f553b086b2a42b15a5225e0ac5355b680ba46214b32bc20d754
data/LICENSE CHANGED
File without changes
data/README.md CHANGED
@@ -10,7 +10,7 @@ images, files, and JSON Schema generation.
  #### General
  - ✅ A single unified interface for multiple providers
  - 📦 Zero dependencies outside Ruby's standard library
- - 🚀 Efficient API design that minimizes the request count
+ - 🚀 Efficient API design that minimizes the number of requests made

  #### Chat, Agents
  - 🧠 Stateless and stateful chat via completions and responses API
@@ -27,29 +27,7 @@ images, files, and JSON Schema generation.
  #### Miscellaneous
  - 🧮 Text embeddings and vector support
  - 🔌 Retrieve models dynamically for introspection and selection
-
- ## Demos
-
- > The
- > [llmrb/llm-shell](https://github.com/llmrb/llm-shell)
- > project is built with llm.rb and its demos have been
- > included to provide a better idea of what llm.rb
- > is capable of.
-
- <details>
- <summary><b>1. Tools: "system" function</b></summary>
- <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/toolcalls.gif">
- </details>
-
- <details>
- <summary><b>2. Files: import at runtime</b></summary>
- <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-runtime.gif">
- </details>
-
- <details>
- <summary><b>3. Files: import at boot time</b></summary>
- <img src="https://github.com/llmrb/llm/raw/main/share/llm-shell/examples/files-boottime.gif">
- </details>
+ - 🧱 Includes support for OpenAI's responses, moderations, and vector stores APIs

  ## Examples

@@ -72,7 +50,6 @@ llm = LLM.openai(key: "yourapikey")
  llm = LLM.gemini(key: "yourapikey")
  llm = LLM.anthropic(key: "yourapikey")
  llm = LLM.deepseek(key: "yourapikey")
- llm = LLM.voyageai(key: "yourapikey")

  ##
  # local providers
@@ -91,24 +68,24 @@ llm = LLM.llamacpp(key: nil)

  The following example creates an instance of
  [LLM::Bot](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html)
- by entering into a conversation where messages are buffered and
- sent to the provider on-demand. This is the default behavior
- because it can reduce the number of requests sent to a provider,
- and avoids unneccessary requests until an attempt to iterate over
+ and enters into a conversation where messages are buffered and
+ sent to the provider on-demand. The implementation is designed to
+ buffer messages by waiting until an attempt to iterate over
  [LLM::Bot#messages](https://0x1eef.github.io/x/llm.rb/LLM/Bot.html#messages-instance_method)
- is made:
+ is made before sending a request to the LLM:

  ```ruby
  #!/usr/bin/env ruby
  require "llm"

- llm = LLM.openai(key: ENV["KEY"])
+ llm = LLM.openai(key: ENV["OPENAI_SECRET"])
  bot = LLM::Bot.new(llm)
+ url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
  msgs = bot.chat do |prompt|
- prompt.system File.read("./share/llm/prompts/system.txt")
- prompt.user "Tell me the answer to 5 + 15"
- prompt.user "Tell me the answer to (5 + 15) * 2"
- prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+ prompt.system "Your task is to answer all user queries"
+ prompt.user ["Tell me about this URL", URI(url)]
+ prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+ prompt.user "Is the URL and PDF similar to each other?"
  end

  # At this point, we execute a single request
@@ -123,27 +100,24 @@ msgs.each { print "[#{_1.role}] ", _1.content, "\n" }
  > [docs/](docs/STREAMING.md#scopes) for more details.

  The following example streams the messages in a conversation
- as they are generated in real-time. This feature can be useful
- when you want to stream a conversation in real time, or when you
- want to avoid potential read timeouts during the generation of a
- response.
-
- The `stream` option can be set to an IO object, or the value `true`
- to enable streaming &ndash; and at the end of the request, `bot.chat`
- returns the same response as the non-streaming version which allows
- you to process a response in the same way:
+ as they are generated in real-time. The `stream` option can
+ be set to an IO object, or the value `true` to enable streaming
+ &ndash; and at the end of the request, `bot.chat` returns the
+ same response as the non-streaming version which allows you
+ to process a response in the same way:

  ```ruby
  #!/usr/bin/env ruby
  require "llm"

- llm = LLM.openai(key: ENV["KEY"])
+ llm = LLM.openai(key: ENV["OPENAI_SECRET"])
  bot = LLM::Bot.new(llm)
+ url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/9a/Cognac_glass.jpg/500px-Cognac_glass.jpg"
  bot.chat(stream: $stdout) do |prompt|
- prompt.system "You are my math assistant."
- prompt.user "Tell me the answer to 5 + 15"
- prompt.user "Tell me the answer to (5 + 15) * 2"
- prompt.user "Tell me the answer to ((5 + 15) * 2) / 10"
+ prompt.system "Your task is to answer all user queries"
+ prompt.user ["Tell me about this URL", URI(url)]
+ prompt.user ["Tell me about this pdf", File.open("spec/fixtures/documents/freebsd.sysctl.pdf", "r")]
+ prompt.user "Is the URL and PDF similar to each other?"
  end.to_a
  ```

202
176
  method returns an array of functions that can be called after sending a message and
203
177
  it will only be populated if the LLM detects a function should be called. Each function
204
178
  corresponds to an element in the "tools" array. The array is emptied after a function call,
205
- and potentially repopulated on the next message.
206
-
207
- The following example defines an agent that can run system commands based on natural language,
208
- and it is only intended to be a fun demo of tool calling - it is not recommended to run
209
- arbitrary commands from a LLM without sanitizing the input first :) Without further ado:
179
+ and potentially repopulated on the next message:
210
180
 
211
181
  ```ruby
212
182
  #!/usr/bin/env ruby
@@ -218,10 +188,10 @@ tool = LLM.function(:system) do |fn|
218
188
  fn.params do |schema|
219
189
  schema.object(command: schema.string.required)
220
190
  end
221
- fn.define do |params|
191
+ fn.define do |command:|
222
192
  ro, wo = IO.pipe
223
193
  re, we = IO.pipe
224
- Process.wait Process.spawn(params.command, out: wo, err: we)
194
+ Process.wait Process.spawn(command, out: wo, err: we)
225
195
  [wo,we].each(&:close)
226
196
  {stderr: re.read, stdout: ro.read}
227
197
  end
@@ -241,6 +211,60 @@ bot.chat bot.functions.map(&:call) # report return value to the LLM
241
211
  # {stderr: "", stdout: "FreeBSD"}
242
212
  ```
243
213
 
214
+ ### Files
215
+
216
+ #### Create
217
+
218
+ The OpenAI and Gemini providers provide a Files API where a client can upload files
219
+ that can be referenced from a prompt, and with other APIs as well. The following
220
+ example uses the OpenAI provider to describe the contents of a PDF file after
221
+ it has been uploaded. The file (a specialized instance of
222
+ [LLM::Response](https://0x1eef.github.io/x/llm.rb/LLM/Response.html)
223
+ ) is given as part of a prompt that is understood by llm.rb:
224
+
225
+ ```ruby
226
+ #!/usr/bin/env ruby
227
+ require "llm"
228
+
229
+ llm = LLM.openai(key: ENV["KEY"])
230
+ bot = LLM::Bot.new(llm)
231
+ file = llm.files.create(file: "/books/goodread.pdf")
232
+ bot.chat(["Tell me about this file", file])
233
+ bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
234
+ ```
235
+
236
+ ### Prompts
237
+
238
+ #### Multimodal
239
+
240
+ It is generally a given that an LLM will understand text but they can also
241
+ understand and generate other types of media as well: audio, images, video,
242
+ and even URLs. The object given as a prompt in llm.rb can be a string to
243
+ represent text, a URI object to represent a URL, an LLM::Response object
244
+ to represent a file stored with the LLM, and so on. These are objects you
245
+ can throw at the prompt and have them be understood automatically.
246
+
247
+ A prompt can also have multiple parts, and in that case, an array is given
248
+ as a prompt. Each element is considered to part of the prompt:
249
+
250
+ ```ruby
251
+ #!/usr/bin/env ruby
252
+ require "llm"
253
+
254
+ llm = LLM.openai(key: ENV["KEY"])
255
+ bot = LLM::Bot.new(llm)
256
+
257
+ bot.chat ["Tell me about this URL", URI("https://example.com/path/to/image.png")]
258
+ [bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
259
+
260
+ file = llm.files.create(file: "/books/goodread.pdf")
261
+ bot.chat ["Tell me about this PDF", file]
262
+ [bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
263
+
264
+ bot.chat ["Tell me about this image", File.open("/images/nemothefish.png", "r")]
265
+ [bot.messages.find(&:assistant?)].each { print "[#{_1.role}] ", _1.content, "\n" }
266
+ ```
267
+
244
268
  ### Audio
245
269
 
246
270
  #### Speech
@@ -368,71 +392,6 @@ res.urls.each.with_index do |url, index|
  end
  ```

- ### Files
-
- #### Create
-
- Most LLM providers provide a Files API where you can upload files
- that can be referenced from a prompt and llm.rb has first-class support
- for this feature. The following example uses the OpenAI provider to describe
- the contents of a PDF file after it has been uploaded. The file (an instance
- of [LLM::Response::File](https://0x1eef.github.io/x/llm.rb/LLM/Response/File.html))
- is passed directly to the chat method, and generally any object a prompt supports
- can be given to the chat method:
-
-
- ```ruby
- #!/usr/bin/env ruby
- require "llm"
-
- llm = LLM.openai(key: ENV["KEY"])
- bot = LLM::Bot.new(llm)
- file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
- bot.chat(file)
- bot.chat("What is this file about?")
- bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
- ##
- # [assistant] This file is about OpenBSD, a free and open-source Unix-like operating system
- # based on the Berkeley Software Distribution (BSD). It is known for its
- # emphasis on security, code correctness, and code simplicity. The file
- # contains information about the features, installation, and usage of OpenBSD.
- ```
-
- ### Prompts
-
- #### Multimodal
-
- Generally all providers accept text prompts but some providers can
- also understand URLs, and various file types (eg images, audio, video,
- etc). The llm.rb approach to multimodal prompts is to let you pass `URI`
- objects to describe links, `LLM::File` | `LLM::Response::File` objects
- to describe files, `String` objects to describe text blobs, or an array
- of the aforementioned objects to describe multiple objects in a single
- prompt. Each object is a first class citizen that can be passed directly
- to a prompt:
-
- ```ruby
- #!/usr/bin/env ruby
- require "llm"
-
- llm = LLM.openai(key: ENV["KEY"])
- bot = LLM::Bot.new(llm)
-
- bot.chat [URI("https://example.com/path/to/image.png"), "Describe the image in the link"]
- bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
- file = llm.files.create(file: "/documents/openbsd_is_awesome.pdf")
- bot.chat [file, "What is this file about?"]
- bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
- bot.chat [LLM.File("/images/puffy.png"), "What is this image about?"]
- bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
-
- bot.chat [LLM.File("/images/beastie.png"), "What is this image about?"]
- bot.messages.select(&:assistant?).each { print "[#{_1.role}] ", _1.content, "\n" }
- ```
-
  ### Embeddings

  #### Text
@@ -458,7 +417,7 @@ print res.embeddings.size, "\n"
  print res.embeddings[0].size, "\n"

  ##
- # LLM::Response::Embedding
+ # LLM::Response
  # 3
  # 1536
  ```
data/lib/llm/bot/builder.rb CHANGED
@@ -9,7 +9,7 @@ class LLM::Bot
  ##
  # @param [String] prompt The prompt
  # @param [Hash] params
- # @return [LLM::Response::Respond]
+ # @return [LLM::Response]
  def create_response!(prompt, params)
  @provider.responses.create(
  prompt,
@@ -20,7 +20,7 @@ class LLM::Bot
  ##
  # @param [String] prompt The prompt
  # @param [Hash] params
- # @return [LLM::Response::Completion]
+ # @return [LLM::Response]
  def create_completion!(prompt, params)
  @provider.complete(
  prompt,
data/lib/llm/bot/conversable.rb CHANGED
File without changes
data/lib/llm/bot/prompt/completion.rb CHANGED
File without changes
data/lib/llm/bot/prompt/respond.rb CHANGED
File without changes
data/lib/llm/bot.rb CHANGED
@@ -2,10 +2,10 @@

  module LLM
  ##
- # {LLM::Bot LLM::Bot} provides a bot object that can maintain a
+ # {LLM::Bot LLM::Bot} provides an object that can maintain a
  # a conversation. A conversation can use the chat completions API
- # that all LLM providers support or the responses API that a select
- # few LLM providers support.
+ # that all LLM providers support or the responses API that currently
+ # only OpenAI supports.
  #
  # @example example #1
  # #!/usr/bin/env ruby
@@ -14,10 +14,9 @@ module LLM
  # llm = LLM.openai(ENV["KEY"])
  # bot = LLM::Bot.new(llm)
  # msgs = bot.chat do |prompt|
- # prompt.system "Answer the following questions."
- # prompt.user "What is 5 + 7 ?"
- # prompt.user "Why is the sky blue ?"
- # prompt.user "Why did the chicken cross the road ?"
+ # prompt.user "What programming language should I learn next ?"
+ # prompt.user "Can you recommend a good book ?"
+ # prompt.user "Can you suggest a fun project to practice ?"
  # end
  # msgs.each { print "[#{_1.role}]", _1.content, "\n" }
  #
@@ -27,10 +26,9 @@ module LLM
  #
  # llm = LLM.openai(ENV["KEY"])
  # bot = LLM::Bot.new(llm)
- # bot.chat "Answer the following questions.", role: :system
- # bot.chat "What is 5 + 7 ?", role: :user
- # bot.chat "Why is the sky blue ?", role: :user
- # bot.chat "Why did the chicken cross the road ?", role: :user
+ # bot.chat "What programming language should I learn next ?", role: :user
+ # bot.chat "Can you recommend a good book ?", role: :user
+ # bot.chat "Can you suggest a fun project to practice ?", role: :user
  # bot.messages.each { print "[#{_1.role}]", _1.content, "\n" }
  class Bot
  require_relative "bot/prompt/completion"
data/lib/llm/buffer.rb CHANGED
File without changes
data/lib/llm/error.rb CHANGED
File without changes
data/lib/llm/event_handler.rb CHANGED
File without changes
data/lib/llm/eventstream/event.rb CHANGED
File without changes
data/lib/llm/eventstream/parser.rb CHANGED
File without changes
data/lib/llm/eventstream.rb CHANGED
File without changes
data/lib/llm/file.rb CHANGED
@@ -29,12 +29,19 @@ class LLM::File
  end

  ##
- # @return [String]
+ # @return [Boolean]
  # Returns true if the file is an image
  def image?
  mime_type.start_with?("image/")
  end

+ ##
+ # @return [Boolean]
+ # Returns true if the file is a PDF document
+ def pdf?
+ mime_type == "application/pdf"
+ end
+
  ##
  # @return [Integer]
  # Returns the size of the file in bytes
@@ -68,14 +75,16 @@ class LLM::File
  end

  ##
- # @param [String] path
- # The path to a file
+ # @param [String, File, LLM::Response] obj
+ # The path to the file, or an existing file reference
  # @return [LLM::File]
- def LLM.File(path)
- case path
- when LLM::File, LLM::Response::File
- path
- else
- LLM::File.new(path)
+ def LLM.File(obj)
+ case obj
+ when File
+ obj.close unless obj.closed?
+ LLM.File(obj.path)
+ when LLM::File, LLM::Response then obj
+ when String then LLM::File.new(obj)
+ else raise TypeError, "don't know how to handle #{obj.class} objects"
  end
  end
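Note: the hunk above widens what `LLM.File` accepts. A minimal sketch of the new coercion behavior, inferred from the diff (the paths are illustrative, not from the gem):

```ruby
require "llm"

LLM.File("/images/puffy.png")             # String -> wrapped in a new LLM::File
LLM.File(File.open("/images/puffy.png"))  # File   -> closed, then coerced via its path
LLM.File(LLM.File("/images/puffy.png"))   # LLM::File or LLM::Response -> returned as-is
LLM.File(42)                              # anything else -> raises TypeError
```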
data/lib/llm/function.rb CHANGED
@@ -10,15 +10,15 @@
  # fn.params do |schema|
  # schema.object(command: schema.string.required)
  # end
- # fn.define do |params|
- # {success: Kernel.system(params.command)}
+ # fn.define do |command:|
+ # {success: Kernel.system(command)}
  # end
  # end
  #
  # @example example #2
  # class System
- # def call(params)
- # {success: Kernel.system(params.command)}
+ # def call(command:)
+ # {success: Kernel.system(command)}
  # end
  # end
  #
@@ -33,6 +33,11 @@ class LLM::Function
  class Return < Struct.new(:id, :value)
  end

+ ##
+ # Returns the function ID
+ # @return [String, nil]
+ attr_accessor :id
+
  ##
  # Returns the function name
  # @return [String]
@@ -43,11 +48,6 @@ class LLM::Function
  # @return [Array, nil]
  attr_accessor :arguments

- ##
- # Returns the function ID
- # @return [String, nil]
- attr_accessor :id
-
  ##
  # @param [String] name The function name
  # @yieldparam [LLM::Function] self The function object
@@ -61,10 +61,14 @@ class LLM::Function

  ##
  # Set the function description
- # @param [String] str The function description
+ # @param [String] desc The function description
  # @return [void]
- def description(str)
- @description = str
+ def description(desc = nil)
+ if desc
+ @description = desc
+ else
+ @description
+ end
  end

  ##
@@ -87,7 +91,8 @@ class LLM::Function
  # Call the function
  # @return [LLM::Function::Return] The result of the function call
  def call
- Return.new id, (Class === @runner) ? @runner.new.call(arguments) : @runner.call(arguments)
+ runner = ((Class === @runner) ? @runner.new : @runner)
+ Return.new(id, runner.call(**arguments))
  ensure
  @called = true
  end
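Note: per the hunks above, 0.11.0 invokes a tool's runner with keyword arguments (`runner.call(**arguments)`) rather than a single params object. A minimal sketch under that assumption; `schema.integer` is assumed by analogy with the `schema.string` call seen in the README, and the arguments hash would normally be populated from the provider's tool call:

```ruby
require "llm"

tool = LLM.function(:add) do |fn|
  fn.description "Add two integers"
  fn.params do |schema|
    schema.object(a: schema.integer.required, b: schema.integer.required)
  end
  fn.define { |a:, b:| {sum: a + b} }  # keyword args replace the old |params| block
end

tool.arguments = {a: 2, b: 3}  # normally set from the LLM's tool call
tool.call.value                # => {sum: 5}
```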
data/lib/llm/json/schema/array.rb CHANGED
File without changes
data/lib/llm/json/schema/boolean.rb CHANGED
File without changes
data/lib/llm/json/schema/integer.rb CHANGED
File without changes
data/lib/llm/json/schema/leaf.rb CHANGED
File without changes
data/lib/llm/json/schema/null.rb CHANGED
File without changes
data/lib/llm/json/schema/number.rb CHANGED
File without changes
data/lib/llm/json/schema/object.rb CHANGED
File without changes
data/lib/llm/json/schema/string.rb CHANGED
File without changes
data/lib/llm/json/schema/version.rb CHANGED
File without changes
data/lib/llm/json/schema.rb CHANGED
File without changes
data/lib/llm/message.rb CHANGED
@@ -109,6 +109,14 @@ module LLM
  tool_calls.any?
  end

+ ##
+ # @return [Boolean]
+ # Returns true when the message represents a function return
+ def tool_return?
+ LLM::Function::Return === content ||
+ [*content].grep(LLM::Function::Return).any?
+ end
+
  ##
  # Returns a string representation of the message
  # @return [String]
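Note: `LLM::Message#tool_return?` is new in this release. A small sketch of how it could be used with the README's tool-calling flow; the `bot` and its functions are assumed to come from that example and are not defined here:

```ruby
bot.chat bot.functions.map(&:call)  # report LLM::Function::Return values to the LLM
bot.messages.any?(&:tool_return?)   # => true once a function return is in the history
```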
data/lib/llm/mime.rb CHANGED
File without changes
data/lib/llm/multipart.rb CHANGED
File without changes
data/lib/llm/object/builder.rb CHANGED
File without changes
data/lib/llm/object/kernel.rb CHANGED
@@ -12,6 +12,10 @@ class LLM::Object
  ::Kernel.instance_method(:instance_of?).bind(self).call(...)
  end

+ def extend(...)
+ ::Kernel.instance_method(:extend).bind(self).call(...)
+ end
+
  def method(...)
  ::Kernel.instance_method(:method).bind(self).call(...)
  end
@@ -41,5 +45,9 @@ class LLM::Object
  "#<#{self.class}:0x#{object_id.to_s(16)} properties=#{to_h.inspect}>"
  end
  alias_method :to_s, :inspect
+
+ def pretty_print(q)
+ q.text(inspect)
+ end
  end
  end
data/lib/llm/object.rb CHANGED
@@ -62,6 +62,13 @@ class LLM::Object < BasicObject
  def to_h
  @h
  end
+ alias_method :to_hash, :to_h
+
+ ##
+ # @return [Object, nil]
+ def dig(...)
+ to_h.dig(...)
+ end

  private

data/lib/llm/provider.rb CHANGED
@@ -44,7 +44,7 @@ class LLM::Provider
  # Other embedding parameters
  # @raise [NotImplementedError]
  # When the method is not implemented by a subclass
- # @return [LLM::Response::Embedding]
+ # @return [LLM::Response]
  def embed(input, model: nil, **params)
  raise NotImplementedError
  end
@@ -68,7 +68,7 @@ class LLM::Provider
  # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
  # @raise [NotImplementedError]
  # When the method is not implemented by a subclass
- # @return [LLM::Response::Completion]
+ # @return [LLM::Response]
  def complete(prompt, params = {})
  raise NotImplementedError
  end
@@ -174,6 +174,13 @@ class LLM::Provider
  raise NotImplementedError
  end

+ ##
+ # @return [LLM::OpenAI::VectorStore]
+ # Returns an interface to the vector stores API
+ def vector_stores
+ raise NotImplementedError
+ end
+
  ##
  # @return [String]
  # Returns the role of the assistant in the conversation.
@@ -222,15 +229,6 @@ class LLM::Provider
  raise NotImplementedError
  end

- ##
- # @return [Module]
- # Returns the module responsible for parsing a successful LLM response
- # @raise [NotImplementedError]
- # (see LLM::Provider#complete)
- def response_parser
- raise NotImplementedError
- end
-
  ##
  # @return [Class]
  # Returns the class responsible for handling an unsuccessful LLM response
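Note: `LLM::Provider#vector_stores` is a new entry point; per the files-changed list it is backed by the new `openai/vector_stores.rb`, while the base class raises NotImplementedError. A sketch that only reaches the interface, since the methods it exposes are not shown in this diff:

```ruby
require "llm"

llm = LLM.openai(key: ENV["OPENAI_SECRET"])
stores = llm.vector_stores  # => interface to OpenAI's vector stores API
# Providers that do not override #vector_stores raise NotImplementedError here.
```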
data/lib/llm/providers/anthropic/error_handler.rb CHANGED
File without changes