llm.rb 0.2.1 → 0.3.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (65)
  1. checksums.yaml +4 -4
  2. data/README.md +264 -110
  3. data/lib/llm/buffer.rb +83 -0
  4. data/lib/llm/chat.rb +131 -0
  5. data/lib/llm/file.rb +26 -40
  6. data/lib/llm/http_client.rb +10 -5
  7. data/lib/llm/message.rb +14 -8
  8. data/lib/llm/mime.rb +54 -0
  9. data/lib/llm/multipart.rb +98 -0
  10. data/lib/llm/provider.rb +96 -19
  11. data/lib/llm/providers/anthropic/error_handler.rb +2 -0
  12. data/lib/llm/providers/anthropic/format.rb +2 -0
  13. data/lib/llm/providers/anthropic/response_parser.rb +3 -1
  14. data/lib/llm/providers/anthropic.rb +14 -5
  15. data/lib/llm/providers/gemini/audio.rb +77 -0
  16. data/lib/llm/providers/gemini/error_handler.rb +2 -0
  17. data/lib/llm/providers/gemini/files.rb +160 -0
  18. data/lib/llm/providers/gemini/format.rb +12 -6
  19. data/lib/llm/providers/gemini/images.rb +99 -0
  20. data/lib/llm/providers/gemini/response_parser.rb +27 -1
  21. data/lib/llm/providers/gemini.rb +62 -6
  22. data/lib/llm/providers/ollama/error_handler.rb +2 -0
  23. data/lib/llm/providers/ollama/format.rb +13 -5
  24. data/lib/llm/providers/ollama/response_parser.rb +3 -1
  25. data/lib/llm/providers/ollama.rb +30 -7
  26. data/lib/llm/providers/openai/audio.rb +97 -0
  27. data/lib/llm/providers/openai/error_handler.rb +2 -0
  28. data/lib/llm/providers/openai/files.rb +148 -0
  29. data/lib/llm/providers/openai/format.rb +21 -8
  30. data/lib/llm/providers/openai/images.rb +109 -0
  31. data/lib/llm/providers/openai/response_parser.rb +58 -5
  32. data/lib/llm/providers/openai/responses.rb +78 -0
  33. data/lib/llm/providers/openai.rb +52 -6
  34. data/lib/llm/providers/voyageai.rb +2 -2
  35. data/lib/llm/response/audio.rb +13 -0
  36. data/lib/llm/response/audio_transcription.rb +14 -0
  37. data/lib/llm/response/audio_translation.rb +14 -0
  38. data/lib/llm/response/download_file.rb +15 -0
  39. data/lib/llm/response/file.rb +42 -0
  40. data/lib/llm/response/filelist.rb +18 -0
  41. data/lib/llm/response/image.rb +29 -0
  42. data/lib/llm/response/output.rb +56 -0
  43. data/lib/llm/response.rb +18 -6
  44. data/lib/llm/utils.rb +19 -0
  45. data/lib/llm/version.rb +1 -1
  46. data/lib/llm.rb +5 -2
  47. data/llm.gemspec +1 -6
  48. data/spec/anthropic/completion_spec.rb +1 -1
  49. data/spec/gemini/completion_spec.rb +1 -1
  50. data/spec/gemini/conversation_spec.rb +31 -0
  51. data/spec/gemini/files_spec.rb +124 -0
  52. data/spec/gemini/images_spec.rb +47 -0
  53. data/spec/llm/conversation_spec.rb +101 -61
  54. data/spec/ollama/completion_spec.rb +1 -1
  55. data/spec/ollama/conversation_spec.rb +31 -0
  56. data/spec/openai/audio_spec.rb +55 -0
  57. data/spec/openai/completion_spec.rb +1 -1
  58. data/spec/openai/files_spec.rb +150 -0
  59. data/spec/openai/images_spec.rb +95 -0
  60. data/spec/openai/responses_spec.rb +51 -0
  61. data/spec/setup.rb +8 -0
  62. metadata +31 -49
  63. data/LICENSE.txt +0 -21
  64. data/lib/llm/conversation.rb +0 -90
  65. data/lib/llm/message_queue.rb +0 -54
data/spec/ollama/conversation_spec.rb ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::Chat: ollama" do
+  let(:described_class) { LLM::Chat }
+  let(:provider) { LLM.ollama(nil, host: "eel.home.network") }
+  let(:conversation) { described_class.new(provider, **params).lazy }
+
+  context "when asked to describe an image",
+          vcr: {cassette_name: "ollama/conversations/multimodal_response"} do
+    subject { conversation.last_message }
+
+    let(:params) { {model: "llava"} }
+    let(:image) { LLM::File("spec/fixtures/images/bluebook.png") }
+
+    before do
+      conversation.chat(image, :user)
+      conversation.chat("Describe the image with a short sentance", :user)
+    end
+
+    it "describes the image" do
+      is_expected.to have_attributes(
+        role: "assistant",
+        content: " The image is a graphic illustration of a book" \
+                 " with its pages spread out, symbolizing openness" \
+                 " or knowledge. "
+      )
+    end
+  end
+end
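
The spec above doubles as a usage sketch for the new LLM::Chat class against Ollama. Condensed, and assuming a local Ollama host with the llava model pulled (the host and image path here are illustrative, not from the release):

    require "llm"

    llm = LLM.ollama(nil, host: "localhost")
    bot = LLM::Chat.new(llm, model: "llava").lazy
    bot.chat LLM::File("bluebook.png"), :user              # queue an image message
    bot.chat "Describe the image with a short sentence", :user
    puts bot.last_message.content                          # lazy: the request is sent here
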
data/spec/openai/audio_spec.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Audio" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation",
+          vcr: {cassette_name: "openai/audio/successful_create"} do
+    subject(:response) { provider.audio.create_speech(input: "A dog on a rocket to the moon") }
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Audio)
+    end
+
+    it "returns an audio" do
+      expect(response.audio).to be_instance_of(StringIO)
+    end
+  end
+
+  context "when given a successful transcription operation",
+          vcr: {cassette_name: "openai/audio/successful_transcription"} do
+    subject(:response) do
+      provider.audio.create_transcription(
+        file: LLM::File("spec/fixtures/audio/rocket.mp3")
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::AudioTranscription)
+    end
+
+    it "returns a transcription" do
+      expect(response.text).to eq("A dog on a rocket to the moon.")
+    end
+  end
+
+  context "when given a successful translation operation",
+          vcr: {cassette_name: "openai/audio/successful_translation"} do
+    subject(:response) do
+      provider.audio.create_translation(
+        file: LLM::File("spec/fixtures/audio/bismillah.mp3")
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::AudioTranslation)
+    end
+
+    it "returns a translation (Arabic => English)" do
+      expect(response.text).to eq("In the name of Allah, the Beneficent, the Merciful.")
+    end
+  end
+end
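
Condensed from the spec above, a sketch of the three audio endpoints added in this release, assuming a valid key in LLM_SECRET (file names are illustrative):

    require "llm"

    llm = LLM.openai(ENV["LLM_SECRET"])

    # Text to speech: an LLM::Response::Audio whose #audio is a StringIO
    speech = llm.audio.create_speech(input: "A dog on a rocket to the moon")
    File.binwrite("speech.mp3", speech.audio.string)

    # Speech to text: both responses expose the result via #text
    llm.audio.create_transcription(file: LLM::File("rocket.mp3")).text
    llm.audio.create_translation(file: LLM::File("bismillah.mp3")).text
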
data/spec/openai/completion_spec.rb CHANGED
@@ -37,7 +37,7 @@ RSpec.describe "LLM::OpenAI: completions" do
     end

     it "includes the response" do
-      expect(choice.extra[:completion]).to eq(response)
+      expect(choice.extra[:response]).to eq(response)
     end
   end
 end
data/spec/openai/files_spec.rb ADDED
@@ -0,0 +1,150 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Files" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation (haiku1.txt)",
+          vcr: {cassette_name: "openai/files/successful_create_haiku1"} do
+    subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt")) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      expect(file).to have_attributes(
+        id: instance_of(String),
+        filename: "haiku1.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful create operation (haiku2.txt)",
+          vcr: {cassette_name: "openai/files/successful_create_haiku2"} do
+    subject(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt")) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      expect(file).to have_attributes(
+        id: instance_of(String),
+        filename: "haiku2.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful delete operation (haiku3.txt)",
+          vcr: {cassette_name: "openai/files/successful_delete_haiku3"} do
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku3.txt")) }
+    subject { provider.files.delete(file:) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(OpenStruct)
+    end
+
+    it "returns deleted status" do
+      is_expected.to have_attributes(
+        deleted: true
+      )
+    end
+  end
+
+  context "when given a successful get operation (haiku4.txt)",
+          vcr: {cassette_name: "openai/files/successful_get_haiku4"} do
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/haiku4.txt")) }
+    subject { provider.files.get(file:) }
+    after { provider.files.delete(file:) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::File)
+    end
+
+    it "returns a file object" do
+      is_expected.to have_attributes(
+        id: instance_of(String),
+        filename: "haiku4.txt",
+        purpose: "assistants"
+      )
+    end
+  end
+
+  context "when given a successful all operation",
+          vcr: {cassette_name: "openai/files/successful_all"} do
+    let!(:files) do
+      [
+        provider.files.create(file: LLM::File("spec/fixtures/documents/haiku1.txt")),
+        provider.files.create(file: LLM::File("spec/fixtures/documents/haiku2.txt"))
+      ]
+    end
+    subject(:file) { provider.files.all }
+    after { files.each { |file| provider.files.delete(file:) } }
+
+    it "is successful" do
+      expect(file).to be_instance_of(LLM::Response::FileList)
+    end
+
+    it "returns an array of file objects" do
+      expect(file).to match_array(
+        [
+          have_attributes(
+            id: instance_of(String),
+            filename: "haiku1.txt",
+            purpose: "assistants"
+          ),
+          have_attributes(
+            id: instance_of(String),
+            filename: "haiku2.txt",
+            purpose: "assistants"
+          )
+        ]
+      )
+    end
+  end
+
+  context "when asked to describe the contents of a file",
+          vcr: {cassette_name: "openai/files/describe_freebsd.sysctl.pdf"} do
+    subject { bot.last_message.content }
+    let(:bot) { LLM::Chat.new(provider).lazy }
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+    after { provider.files.delete(file:) }
+
+    before do
+      bot.respond(file)
+      bot.respond("Describe the contents of the file to me")
+      bot.respond("Your summary should be no more than ten words")
+    end
+
+    it "describes the document" do
+      is_expected.to eq("FreeBSD system control nodes implementation and usage overview.")
+    end
+  end
+
+  context "when asked to describe the contents of a file",
+          vcr: {cassette_name: "openai/files/describe_freebsd.sysctl_2.pdf"} do
+    subject { bot.last_message.content }
+    let(:bot) { LLM::Chat.new(provider).lazy }
+    let(:file) { provider.files.create(file: LLM::File("spec/fixtures/documents/freebsd.sysctl.pdf")) }
+    after { provider.files.delete(file:) }
+
+    before do
+      bot.respond([
+        "Describe the contents of the file to me",
+        "Your summary should be no more than ten words",
+        file
+      ])
+    end
+
+    it "describes the document" do
+      is_expected.to eq("FreeBSD kernel system control nodes overview and implementation.")
+    end
+  end
+end
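
The files specs cover the full CRUD surface plus file-grounded chat. A minimal sketch under the same assumptions (LLM_SECRET set, paths illustrative):

    require "llm"

    llm = LLM.openai(ENV["LLM_SECRET"])

    file = llm.files.create(file: LLM::File("haiku1.txt"))  # LLM::Response::File
    llm.files.get(file:)                                    # fetch metadata by object
    llm.files.all.each { puts _1.filename }                 # LLM::Response::FileList

    # Ground a conversation in the uploaded file via the responses API
    bot = LLM::Chat.new(llm).lazy
    bot.respond [file, "Describe the contents of the file to me"]
    puts bot.last_message.content

    llm.files.delete(file:)                                 # => deleted: true
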
data/spec/openai/images_spec.rb ADDED
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Images" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation (urls)",
+          vcr: {cassette_name: "openai/images/successful_create_urls"} do
+    subject(:response) { provider.images.create(prompt: "A dog on a rocket to the moon") }
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns an array of urls" do
+      expect(response.urls).to be_instance_of(Array)
+    end
+
+    it "returns a url" do
+      expect(response.urls[0]).to be_instance_of(String)
+    end
+  end
+
+  context "when given a successful create operation (base64)",
+          vcr: {cassette_name: "openai/images/successful_create_base64"} do
+    subject(:response) do
+      provider.images.create(
+        prompt: "A dog on a rocket to the moon",
+        response_format: "b64_json"
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns an array of images" do
+      expect(response.images).to be_instance_of(Array)
+    end
+
+    it "returns an encoded string" do
+      expect(response.images[0].encoded).to be_instance_of(String)
+    end
+
+    it "returns an binary string" do
+      expect(response.images[0].binary).to be_instance_of(String)
+    end
+  end
+
+  context "when given a successful variation operation",
+          vcr: {cassette_name: "openai/images/successful_variation"} do
+    subject(:response) do
+      provider.images.create_variation(
+        image: LLM::File("spec/fixtures/images/bluebook.png"),
+        n: 5
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns data" do
+      expect(response.urls.size).to eq(5)
+    end
+
+    it "returns multiple variations" do
+      response.urls.each { expect(_1).to be_instance_of(String) }
+    end
+  end
+
+  context "when given a successful edit",
+          vcr: {cassette_name: "openai/images/successful_edit"} do
+    subject(:response) do
+      provider.images.edit(
+        image: LLM::File("spec/fixtures/images/bluebook.png"),
+        prompt: "Add white background"
+      )
+    end
+
+    it "is successful" do
+      expect(response).to be_instance_of(LLM::Response::Image)
+    end
+
+    it "returns data" do
+      expect(response.urls).to be_instance_of(Array)
+    end
+
+    it "returns a url" do
+      expect(response.urls[0]).to be_instance_of(String)
+    end
+  end
+end
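
A condensed sketch of the images API exercised above, again assuming LLM_SECRET is set (paths illustrative):

    require "llm"

    llm = LLM.openai(ENV["LLM_SECRET"])

    # Generate: returns URLs by default, base64 with response_format: "b64_json"
    res = llm.images.create(prompt: "A dog on a rocket to the moon")
    res.urls.each { puts _1 }

    # Edit and vary an existing image supplied through LLM::File
    llm.images.edit(image: LLM::File("bluebook.png"), prompt: "Add white background")
    llm.images.create_variation(image: LLM::File("bluebook.png"), n: 5)
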
data/spec/openai/responses_spec.rb ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require "setup"
+
+RSpec.describe "LLM::OpenAI::Responses" do
+  let(:token) { ENV["LLM_SECRET"] || "TOKEN" }
+  let(:provider) { LLM.openai(token) }
+
+  context "when given a successful create operation",
+          vcr: {cassette_name: "openai/responses/successful_create"} do
+    subject { provider.responses.create("Hello", :developer) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful get operation",
+          vcr: {cassette_name: "openai/responses/successful_get"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.get(response) }
+
+    it "is successful" do
+      is_expected.to be_instance_of(LLM::Response::Output)
+    end
+
+    it "has outputs" do
+      is_expected.to have_attributes(
+        outputs: [instance_of(LLM::Message)]
+      )
+    end
+  end
+
+  context "when given a successful delete operation",
+          vcr: {cassette_name: "openai/responses/successful_delete"} do
+    let(:response) { provider.responses.create("Hello", :developer) }
+    subject { provider.responses.delete(response) }
+
+    it "is successful" do
+      is_expected.to have_attributes(
+        deleted: true
+      )
+    end
+  end
+end
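
And the responses API itself, which the file specs above also lean on via LLM::Chat#respond. A sketch under the same assumptions:

    require "llm"

    llm = LLM.openai(ENV["LLM_SECRET"])

    res = llm.responses.create("Hello", :developer)  # LLM::Response::Output
    res.outputs.each { puts _1.content }             # outputs are LLM::Message objects

    llm.responses.get(res)     # re-fetch by object
    llm.responses.delete(res)  # responds with deleted: true
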
data/spec/setup.rb CHANGED
@@ -16,5 +16,13 @@ VCR.configure do |config|
   config.cassette_library_dir = "spec/fixtures/cassettes"
   config.hook_into :webmock
   config.configure_rspec_metadata!
+
+  ##
+  # scrub
   config.filter_sensitive_data("TOKEN") { ENV["LLM_SECRET"] }
+  config.before_record do
+    body = _1.response.body
+    body.gsub! %r|#{Regexp.escape("https://oaidalleapiprodscus.blob.core.windows.net/")}[^"]+|,
+      "https://openai.com/generated/image.png"
+  end
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 0.2.1
+  version: 0.3.0
 platform: ruby
 authors:
 - Antar Azri
@@ -9,50 +9,8 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-04-04 00:00:00.000000000 Z
+date: 2025-04-25 00:00:00.000000000 Z
 dependencies:
-- !ruby/object:Gem::Dependency
-  name: net-http
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: 0.6.0
-- !ruby/object:Gem::Dependency
-  name: json
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-- !ruby/object:Gem::Dependency
-  name: yaml
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: webmock
   requirement: !ruby/object:Gem::Requirement
@@ -189,40 +147,57 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- LICENSE.txt
 - README.md
 - lib/llm.rb
-- lib/llm/conversation.rb
+- lib/llm/buffer.rb
+- lib/llm/chat.rb
 - lib/llm/core_ext/ostruct.rb
 - lib/llm/error.rb
 - lib/llm/file.rb
 - lib/llm/http_client.rb
 - lib/llm/message.rb
-- lib/llm/message_queue.rb
+- lib/llm/mime.rb
 - lib/llm/model.rb
+- lib/llm/multipart.rb
 - lib/llm/provider.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
 - lib/llm/providers/anthropic/format.rb
 - lib/llm/providers/anthropic/response_parser.rb
 - lib/llm/providers/gemini.rb
+- lib/llm/providers/gemini/audio.rb
 - lib/llm/providers/gemini/error_handler.rb
+- lib/llm/providers/gemini/files.rb
 - lib/llm/providers/gemini/format.rb
+- lib/llm/providers/gemini/images.rb
 - lib/llm/providers/gemini/response_parser.rb
 - lib/llm/providers/ollama.rb
 - lib/llm/providers/ollama/error_handler.rb
 - lib/llm/providers/ollama/format.rb
 - lib/llm/providers/ollama/response_parser.rb
 - lib/llm/providers/openai.rb
+- lib/llm/providers/openai/audio.rb
 - lib/llm/providers/openai/error_handler.rb
+- lib/llm/providers/openai/files.rb
 - lib/llm/providers/openai/format.rb
+- lib/llm/providers/openai/images.rb
 - lib/llm/providers/openai/response_parser.rb
+- lib/llm/providers/openai/responses.rb
 - lib/llm/providers/voyageai.rb
 - lib/llm/providers/voyageai/error_handler.rb
 - lib/llm/providers/voyageai/response_parser.rb
 - lib/llm/response.rb
+- lib/llm/response/audio.rb
+- lib/llm/response/audio_transcription.rb
+- lib/llm/response/audio_translation.rb
 - lib/llm/response/completion.rb
+- lib/llm/response/download_file.rb
 - lib/llm/response/embedding.rb
+- lib/llm/response/file.rb
+- lib/llm/response/filelist.rb
+- lib/llm/response/image.rb
+- lib/llm/response/output.rb
+- lib/llm/utils.rb
 - lib/llm/version.rb
 - llm.gemspec
 - share/llm/models/anthropic.yml
@@ -232,21 +207,28 @@ files:
 - spec/anthropic/completion_spec.rb
 - spec/anthropic/embedding_spec.rb
 - spec/gemini/completion_spec.rb
+- spec/gemini/conversation_spec.rb
 - spec/gemini/embedding_spec.rb
+- spec/gemini/files_spec.rb
+- spec/gemini/images_spec.rb
 - spec/llm/conversation_spec.rb
 - spec/ollama/completion_spec.rb
+- spec/ollama/conversation_spec.rb
 - spec/ollama/embedding_spec.rb
+- spec/openai/audio_spec.rb
 - spec/openai/completion_spec.rb
 - spec/openai/embedding_spec.rb
+- spec/openai/files_spec.rb
+- spec/openai/images_spec.rb
+- spec/openai/responses_spec.rb
 - spec/readme_spec.rb
 - spec/setup.rb
 homepage: https://github.com/llmrb/llm
 licenses:
-- MIT
+- 0BSD
 metadata:
   homepage_uri: https://github.com/llmrb/llm
   source_code_uri: https://github.com/llmrb/llm
-  changelog_uri: https://github.com/llmrb/llm/blob/main/CHANGELOG.md
 post_install_message:
 rdoc_options: []
 require_paths:
data/LICENSE.txt DELETED
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2024 Antar Azri
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
data/lib/llm/conversation.rb DELETED
@@ -1,90 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # {LLM::Conversation LLM::Conversation} provides a conversation
-  # object that maintains a thread of messages that acts as context
-  # throughout the conversation.
-  # @example
-  #   llm = LLM.openai(ENV["KEY"])
-  #   convo = llm.chat("You are my climate expert", :system)
-  #   convo.chat("What's the climate like in Rio de Janerio?", :user)
-  #   convo.chat("What's the climate like in Algiers?", :user)
-  #   convo.chat("What's the climate like in Tokyo?", :user)
-  #   p bot.messages.map { [_1.role, _1.content] }
-  class Conversation
-    ##
-    # @return [Array<LLM::Message>]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    # @param [Hash] params
-    #  The parameters to maintain throughout the conversation
-    def initialize(provider, params = {})
-      @provider = provider
-      @params = params
-      @lazy = false
-      @messages = []
-    end
-
-    ##
-    # @param prompt (see LLM::Provider#prompt)
-    # @return [LLM::Conversation]
-    def chat(prompt, role = :user, **params)
-      tap do
-        if lazy?
-          @messages << [LLM::Message.new(role, prompt), @params.merge(params)]
-        else
-          completion = complete(prompt, role, params)
-          @messages.concat [Message.new(role, prompt), completion.choices[0]]
-        end
-      end
-    end
-
-    ##
-    # @note
-    #  The `read_response` and `recent_message` methods are aliases of
-    #  the `last_message` method, and you can choose the name that best
-    #  fits your context or code style.
-    # @param [#to_s] role
-    #  The role of the last message.
-    #  Defaults to the LLM's assistant role (eg "assistant" or "model")
-    # @return [LLM::Message]
-    #  The last message for the given role
-    def last_message(role: @provider.assistant_role)
-      messages.reverse_each.find { _1.role == role.to_s }
-    end
-    alias_method :recent_message, :last_message
-    alias_method :read_response, :last_message
-
-    ##
-    # Enables lazy mode for the conversation.
-    # @return [LLM::Conversation]
-    def lazy
-      tap do
-        next if lazy?
-        @lazy = true
-        @messages = LLM::MessageQueue.new(@provider)
-      end
-    end
-
-    ##
-    # @return [Boolean]
-    #  Returns true if the conversation is lazy
-    def lazy?
-      @lazy
-    end
-
-    private
-
-    def complete(prompt, role, params)
-      @provider.complete(
-        prompt,
-        role,
-        **@params.merge(params.merge(messages:))
-      )
-    end
-  end
-end
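
The deletion above (together with data/lib/llm/message_queue.rb) is a rename rather than a removal: LLM::Chat and LLM::Buffer take over in 0.3.0. A rough migration sketch, assuming the new class mirrors the chat/lazy semantics shown in the specs earlier in this diff:

    llm = LLM.openai(ENV["KEY"])

    # 0.2.1: a conversation started from the provider
    bot = llm.chat("You are my climate expert", :system)

    # 0.3.0: instantiate LLM::Chat directly; #lazy queues messages the
    # way LLM::MessageQueue used to
    bot = LLM::Chat.new(llm).lazy
    bot.chat("You are my climate expert", :system)
    bot.chat("What's the climate like in Tokyo?", :user)
    bot.messages.map { [_1.role, _1.content] }
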