llm.rb 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +146 -0
  4. data/lib/llm/conversation.rb +38 -0
  5. data/lib/llm/core_ext/ostruct.rb +37 -0
  6. data/lib/llm/error.rb +28 -0
  7. data/lib/llm/file.rb +66 -0
  8. data/lib/llm/http_client.rb +29 -0
  9. data/lib/llm/lazy_conversation.rb +39 -0
  10. data/lib/llm/message.rb +55 -0
  11. data/lib/llm/message_queue.rb +47 -0
  12. data/lib/llm/provider.rb +114 -0
  13. data/lib/llm/providers/anthropic/error_handler.rb +32 -0
  14. data/lib/llm/providers/anthropic/format.rb +31 -0
  15. data/lib/llm/providers/anthropic/response_parser.rb +29 -0
  16. data/lib/llm/providers/anthropic.rb +63 -0
  17. data/lib/llm/providers/gemini/error_handler.rb +43 -0
  18. data/lib/llm/providers/gemini/format.rb +31 -0
  19. data/lib/llm/providers/gemini/response_parser.rb +31 -0
  20. data/lib/llm/providers/gemini.rb +64 -0
  21. data/lib/llm/providers/ollama/error_handler.rb +32 -0
  22. data/lib/llm/providers/ollama/format.rb +28 -0
  23. data/lib/llm/providers/ollama/response_parser.rb +18 -0
  24. data/lib/llm/providers/ollama.rb +51 -0
  25. data/lib/llm/providers/openai/error_handler.rb +32 -0
  26. data/lib/llm/providers/openai/format.rb +28 -0
  27. data/lib/llm/providers/openai/response_parser.rb +35 -0
  28. data/lib/llm/providers/openai.rb +62 -0
  29. data/lib/llm/response/completion.rb +50 -0
  30. data/lib/llm/response/embedding.rb +23 -0
  31. data/lib/llm/response.rb +24 -0
  32. data/lib/llm/version.rb +5 -0
  33. data/lib/llm.rb +47 -0
  34. data/llm.gemspec +40 -0
  35. data/spec/anthropic/completion_spec.rb +76 -0
  36. data/spec/gemini/completion_spec.rb +80 -0
  37. data/spec/gemini/embedding_spec.rb +33 -0
  38. data/spec/llm/conversation_spec.rb +56 -0
  39. data/spec/llm/lazy_conversation_spec.rb +110 -0
  40. data/spec/ollama/completion_spec.rb +52 -0
  41. data/spec/ollama/embedding_spec.rb +15 -0
  42. data/spec/openai/completion_spec.rb +99 -0
  43. data/spec/openai/embedding_spec.rb +33 -0
  44. data/spec/readme_spec.rb +64 -0
  45. data/spec/setup.rb +29 -0
  46. metadata +194 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 5cd6331d31fab0e7582d9a0bef7a10b45e910fff70faace6bae774dd2757cff6
+   data.tar.gz: 42f0d68055c3f4aa732bb31b7dddd8d72b8e4fa90ba8727e871a43b736060e8f
+ SHA512:
+   metadata.gz: b3158f6d82d9f344deef6727ae2e74f2d7fc8637c46188d2aaf7c92d395cd50e737f39674d283decb17bcd9b7c45f5a0b592bf2fe3d48a8f121e89cdc0b5be35
+   data.tar.gz: 74a9dbe6e7a2082cf764db07058761ed2ca5fb36916e0f78d487b4648fd34b531ca801e2202706a138d45191ed8f0f7e05db357f400aa05569403eadfb8fc887
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2024 Antar Azri
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,146 @@
+ ## About
+
+ llm.rb is a lightweight library that provides a common interface
+ and set of functionality for multiple Large Language Models (LLMs). It
+ is designed to be simple, flexible, and easy to use.
+
+ ## Examples
+
+ ### Providers
+
+ #### LLM::Provider
+
+ All providers inherit from [LLM::Provider](https://0x1eef.github.io/x/llm/LLM/Provider.html) –
+ they share a common interface and set of functionality. Each provider can be instantiated
+ with an API key (if required) and an optional set of configuration options via
+ [the singleton methods of LLM](https://0x1eef.github.io/x/llm/LLM.html). For example:
+
+ ```ruby
+ #!/usr/bin/env ruby
+ require "llm"
+
+ llm = LLM.openai("yourapikey")
+ llm = LLM.gemini("yourapikey")
+ llm = LLM.anthropic("yourapikey")
+ llm = LLM.ollama(nil)
+ ```
+
+ ### Completions
+
+ #### Conversation
+
+ The
+ [LLM::Provider#chat](https://0x1eef.github.io/x/llm/LLM/Provider.html#chat-instance_method)
+ method returns a
+ [LLM::LazyConversation](https://0x1eef.github.io/x/llm/LLM/LazyConversation.html)
+ object, which allows for a "lazy" conversation where messages are batched and
+ sent to the provider only when necessary. The non-lazy counterpart is available via the
+ [LLM::Provider#chat!](https://0x1eef.github.io/x/llm/LLM/Provider.html#chat!-instance_method)
+ method.
+
+ Both lazy and non-lazy conversations maintain a message thread that can
+ be reused as context throughout a conversation. In the following example
+ the system prompt is loaded from
+ [a file](./share/llm/prompts/system.txt)
+ for the sake of brevity, all other prompts are "user" prompts,
+ and a single request is made to the provider when iterating over the messages
+ belonging to the lazy conversation:
+
+ ```ruby
+ #!/usr/bin/env ruby
+ require "llm"
+
+ llm = LLM.openai(ENV["KEY"])
+ bot = llm.chat File.read("./share/llm/prompts/system.txt"), :system
+ bot.chat "What color is the sky?"
+ bot.chat "What color is an orange?"
+ bot.chat "I like Ruby"
+ bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
+
+ ##
+ # [system] You are a friendly chatbot. Sometimes, you like to tell a joke.
+ # But the joke must be based on the given inputs.
+ # I will provide you a set of messages. Reply to all of them.
+ # A message is considered unanswered if there is no corresponding assistant response.
+ #
+ # [user] What color is the sky?
+ # [user] What color is an orange?
+ # [user] I like Ruby
+ #
+ # [assistant] The sky is typically blue during the day. As for an orange,
+ # it is usually orange in color—funny how that works, right?
+ # I love Ruby too! Speaking of colors, why did the orange stop?
+ # Because it ran out of juice! 🍊😂
+ ```
+
+ #### Prompts
+
+ Both lazy and non-lazy conversations accept text as a prompt.
+ Depending on the provider, they may also accept a
+ [URI](https://docs.ruby-lang.org/en/master/URI.html)
+ or
+ [LLM::File](https://0x1eef.github.io/x/llm/LLM/File.html)
+ object. Generally a
+ [URI](https://docs.ruby-lang.org/en/master/URI.html)
+ object is used to reference an image on the web, and an
+ [LLM::File](https://0x1eef.github.io/x/llm/LLM/File.html)
+ object is used to reference a file on the local filesystem.
+ The following list shows the types of prompts that each
+ provider accepts:
+
+ * OpenAI => String, URI
+ * Gemini => String, LLM::File
+ * Anthropic => String, URI
+ * Ollama => String, URI
+
+ ### Embeddings
+
+ #### Text
+
+ The
+ [`LLM::Provider#embed`](https://0x1eef.github.io/x/llm/LLM/Provider.html#embed-instance_method)
+ method generates a vector representation of a given piece of text.
+ Embeddings capture the semantic meaning of text, and they are
+ commonly used in tasks such as text similarity comparison (e.g., finding related documents),
+ semantic search in vector databases, and the clustering and classification
+ of text-based data:
+
+ ```ruby
+ #!/usr/bin/env ruby
+ require "llm"
+
+ llm = LLM.openai(ENV["KEY"])
+ res = llm.embed("Hello, world!")
+ print res.class, "\n"
+ print res.embeddings.size, "\n"
+ print res.embeddings[0].size, "\n"
+
+ ##
+ # LLM::Response::Embedding
+ # 1
+ # 1536
+ ```
+
+ ## Providers
+
+ - [x] [Anthropic](https://www.anthropic.com/)
+ - [x] [OpenAI](https://platform.openai.com/docs/overview)
+ - [x] [Gemini](https://ai.google.dev/gemini-api/docs)
+ - [x] [Ollama](https://github.com/ollama/ollama#readme)
+ - [ ] Hugging Face
+ - [ ] Cohere
+ - [ ] AI21 Labs
+ - [ ] Replicate
+ - [ ] Mistral AI
+
+ ## Documentation
+
+ A complete API reference is available at [0x1eef.github.io/x/llm](https://0x1eef.github.io/x/llm).
+
+ ## Install
+
+ llm.rb has not been published to RubyGems.org yet. Stay tuned.
+
+ ## License
+
+ MIT. See [LICENSE.txt](LICENSE.txt) for more details.
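
The Prompts section above lists the accepted prompt types without a code sample. A minimal sketch of a file-based prompt against the Gemini provider, assuming an API key in `ENV["KEY"]` and a local `image.png` (both hypothetical):

```ruby
#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
bot = llm.chat "Describe the image in one sentence" # a String prompt
bot.chat LLM.File("image.png")                      # an LLM::File prompt (Gemini)
bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
```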
data/lib/llm/conversation.rb ADDED
@@ -0,0 +1,38 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # {LLM::Conversation LLM::Conversation} provides a conversation
+   # object that maintains a thread of messages that act as the
+   # context of the conversation.
+   #
+   # @example
+   #  llm = LLM.openai(key)
+   #  bot = llm.chat("What is the capital of France?")
+   #  bot.chat("What should we eat in Paris?")
+   #  bot.chat("What is the weather like in Paris?")
+   #  p bot.messages.map { [_1.role, _1.content] }
+   class Conversation
+     ##
+     # @return [Array<LLM::Message>]
+     attr_reader :messages
+
+     ##
+     # @param [LLM::Provider] provider
+     #  A provider
+     def initialize(provider)
+       @provider = provider
+       @messages = []
+     end
+
+     ##
+     # @param prompt (see LLM::Provider#prompt)
+     # @return [LLM::Conversation]
+     def chat(prompt, role = :user, **params)
+       tap do
+         completion = @provider.complete(prompt, role, **params.merge(messages:))
+         @messages.concat [Message.new(role, prompt), completion.choices[0]]
+       end
+     end
+   end
+ end
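
A brief usage sketch of the class above, reached through LLM::Provider#chat! (the API key is hypothetical). Each call to #chat sends one request and appends both sides of the exchange to #messages:

```ruby
require "llm"

llm = LLM.openai(ENV["KEY"])
bot = llm.chat!("You are a terse assistant", :system) # eager: one request per #chat
bot.chat("What is the capital of France?")
bot.messages.each { |m| puts "[#{m.role}] #{m.content}" }
```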
data/lib/llm/core_ext/ostruct.rb ADDED
@@ -0,0 +1,37 @@
+ # frozen_string_literal: true
+
+ require "ostruct"
+ class OpenStruct
+   module FromHash
+     ##
+     # @example
+     #  obj = OpenStruct.from_hash(person: {name: 'John'})
+     #  obj.person.name # => 'John'
+     #  obj.person.class # => OpenStruct
+     # @param [Hash] hash_obj
+     #  A Hash object
+     # @return [OpenStruct]
+     #  An OpenStruct object initialized by visiting `hash_obj` with
+     #  recursion
+     def from_hash(hash_obj)
+       visited_object = {}
+       hash_obj.each do |key, value|
+         visited_object[key] = walk(value)
+       end
+       OpenStruct.new(visited_object)
+     end
+
+     private
+
+     def walk(value)
+       if Hash === value
+         from_hash(value)
+       elsif Array === value
+         value.map { |v| (Hash === v) ? from_hash(v) : v }
+       else
+         value
+       end
+     end
+   end
+   extend FromHash
+ end
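
A runnable sketch of the recursive conversion above, assuming the gem's lib directory is on the load path:

```ruby
require "llm/core_ext/ostruct"

obj = OpenStruct.from_hash(person: {name: "John", pets: [{name: "Rex"}]})
obj.person.name         # => "John"
obj.person.pets[0].name # => "Rex" (hashes inside arrays are converted too)
obj.person.class        # => OpenStruct
```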
data/lib/llm/error.rb ADDED
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The superclass of all LLM errors
+   class Error < RuntimeError
+     def initialize
+       block_given? ? yield(self) : nil
+     end
+
+     ##
+     # The superclass of all HTTP protocol errors
+     class BadResponse < Error
+       ##
+       # @return [Net::HTTPResponse]
+       #  Returns the response associated with an error
+       attr_accessor :response
+     end
+
+     ##
+     # HTTPUnauthorized
+     Unauthorized = Class.new(BadResponse)
+
+     ##
+     # HTTPTooManyRequests
+     RateLimit = Class.new(BadResponse)
+   end
+ end
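
The hierarchy above is what callers are expected to rescue. A sketch, with the key and provider hypothetical, that matches the subclasses before their BadResponse superclass:

```ruby
require "llm"

begin
  LLM.openai("invalid-key").chat!("Hello")
rescue LLM::Error::Unauthorized => e
  warn "Bad credentials (status #{e.response.code})"
rescue LLM::Error::RateLimit => e
  warn "Throttled (status #{e.response.code})"
rescue LLM::Error::BadResponse => e
  warn "Unexpected response (status #{e.response.code})"
end
```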
data/lib/llm/file.rb ADDED
@@ -0,0 +1,66 @@
+ # frozen_string_literal: true
+
+ class LLM::File
+   ##
+   # @return [Hash]
+   #  Returns a hash of common file extensions and their
+   #  corresponding MIME types
+   def self.mime_types
+     @mime_types ||= {
+       # Images
+       ".png" => "image/png",
+       ".jpg" => "image/jpeg",
+       ".jpeg" => "image/jpeg",
+       ".webp" => "image/webp",
+
+       # Videos
+       ".flv" => "video/x-flv",
+       ".mov" => "video/quicktime",
+       ".mpeg" => "video/mpeg",
+       ".mpg" => "video/mpeg",
+       ".mp4" => "video/mp4",
+       ".webm" => "video/webm",
+       ".wmv" => "video/x-ms-wmv",
+       ".3gp" => "video/3gpp",
+
+       # Audio
+       ".aac" => "audio/aac",
+       ".flac" => "audio/flac",
+       ".mp3" => "audio/mpeg",
+       ".m4a" => "audio/mp4",
+       ".mpga" => "audio/mpeg",
+       ".opus" => "audio/opus",
+       ".pcm" => "audio/L16",
+       ".wav" => "audio/wav",
+       ".weba" => "audio/webm",
+
+       # Documents
+       ".pdf" => "application/pdf",
+       ".txt" => "text/plain"
+     }.freeze
+   end
+
+   ##
+   # @return [String]
+   #  Returns the path to a file
+   attr_reader :path
+
+   def initialize(path)
+     @path = path
+   end
+
+   ##
+   # @return [String]
+   #  Returns the MIME type of the file
+   def mime_type
+     self.class.mime_types[File.extname(path)]
+   end
+ end
+
+ ##
+ # @param [String] path
+ #  The path to a file
+ # @return [LLM::File]
+ def LLM.File(path)
+   LLM::File.new(path)
+ end
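
A short sketch of the MIME lookup above. The file names are hypothetical, and since the lookup is purely extension-based, the files do not need to exist:

```ruby
require "llm"

file = LLM.File("photo.jpeg")
file.path                      # => "photo.jpeg"
file.mime_type                 # => "image/jpeg"
LLM.File("notes.md").mime_type # => nil (".md" is not in the table)
```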
@@ -0,0 +1,29 @@
+ # frozen_string_literal: true
+
+ module LLM
+   module HTTPClient
+     require "net/http"
+     ##
+     # Initiates a HTTP request
+     # @param [Net::HTTP] http
+     #  The HTTP object to use for the request
+     # @param [Net::HTTPRequest] req
+     #  The request to send
+     # @return [Net::HTTPResponse]
+     #  The response from the server
+     # @raise [LLM::Error::Unauthorized]
+     #  When authentication fails
+     # @raise [LLM::Error::RateLimit]
+     #  When the rate limit is exceeded
+     # @raise [LLM::Error::BadResponse]
+     #  When any other unsuccessful status code is returned
+     # @raise [SystemCallError]
+     #  When there is a network error at the operating system level
+     def request(http, req)
+       res = http.request(req)
+       res.tap(&:value)
+     rescue Net::HTTPClientException
+       error_handler.new(res).raise_error!
+     end
+   end
+ end
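
For context on `res.tap(&:value)`: Net::HTTPResponse#value returns nil for 2XX responses and raises otherwise, which is how a failed request reaches the provider's error_handler. A standalone illustration using only the standard library (the URL is hypothetical):

```ruby
require "net/http"

res = Net::HTTP.get_response(URI("https://example.com/missing"))
begin
  res.value # nil on 2XX; raises Net::HTTPClientException on 4XX
rescue Net::HTTPClientException
  # HTTPClient#request rescues here and calls error_handler.new(res).raise_error!
  warn "request failed with status #{res.code}"
end
```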
data/lib/llm/lazy_conversation.rb ADDED
@@ -0,0 +1,39 @@
+ # frozen_string_literal: true
+
+ module LLM
+   require_relative "message_queue"
+
+   ##
+   # {LLM::LazyConversation LLM::LazyConversation} provides a
+   # conversation object that allows input prompts to be queued
+   # and only sent to the LLM when a response is needed.
+   #
+   # @example
+   #  llm = LLM.openai(key)
+   #  bot = llm.chat("Be a helpful weather assistant", :system)
+   #  bot.chat("What's the weather like in Rio?")
+   #  bot.chat("What's the weather like in Algiers?")
+   #  bot.messages.each do |message|
+   #    # A single request is made at this point
+   #  end
+   class LazyConversation
+     ##
+     # @return [LLM::MessageQueue]
+     attr_reader :messages
+
+     ##
+     # @param [LLM::Provider] provider
+     #  A provider
+     def initialize(provider)
+       @provider = provider
+       @messages = LLM::MessageQueue.new(provider)
+     end
+
+     ##
+     # @param prompt (see LLM::Provider#prompt)
+     # @return [LLM::Conversation]
+     def chat(prompt, role = :user, **params)
+       tap { @messages << [prompt, role, params] }
+     end
+   end
+ end
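
The laziness is observable from the outside: #chat only queues, and the first enumeration of #messages triggers a single request. A sketch (the API key is hypothetical):

```ruby
require "llm"

llm = LLM.openai(ENV["KEY"])
bot = llm.chat("Be a helpful weather assistant", :system) # nothing sent yet
bot.chat("What's the weather like in Rio?")               # still queued
bot.messages.each { |m| puts "[#{m.role}] #{m.content}" } # one request here
```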
data/lib/llm/message.rb ADDED
@@ -0,0 +1,55 @@
+ # frozen_string_literal: true
+
+ module LLM
+   class Message
+     ##
+     # @return [Symbol]
+     attr_reader :role
+
+     ##
+     # @return [String]
+     attr_reader :content
+
+     ##
+     # @return [Hash]
+     attr_reader :extra
+
+     ##
+     # @param [Symbol] role
+     # @param [String] content
+     # @param [Hash] extra
+     # @return [LLM::Message]
+     def initialize(role, content, extra = {})
+       @role = role
+       @content = content
+       @extra = extra
+     end
+
+     ##
+     # @return [OpenStruct]
+     def logprobs
+       return nil unless extra.key?(:logprobs)
+       OpenStruct.from_hash(extra[:logprobs])
+     end
+
+     ##
+     # @return [Hash]
+     def to_h
+       {role:, content:}
+     end
+
+     ##
+     # @param [Object] other
+     #  The other object to compare
+     # @return [Boolean]
+     #  Returns true when the "other" object has the same role and content
+     def ==(other)
+       if other.respond_to?(:to_h)
+         to_h == other.to_h
+       else
+         false
+       end
+     end
+     alias_method :eql?, :==
+   end
+ end
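
A quick sketch of the value semantics above. Note that #== compares only role and content, so :extra does not affect equality:

```ruby
require "llm"

m1 = LLM::Message.new(:user, "Hello")
m2 = LLM::Message.new(:user, "Hello", {logprobs: {tokens: []}})
m1.to_h            # => {role: :user, content: "Hello"}
m1 == m2           # => true
m2.logprobs.tokens # => [] (built with OpenStruct.from_hash)
```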
data/lib/llm/message_queue.rb ADDED
@@ -0,0 +1,47 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # {LLM::MessageQueue LLM::MessageQueue} provides an Enumerable
+   # object that yields each message in a conversation on-demand,
+   # and only sends a request to the LLM when a response is needed.
+   class MessageQueue
+     include Enumerable
+
+     ##
+     # @param [LLM::Provider] provider
+     # @return [LLM::MessageQueue]
+     def initialize(provider)
+       @provider = provider
+       @messages = []
+     end
+
+     ##
+     # @yield [LLM::Message]
+     #  Yields each message in the conversation thread
+     # @raise (see LLM::Provider#complete)
+     # @return [void]
+     def each
+       @messages = complete! unless @messages.grep(LLM::Message).size == @messages.size
+       @messages.each { yield(_1) }
+     end
+
+     ##
+     # @param message [Object]
+     #  A message to add to the conversation thread
+     # @return [void]
+     def <<(message)
+       @messages << message
+     end
+     alias_method :push, :<<
+
+     private
+
+     def complete!
+       prompt, role, params = @messages[-1]
+       rest = @messages[0..-2].map { (Array === _1) ? LLM::Message.new(_1[1], _1[0]) : _1 }
+       comp = @provider.complete(prompt, role, **params.merge(messages: rest)).choices.last
+       [*rest, LLM::Message.new(role, prompt), comp]
+     end
+   end
+ end
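
The queue stores `[prompt, role, params]` tuples until the first enumeration, at which point complete! sends the final tuple as the prompt and the earlier tuples as the `messages:` context. A sketch (the API key is hypothetical):

```ruby
require "llm"

llm = LLM.openai(ENV["KEY"])
queue = LLM::MessageQueue.new(llm)
queue << ["You are terse", :system, {}]
queue << ["What color is the sky?", :user, {}]
# Nothing has been sent yet; enumeration performs one completion request
# and replaces the tuples with LLM::Message objects:
queue.each { |message| puts "[#{message.role}] #{message.content}" }
```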
data/lib/llm/provider.rb ADDED
@@ -0,0 +1,114 @@
+ # frozen_string_literal: true
+
+ module LLM
+   require "llm/http_client"
+   ##
+   # The Provider class represents an abstract class for
+   # LLM (Language Model) providers
+   class Provider
+     include HTTPClient
+     ##
+     # @param [String] secret
+     #  The secret key for authentication
+     # @param [String] host
+     #  The host address of the LLM provider
+     # @param [Integer] port
+     #  The port number
+     def initialize(secret, host:, port: 443, ssl: true)
+       @secret = secret
+       @http = Net::HTTP.new(host, port).tap do |http|
+         http.use_ssl = ssl
+       end
+     end
+
+     ##
+     # Returns an inspection of the provider object
+     # @return [String]
+     # @note The secret key is redacted in inspect for security reasons
+     def inspect
+       "#<#{self.class.name}:0x#{object_id.to_s(16)} @secret=[REDACTED] @http=#{@http.inspect}>"
+     end
+
+     ##
+     # @param [String] input
+     #  The input to embed
+     # @raise [NotImplementedError]
+     #  When the method is not implemented by a subclass
+     # @return [LLM::Response::Embedding]
+     def embed(input, **params)
+       raise NotImplementedError
+     end
+
+     ##
+     # Completes a given prompt using the LLM
+     # @param [String] prompt
+     #  The input prompt to be completed
+     # @param [Symbol] role
+     #  The role of the prompt (e.g. :user, :system)
+     # @raise [NotImplementedError]
+     #  When the method is not implemented by a subclass
+     # @return [LLM::Response::Completion]
+     def complete(prompt, role = :user, **params)
+       raise NotImplementedError
+     end
+
+     ##
+     # Starts a new lazy conversation
+     # @param prompt (see LLM::Provider#complete)
+     # @param role (see LLM::Provider#complete)
+     # @raise (see LLM::Provider#complete)
+     # @return [LLM::LazyConversation]
+     def chat(prompt, role = :user, **params)
+       LazyConversation.new(self).chat(prompt, role, **params)
+     end
+
+     ##
+     # Starts a new conversation
+     # @param prompt (see LLM::Provider#complete)
+     # @param role (see LLM::Provider#complete)
+     # @raise (see LLM::Provider#complete)
+     # @return [LLM::Conversation]
+     def chat!(prompt, role = :user, **params)
+       Conversation.new(self).chat(prompt, role, **params)
+     end
+
+     private
+
+     ##
+     # Prepares a request for authentication
+     # @param [Net::HTTP::Request] req
+     #  The request to prepare for authentication
+     # @raise [NotImplementedError]
+     #  (see LLM::Provider#complete)
+     def auth(req)
+       raise NotImplementedError
+     end
+
+     ##
+     # @return [Module]
+     #  Returns the module responsible for parsing a successful LLM response
+     # @raise [NotImplementedError]
+     #  (see LLM::Provider#complete)
+     def response_parser
+       raise NotImplementedError
+     end
+
+     ##
+     # @return [Class]
+     #  Returns the class responsible for handling an unsuccessful LLM response
+     # @raise [NotImplementedError]
+     #  (see LLM::Provider#complete)
+     def error_handler
+       raise NotImplementedError
+     end
+
+     ##
+     # Prepares a request before sending it
+     def preflight(req, body)
+       req.content_type = "application/json"
+       req.body = JSON.generate(body)
+       auth(req)
+       req
+     end
+   end
+ end
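
Provider is abstract: subclasses supply complete/embed plus the private auth, response_parser, and error_handler hooks, and preflight wires the JSON body and authentication together. A schematic subclass in which every name is hypothetical:

```ruby
class MyProvider < LLM::Provider
  def initialize(secret)
    super(secret, host: "api.example.com") # hypothetical host
  end

  def complete(prompt, role = :user, **params)
    req = Net::HTTP::Post.new("/v1/complete") # hypothetical endpoint
    # preflight sets the JSON content type and body, then calls auth(req)
    res = request(@http, preflight(req, {prompt:, role:, **params}))
    # a real subclass would now parse `res` via its response_parser module
  end

  private

  def auth(req)
    req["Authorization"] = "Bearer #{@secret}"
  end
end
```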
data/lib/llm/providers/anthropic/error_handler.rb ADDED
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ class LLM::Anthropic
+   class ErrorHandler
+     ##
+     # @return [Net::HTTPResponse]
+     #  Non-2XX response from the server
+     attr_reader :res
+
+     ##
+     # @param [Net::HTTPResponse] res
+     #  The response from the server
+     # @return [LLM::Anthropic::ErrorHandler]
+     def initialize(res)
+       @res = res
+     end
+
+     ##
+     # @raise [LLM::Error]
+     #  Raises a subclass of {LLM::Error LLM::Error}
+     def raise_error!
+       case res
+       when Net::HTTPForbidden
+         raise LLM::Error::Unauthorized.new { _1.response = res }, "Authentication error"
+       when Net::HTTPTooManyRequests
+         raise LLM::Error::RateLimit.new { _1.response = res }, "Too many requests"
+       else
+         raise LLM::Error::BadResponse.new { _1.response = res }, "Unexpected response"
+       end
+     end
+   end
+ end
data/lib/llm/providers/anthropic/format.rb ADDED
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ class LLM::Anthropic
+   module Format
+     ##
+     # @param [Array<LLM::Message>] messages
+     #  The messages to format
+     # @return [Array<Hash>]
+     def format(messages)
+       messages.map { {role: _1.role, content: format_content(_1.content)} }
+     end
+
+     private
+
+     ##
+     # @param [String, URI] content
+     #  The content to format
+     # @return [String, Hash]
+     #  The formatted content
+     def format_content(content)
+       if URI === content
+         [{
+           type: :image,
+           source: {type: :base64, media_type: LLM::File(content.to_s).mime_type, data: [content.to_s].pack("m0")}
+         }]
+       else
+         content
+       end
+     end
+   end
+ end
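
Format#format is mixed into the Anthropic provider; for plain-text messages it reduces to `{role:, content:}` pairs. A standalone sketch (the Formatter wrapper class is hypothetical):

```ruby
require "llm"

class Formatter
  include LLM::Anthropic::Format
end

msgs = [LLM::Message.new(:system, "Be terse"), LLM::Message.new(:user, "Hello, Claude")]
p Formatter.new.format(msgs)
# => [{role: :system, content: "Be terse"}, {role: :user, content: "Hello, Claude"}]
```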