llm_ruby 0.1.0

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: dfe908817dd406ae16aca4130133b9a421b18333cdecc4ad870635dd997be500
+   data.tar.gz: 105ae0dcc30918686abcf8d01d99605d2f70f41eebdd2737744bc2bf27c6575c
+ SHA512:
+   metadata.gz: 5b9643df8771735111f18f52182b6f217231ca92c890e3456f21c3937850bf2c9ac668730cd1aeae8b43330d5a3c84eab6b419c60aa5d54d70f405793e4463ad
+   data.tar.gz: 820687838675aeaadde8e5b7c5b7f7f45bfdf10beb74b90e2b594cd80d5e7accc38ace2deec56a6a2fcdc3de6a369af41405c5523da3e4c2b47f6e584c28f3fd
data/.rspec ADDED
@@ -0,0 +1,3 @@
+ --format documentation
+ --color
+ --require spec_helper
data/.standard.yml ADDED
@@ -0,0 +1,3 @@
+ # For available configuration options, see:
+ # https://github.com/standardrb/standard
+ ruby_version: 3.1.0
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2024 TODO: Write your name
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,153 @@
+ # LLMRuby
+
+ LLMRuby is a Ruby gem that provides a consistent interface for interacting with various Large Language Model (LLM) APIs, with a current focus on OpenAI's models.
+
+ ## Installation
+
+ Add this line to your application's Gemfile (the gem's entry point is `llm`, so tell Bundler which file to require):
+
+ ```ruby
+ gem 'llm_ruby', require: 'llm'
+ ```
+
+ And then execute:
+
+ ```
+ $ bundle install
+ ```
+
+ Or install it yourself as:
+
+ ```
+ $ gem install llm_ruby
+ ```
+
+ ## Usage
+
+ ### Basic Usage
+
+ ```ruby
+ require 'llm'
+
+ # Initialize an LLM instance
+ llm = LLM.from_string!("gpt-4")
+
+ # Create a client
+ client = llm.client
+
+ # Send a chat message
+ response = client.chat([{role: :user, content: "Hello, world!"}])
+
+ puts response.content
+ ```
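+
+ Beyond messages, `client.chat` accepts per-request options that are forwarded to the API when present: `temperature`, `response_format`, `max_output_tokens`, `top_p`, `stop_sequences`, `presence_penalty`, `frequency_penalty`, `tools` and `tool_choice`. For example:
+
+ ```ruby
+ response = client.chat(
+   [{role: :user, content: "Hello, world!"}],
+   temperature: 0.2,
+   max_output_tokens: 256
+ )
+ ```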
+
+ ### Streaming Responses
+
+ LLMRuby supports streaming responses:
+
+ ```ruby
+ require 'llm'
+
+ # Initialize an LLM instance
+ llm = LLM.from_string!("gpt-4")
+
+ # Create a client
+ client = llm.client
+
+ # Define the on_message callback
+ on_message = proc do |message|
+   puts "Received message chunk: #{message}"
+ end
+
+ # Define the on_complete callback
+ on_complete = proc do |stop_reason|
+   puts "Streaming complete. Stop reason: #{stop_reason}"
+ end
+
+ # Send a chat message with streaming enabled
+ response = client.chat(
+   [{role: :user, content: "Hello, world!"}],
+   stream: true,
+   on_message: on_message,
+   on_complete: on_complete
+ )
+
+ puts response.content
+ ```
+
+ ### Using the Response Object
+
+ The response object returned by `client.chat` contains several useful fields:
+
+ - `content`: The final content of the response.
+ - `raw_response`: The raw response payload for non-streaming requests, or the array of parsed chunks for streaming requests.
+ - `stop_reason`: The reason why response generation stopped.
+
+ Here is an example of how to use the response object:
+
+ ```ruby
+ # Initialize an LLM instance
+ llm = LLM.from_string!("gpt-4")
+
+ # Create a client
+ client = llm.client
+
+ # Send a chat message
+ response = client.chat([{role: :user, content: "Hello, world!"}])
+
+ # Access the response fields
+ puts "Response content: #{response.content}"
+ puts "Raw response: #{response.raw_response}"
+ puts "Stop reason: #{response.stop_reason}"
+ ```
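+
+ The `stop_reason` value is normalized to one of the symbols defined in `LLM::StopReason` (`:stop`, `:safety`, `:max_tokens` or `:other`), so callers can branch on it without depending on provider-specific strings:
+
+ ```ruby
+ case response.stop_reason
+ when LLM::StopReason::STOP
+   puts "The model finished naturally"
+ when LLM::StopReason::MAX_TOKENS_REACHED
+   puts "The response was truncated; consider raising max_output_tokens"
+ else
+   puts "Generation stopped for another reason: #{response.stop_reason}"
+ end
+ ```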
+
+ ## Available Models
+
+ LLMRuby supports various OpenAI models, including GPT-3.5 and GPT-4 variants. You can see the full list of supported models in the `KNOWN_MODELS` constant:
+
+ | Canonical Name             | Display Name           | Provider |
+ |----------------------------|------------------------|----------|
+ | gpt-3.5-turbo              | GPT-3.5 Turbo          | openai   |
+ | gpt-3.5-turbo-0125         | GPT-3.5 Turbo 0125     | openai   |
+ | gpt-3.5-turbo-16k          | GPT-3.5 Turbo 16K      | openai   |
+ | gpt-3.5-turbo-1106         | GPT-3.5 Turbo 1106     | openai   |
+ | gpt-4                      | GPT-4                  | openai   |
+ | gpt-4-32k                  | GPT-4 32K              | openai   |
+ | gpt-4-1106-preview         | GPT-4 Turbo 1106       | openai   |
+ | gpt-4-turbo-2024-04-09     | GPT-4 Turbo 2024-04-09 | openai   |
+ | gpt-4-0125-preview         | GPT-4 Turbo 0125       | openai   |
+ | gpt-4-turbo-preview        | GPT-4 Turbo            | openai   |
+ | gpt-4-0613                 | GPT-4 0613             | openai   |
+ | gpt-4-32k-0613             | GPT-4 32K 0613         | openai   |
+ | gpt-4o                     | GPT-4o                 | openai   |
+ | gpt-4o-mini                | GPT-4o Mini            | openai   |
+ | gpt-4o-2024-05-13          | GPT-4o 2024-05-13      | openai   |
+ | gpt-4o-2024-08-06          | GPT-4o 2024-08-06      | openai   |
+
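+ The same list is available programmatically via `LLM.all!`, which returns the known models as `LLM` instances:
+
+ ```ruby
+ LLM.all!.each do |llm|
+   puts "#{llm.canonical_name} (#{llm.display_name}, provider: #{llm.provider})"
+ end
+ ```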
+
+ ## Configuration
+
+ Set your OpenAI API key as an environment variable:
+
+ ```
+ export OPENAI_API_KEY=your_api_key_here
+ ```
+
+ ## Development
+
+ After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+ To install this gem onto your local machine, run `bundle exec rake install`.
+
+ ## Contributing
+
+ Bug reports and pull requests are welcome on GitHub at https://github.com/contextco/llm_ruby.
+
+ ## License
+
+ The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
+
+ ## Acknowledgements
+
+ This gem is developed and maintained by [Context](https://context.ai).
data/Rakefile ADDED
@@ -0,0 +1,10 @@
+ # frozen_string_literal: true
+
+ require "bundler/gem_tasks"
+ require "rspec/core/rake_task"
+
+ RSpec::Core::RakeTask.new(:spec)
+
+ require "standard/rake"
+
+ task default: %i[spec standard]
data/lib/llm/clients/open_ai/response.rb ADDED
@@ -0,0 +1,42 @@
+ # frozen_string_literal: true
+
+ class LLM::Clients::OpenAI::Response
+   def initialize(raw_response)
+     @raw_response = raw_response
+   end
+
+   def to_normalized_response
+     LLM::Response.new(
+       content: content,
+       raw_response: parsed_response,
+       stop_reason: normalize_stop_reason
+     )
+   end
+
+   # Map OpenAI's finish_reason values ("stop", "length", "content_filter",
+   # "tool_calls", ...) onto the gem's provider-neutral stop reasons.
+   def self.normalize_stop_reason(stop_reason)
+     case stop_reason
+     when "stop"
+       LLM::StopReason::STOP
+     when "content_filter"
+       LLM::StopReason::SAFETY
+     when "length"
+       LLM::StopReason::MAX_TOKENS_REACHED
+     else
+       LLM::StopReason::OTHER
+     end
+   end
+
+   private
+
+   def content
+     parsed_response.dig("choices", 0, "message", "content")
+   end
+
+   def normalize_stop_reason
+     self.class.normalize_stop_reason(parsed_response.dig("choices", 0, "finish_reason"))
+   end
+
+   def parsed_response
+     @raw_response.parsed_response
+   end
+ end
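A quick sketch of the mapping this class performs, given OpenAI's documented `finish_reason` values:

```ruby
LLM::Clients::OpenAI::Response.normalize_stop_reason("stop")           # => :stop
LLM::Clients::OpenAI::Response.normalize_stop_reason("length")         # => :max_tokens
LLM::Clients::OpenAI::Response.normalize_stop_reason("content_filter") # => :safety
LLM::Clients::OpenAI::Response.normalize_stop_reason("tool_calls")     # => :other
```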
data/lib/llm/clients/open_ai.rb ADDED
@@ -0,0 +1,109 @@
+ # frozen_string_literal: true
+
+ require "json"
+ require "httparty"
+ require "event_stream_parser"
+
+ class LLM::Clients::OpenAI
+   include HTTParty
+   base_uri "https://api.openai.com/v1"
+
+   def initialize(llm:)
+     @llm = llm
+   end
+
+   def chat(messages, options = {})
+     parameters = {
+       model: @llm.canonical_name,
+       messages: messages,
+       temperature: options[:temperature],
+       response_format: options[:response_format],
+       max_tokens: options[:max_output_tokens],
+       top_p: options[:top_p],
+       stop: options[:stop_sequences],
+       presence_penalty: options[:presence_penalty],
+       frequency_penalty: options[:frequency_penalty],
+       tools: options[:tools],
+       tool_choice: options[:tool_choice]
+     }.compact
+
+     return chat_streaming(parameters, options[:on_message], options[:on_complete]) if options[:stream]
+
+     resp = post_url("/chat/completions", body: parameters.to_json)
+
+     Response.new(resp).to_normalized_response
+   end
+
+   private
+
+   def chat_streaming(parameters, on_message, on_complete)
+     buffer = +""
+     chunks = []
+     output_data = {}
+
+     # Capture the stop reason so it can be returned alongside the buffered content.
+     wrapped_on_complete = lambda { |stop_reason|
+       output_data[:stop_reason] = stop_reason
+       on_complete&.call(stop_reason)
+     }
+
+     parameters[:stream] = true
+
+     stream_handler = stream_proc(buffer, chunks, on_message, wrapped_on_complete)
+
+     _resp = post_url_streaming("/chat/completions", body: parameters.to_json, &stream_handler)
+
+     LLM::Response.new(
+       content: buffer,
+       raw_response: chunks,
+       stop_reason: output_data[:stop_reason]
+     )
+   end
+
+   def stream_proc(buffer, chunks, on_message, complete_proc)
+     each_json_chunk do |_type, event|
+       chunks << event
+       new_content = event.dig("choices", 0, "delta", "content")
+       stop_reason = event.dig("choices", 0, "finish_reason")
+
+       buffer << new_content unless new_content.nil?
+       on_message&.call(new_content) unless new_content.nil?
+       complete_proc&.call(Response.normalize_stop_reason(stop_reason)) unless stop_reason.nil?
+     end
+   end
+
+   def each_json_chunk
+     parser = EventStreamParser::Parser.new
+
+     proc do |chunk, _bytes, env|
+       # Surface non-200 responses instead of feeding an error body to the SSE parser.
+       if env && env.status != 200
+         raise "OpenAI API request failed with status #{env.status}: #{try_parse_json(chunk)}"
+       end
+
+       parser.feed(chunk) do |type, data|
+         next if data == "[DONE]"
+
+         yield(type, JSON.parse(data))
+       end
+     end
+   end
+
+   def try_parse_json(maybe_json)
+     JSON.parse(maybe_json)
+   rescue JSON::ParserError
+     maybe_json
+   end
+
+   def post_url(url, **kwargs)
+     self.class.post(url, **kwargs.merge(headers: default_headers))
+   end
+
+   def post_url_streaming(url, **kwargs, &block)
+     self.class.post(url, **kwargs.merge(headers: default_headers, stream_body: true), &block)
+   end
+
+   def default_headers
+     {
+       "Authorization" => "Bearer #{ENV["OPENAI_API_KEY"]}",
+       "Content-Type" => "application/json"
+     }
+   end
+ end
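The client is normally obtained through `LLM#client`, but it can also be constructed directly; a minimal sketch, assuming `OPENAI_API_KEY` is set in the environment (the default headers above read it from there):

```ruby
require "llm"

llm = LLM.from_string!("gpt-4o")
client = LLM::Clients::OpenAI.new(llm: llm)

response = client.chat([{role: :user, content: "Say hi"}], temperature: 0)
puts response.content
```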
data/lib/llm/info.rb ADDED
@@ -0,0 +1,94 @@
+ # frozen_string_literal: true
+
+ module LLM::Info
+   KNOWN_MODELS = [
+     # Semantics of fields:
+     # - canonical_name (required): A string that uniquely identifies the model.
+     #   We use this string as the public identifier when users choose this model via the API.
+     # - display_name (required): A string that is displayed to the user when choosing this model via the UI.
+
+     # GPT-3.5 Turbo Models
+     {
+       canonical_name: "gpt-3.5-turbo",
+       display_name: "GPT-3.5 Turbo",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-3.5-turbo-0125",
+       display_name: "GPT-3.5 Turbo 0125",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-3.5-turbo-16k",
+       display_name: "GPT-3.5 Turbo 16K",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-3.5-turbo-1106",
+       display_name: "GPT-3.5 Turbo 1106",
+       provider: :openai
+     },
+
+     # GPT-4 Models
+     {
+       canonical_name: "gpt-4",
+       display_name: "GPT-4",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-32k",
+       display_name: "GPT-4 32K",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-1106-preview",
+       display_name: "GPT-4 Turbo 1106",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-turbo-2024-04-09",
+       display_name: "GPT-4 Turbo 2024-04-09",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-0125-preview",
+       display_name: "GPT-4 Turbo 0125",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-turbo-preview",
+       display_name: "GPT-4 Turbo",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-0613",
+       display_name: "GPT-4 0613",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4-32k-0613",
+       display_name: "GPT-4 32K 0613",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4o",
+       display_name: "GPT-4o",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4o-mini",
+       display_name: "GPT-4o Mini",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4o-2024-05-13",
+       display_name: "GPT-4o 2024-05-13",
+       provider: :openai
+     },
+     {
+       canonical_name: "gpt-4o-2024-08-06",
+       display_name: "GPT-4o 2024-08-06",
+       provider: :openai
+     }
+   ].freeze
+ end
data/lib/llm/response.rb ADDED
@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ LLM::Response = Struct.new(:content, :raw_response, :stop_reason, keyword_init: true)
data/lib/llm/stop_reason.rb ADDED
@@ -0,0 +1,9 @@
+ # frozen_string_literal: true
+
+ module LLM::StopReason
+   STOP = :stop
+   SAFETY = :safety
+   MAX_TOKENS_REACHED = :max_tokens
+
+   OTHER = :other
+ end
@@ -0,0 +1 @@
+ # frozen_string_literal: true
data/lib/llm.rb ADDED
@@ -0,0 +1,50 @@
+ # frozen_string_literal: true
+
+ require "zeitwerk"
+ loader = Zeitwerk::Loader.for_gem
+ loader.inflector.inflect(
+   "llm" => "LLM",
+   "open_ai" => "OpenAI"
+ )
+ loader.setup
+
+ class LLM
+   def initialize(model)
+     @canonical_name = model[:canonical_name]
+     @display_name = model[:display_name]
+     @provider = model[:provider]
+     @client_class = LLM::Clients::OpenAI # TODO: Allow alternative client classes.
+   end
+
+   def client
+     client_class.new(llm: self)
+   end
+
+   attr_reader :canonical_name,
+     :display_name,
+     :provider
+
+   private
+
+   attr_reader :client_class
+
+   class << self
+     def all!
+       known_models
+     end
+
+     def from_string!(model_string)
+       model = known_models.find { |known| known.canonical_name == model_string }
+
+       raise ArgumentError, "Unknown model: #{model_string}" unless model
+
+       model
+     end
+
+     private
+
+     def known_models
+       @known_models ||= LLM::Info::KNOWN_MODELS.map { |model| new(model) }
+     end
+   end
+ end
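Note that `LLM.from_string!` raises `ArgumentError` for model names outside `KNOWN_MODELS`, so callers handling user input may want to rescue it:

```ruby
begin
  llm = LLM.from_string!("not-a-real-model")
rescue ArgumentError => e
  warn e.message # => "Unknown model: not-a-real-model"
end
```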
metadata ADDED
@@ -0,0 +1,185 @@
+ --- !ruby/object:Gem::Specification
+ name: llm_ruby
+ version: !ruby/object:Gem::Version
+   version: 0.1.0
+ platform: ruby
+ authors:
+ - Alex Gamble
+ autorequire:
+ bindir: exe
+ cert_chain: []
+ date: 2024-09-13 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: event_stream_parser
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.0.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.0.0
+ - !ruby/object:Gem::Dependency
+   name: httparty
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.22.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 0.22.0
+ - !ruby/object:Gem::Dependency
+   name: zeitwerk
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 2.6.0
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 2.6.0
+ - !ruby/object:Gem::Dependency
+   name: dotenv
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.1.0
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.1.0
+ - !ruby/object:Gem::Dependency
+   name: rake
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '13.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '13.0'
+ - !ruby/object:Gem::Dependency
+   name: rspec
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '3.0'
+ - !ruby/object:Gem::Dependency
+   name: standard
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.31.0
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 1.31.0
+ - !ruby/object:Gem::Dependency
+   name: vcr
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 6.3.1
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 6.3.1
+ - !ruby/object:Gem::Dependency
+   name: webmock
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.23.0
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: 3.23.0
+ description:
+ email:
+ - alex@context.ai
+ - alec@context.ai
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - ".rspec"
+ - ".standard.yml"
+ - LICENSE.txt
+ - README.md
+ - Rakefile
+ - lib/llm.rb
+ - lib/llm/clients/open_ai.rb
+ - lib/llm/clients/open_ai/response.rb
+ - lib/llm/info.rb
+ - lib/llm/provider.rb
+ - lib/llm/response.rb
+ - lib/llm/stop_reason.rb
+ - lib/llm/version.rb
+ homepage: https://context.ai
+ licenses:
+ - MIT
+ metadata:
+   homepage_uri: https://context.ai
+   source_code_uri: https://github.com/contextco/llm_ruby
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: 3.1.0
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubygems_version: 3.5.16
+ signing_key:
+ specification_version: 4
+ summary: A client to interact with LLM APIs in a consistent way.
+ test_files: []