llm_ruby 0.2.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +25 -0
- data/lib/llm/clients/gemini/request.rb +10 -1
- data/lib/llm/clients/gemini/response.rb +8 -1
- data/lib/llm/clients/open_ai/response.rb +8 -1
- data/lib/llm/clients/open_ai.rb +1 -1
- data/lib/llm/info.rb +22 -11
- data/lib/llm/response.rb +9 -1
- data/lib/llm/schema.rb +75 -0
- data/lib/llm.rb +7 -0
- data/lib/llm_ruby.rb +1 -0
- metadata +4 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d377d13dd257a9b4a6def17668d3bb3badf4692f9e865faba142d5189d746519
+  data.tar.gz: a201962fb8dd4f245face648cc433e127afcc22832cb1499da89db2bc2816d7e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 10a190d6b7f4aa364c17d91b383a744fd490b94437ba4779c973e1f9bdc65d92f16d03c075d807d2a6aaf9509cf837736a0c48860c6ebc9d7c3dea6fb41eacd4
+  data.tar.gz: a3e63a044316a257c1e429bf85882c86ff78d3af193a87a978e4552f3464cdb417c7ada8aaf35a3556672ea6e79196812dada4c25c2f9a624466981b195c2ad3
data/README.md
CHANGED
@@ -1,5 +1,10 @@
 # LLMRuby
 
+[](https://badge.fury.io/rb/llm_ruby)
+
+[](https://opensource.org/licenses/MIT)
+
+
 LLMRuby is a Ruby gem that provides a consistent interface for interacting with multiple Large Language Model (LLM) APIs. Most OpenAI, Anthropic and Gemini models are currently supported.
 
 ## Installation
@@ -166,6 +171,26 @@ export ANTHROPIC_API_KEY=your_api_key_here
 export GEMINI_API_KEY=your_api_key_here
 ```
 
+## Structured Outputs
+
+OpenAI and Gemini models can be configured to generate responses that adhere to a provided schema. Even though each uses a different format for configuring this schema, `llm_ruby` can handle the translation for you, so that you can share a single schema definition across models.
+
+```ruby
+
+llm = LLM.from_string!("gpt-4o")
+
+# Create a client
+client = llm.client
+
+# Define a response schema and send a chat message
+response_format = LLM::Schema.new("test_schema", {"type" => "object", "properties" => {"name" => {"type" => "string"}, "age" => {"type" => "integer"}}, "additionalProperties" => false, "required" => ["name", "age"]})
+# or load the schema from a file: LLM::Schema.from_file('myschema.json')
+response = client.chat([{role: :user, content: "Hello, world!"}], response_format: response_format)
+
+response.structured_output["name"] # Alex
+response.structured_output_object.name # Alex
+```
+
 ## Development
 
 After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
data/lib/llm/clients/gemini/request.rb
CHANGED
@@ -14,9 +14,18 @@ class LLM
       end
 
       def params
+        generation_config = {}
+        if options[:response_format]
+          generation_config = {
+            responseMimeType: "application/json",
+            responseSchema: options[:response_format]&.gemini_response_format
+          }
+        end
+
        {
          systemInstruction: normalized_prompt,
-          contents: normalized_messages
+          contents: normalized_messages,
+          generationConfig: generation_config
        }
      end
 
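For illustration, here is a minimal sketch (not from the gem) of the request body shape this `params` change produces when a `response_format` is passed. The field names follow the Gemini generateContent API, and the schema contents are hypothetical:

```ruby
require "json"

# Hypothetical payload shape produced by Request#params once a
# response_format is supplied: generationConfig asks Gemini for JSON
# output conforming to the translated schema.
payload = {
  systemInstruction: {parts: [{text: "You are a helpful assistant."}]},
  contents: [{role: "user", parts: [{text: "Hello, world!"}]}],
  generationConfig: {
    responseMimeType: "application/json",
    responseSchema: {"type" => "object", "properties" => {"name" => {"type" => "string"}}}
  }
}

puts JSON.pretty_generate(payload)
```

Note that when no `response_format` is given, `generationConfig` is still sent, just as an empty object.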
data/lib/llm/clients/gemini/response.rb
CHANGED
@@ -12,7 +12,8 @@ class LLM
        LLM::Response.new(
          content: content,
          raw_response: parsed_response,
-          stop_reason: translated_stop_reason
+          stop_reason: translated_stop_reason,
+          structured_output: structured_output
        )
      end
 
@@ -48,6 +49,12 @@ class LLM
      def parsed_response
        raw_response.parsed_response
      end
+
+      def structured_output
+        @structured_output ||= JSON.parse(parsed_response.dig("candidates", 0, "content", "parts", 0, "text"))
+      rescue JSON::ParserError
+        nil
+      end
    end
  end
end
data/lib/llm/clients/open_ai/response.rb
CHANGED
@@ -12,7 +12,8 @@ class LLM
        LLM::Response.new(
          content: content,
          raw_response: parsed_response,
-          stop_reason: normalize_stop_reason
+          stop_reason: normalize_stop_reason,
+          structured_output: structured_output
        )
      end
 
@@ -42,6 +43,12 @@ class LLM
      def parsed_response
        @raw_response.parsed_response
      end
+
+      def structured_output
+        @structured_output ||= JSON.parse(parsed_response.dig("choices", 0, "message", "content"))
+      rescue JSON::ParserError
+        nil
+      end
    end
  end
end
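Both response wrappers follow the same pattern: parse the model's text output as JSON, memoize it, and return `nil` when parsing fails. Since `JSON.parse` is called without `symbolize_names: true`, the resulting hash has string keys. A standalone sketch of that behavior (a simplified stand-in, not the gem's classes; `parse_structured_output` is a hypothetical helper name):

```ruby
require "json"

# Simplified stand-in for the structured_output methods above: parse the
# model's text as JSON, fall back to nil for non-JSON replies.
def parse_structured_output(text)
  JSON.parse(text)
rescue JSON::ParserError
  nil
end

parse_structured_output('{"name":"Alex","age":30}') # => {"name"=>"Alex", "age"=>30}
parse_structured_output("a plain prose reply")      # => nil
```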
data/lib/llm/clients/open_ai.rb
CHANGED
@@ -18,7 +18,7 @@ class LLM
        model: @llm.canonical_name,
        messages: messages,
        temperature: options[:temperature],
-        response_format: options[:response_format],
+        response_format: options[:response_format]&.response_format,
        max_tokens: options[:max_output_tokens],
        top_p: options[:top_p],
        stop: options[:stop_sequences],
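The safe-navigation operator keeps requests without a schema working: when `options[:response_format]` is `nil`, the call resolves to `nil` instead of raising `NoMethodError`. A two-line sketch:

```ruby
# nil-safe: no schema means a nil response_format value in the request
nil&.response_format # => nil

# with a schema, the OpenAI json_schema envelope is produced
LLM::Schema.new("person", {"type" => "object"}).response_format[:type] # => "json_schema"
```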
data/lib/llm/info.rb
CHANGED
@@ -76,19 +76,22 @@ class LLM
      canonical_name: "gpt-4o",
      display_name: "GPT-4o",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gpt-4o-mini",
      display_name: "GPT-4o Mini",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gpt-4o-mini-2024-07-18",
      display_name: "GPT-4o Mini 2024-07-18",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gpt-4o-2024-05-13",
@@ -100,13 +103,15 @@ class LLM
      canonical_name: "gpt-4o-2024-08-06",
      display_name: "GPT-4o 2024-08-06",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gpt-4o-2024-11-20",
      display_name: "GPT-4o 2024-11-20",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "chatgpt-4o-latest",
@@ -118,13 +123,15 @@ class LLM
      canonical_name: "o1",
      display_name: "o1",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "o1-2024-12-17",
      display_name: "o1 2024-12-17",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "o1-preview",
@@ -154,13 +161,15 @@ class LLM
      canonical_name: "o3-mini",
      display_name: "o3 Mini",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "o3-mini-2025-01-31",
      display_name: "o3 Mini 2025-01-31",
      provider: :openai,
-      client_class: LLM::Clients::OpenAI
+      client_class: LLM::Clients::OpenAI,
+      supports_structured_outputs: true
    },
 
    # Anthropic Models
@@ -224,13 +233,15 @@ class LLM
      canonical_name: "gemini-2.0-flash",
      display_name: "Gemini 2.0 Flash",
      provider: :google,
-      client_class: LLM::Clients::Gemini
+      client_class: LLM::Clients::Gemini,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gemini-2.0-flash-lite-preview-02-05",
      display_name: "Gemini 2.0 Flash Lite Preview 02-05",
      provider: :google,
-      client_class: LLM::Clients::Gemini
+      client_class: LLM::Clients::Gemini,
+      supports_structured_outputs: true
    },
    {
      canonical_name: "gemini-1.5-flash-8b",
data/lib/llm/response.rb
CHANGED
@@ -1,3 +1,11 @@
 # frozen_string_literal: true
 
-LLM::Response = Struct.new(:content, :raw_response, :stop_reason, keyword_init: true)
+require "ostruct"
+
+LLM::Response = Struct.new(:content, :raw_response, :stop_reason, :structured_output, keyword_init: true) do
+  def structured_output_object
+    return nil unless structured_output
+
+    OpenStruct.new(structured_output)
+  end
+end
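`structured_output_object` wraps the parsed hash in an `OpenStruct` for method-style access, which is why the explicit `require "ostruct"` is added. A quick sketch of the two access styles, using a hypothetical parsed payload:

```ruby
require "ostruct"

# Hypothetical parsed output; JSON.parse yields string keys.
structured_output = {"name" => "Alex", "age" => 30}
object = OpenStruct.new(structured_output)

structured_output["name"] # => "Alex"
object.name               # => "Alex"
object.age                # => 30
```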
data/lib/llm/schema.rb
ADDED
@@ -0,0 +1,75 @@
+class LLM
+  class Schema
+    def initialize(name, schema)
+      @name = name
+      @schema = schema
+    end
+
+    def self.from_file(file_path)
+      new(File.basename(file_path, ".json"), JSON.parse(File.read(file_path)))
+    end
+
+    def response_format
+      {
+        type: "json_schema",
+        json_schema: {
+          name: @name,
+          strict: true,
+          schema: @schema
+        }
+      }
+    end
+
+    def gemini_response_format
+      transform_schema(@schema)
+    end
+
+    def transform_schema(schema)
+      # Initialize the result as an empty hash.
+      openapi_schema = {}
+
+      # Process the "type" field and handle nullability.
+      if schema.key?("type")
+        if schema["type"].is_a?(Array)
+          # Check for "null" in the type array to mark the schema as nullable.
+          if schema["type"].include?("null")
+            openapi_schema["nullable"] = true
+            # Remove "null" from the type array; if a single type remains, use that.
+            remaining_types = schema["type"] - ["null"]
+            openapi_schema["type"] = (remaining_types.size == 1) ? remaining_types.first : remaining_types
+          else
+            openapi_schema["type"] = schema["type"]
+          end
+        else
+          openapi_schema["type"] = schema["type"]
+        end
+      end
+
+      # Map simple fields directly: "format", "description", "enum", "maxItems", "minItems".
+      ["format", "description", "enum", "maxItems", "minItems"].each do |field|
+        openapi_schema[field] = schema[field] if schema.key?(field)
+      end
+
+      # Recursively process "properties" if present.
+      if schema.key?("properties") && schema["properties"].is_a?(Hash)
+        openapi_schema["properties"] = {}
+        schema["properties"].each do |prop, prop_schema|
+          openapi_schema["properties"][prop] = transform_schema(prop_schema)
+        end
+      end
+
+      # Copy "required" if present.
+      openapi_schema["required"] = schema["required"] if schema.key?("required")
+
+      # Copy "propertyOrdering" if present (non-standard field).
+      openapi_schema["propertyOrdering"] = schema["propertyOrdering"] if schema.key?("propertyOrdering")
+
+      # Recursively process "items" for array types.
+      if schema.key?("items")
+        openapi_schema["items"] = transform_schema(schema["items"])
+      end
+
+      openapi_schema
+    end
+  end
+end
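`response_format` produces OpenAI's `json_schema` envelope around the schema unchanged, while `gemini_response_format` rewrites the JSON Schema into the OpenAPI-flavored subset Gemini accepts; the most visible translation is `"type": ["string", "null"]` becoming `type: "string"` plus `nullable: true`. A worked example with a hypothetical shared schema:

```ruby
schema = LLM::Schema.new("person", {
  "type" => "object",
  "properties" => {
    "name" => {"type" => "string"},
    "nickname" => {"type" => ["string", "null"]}
  },
  "required" => ["name"]
})

schema.response_format
# => {type: "json_schema",
#     json_schema: {name: "person", strict: true, schema: {...original schema...}}}

schema.gemini_response_format
# => {"type"=>"object",
#     "properties"=>{"name"=>{"type"=>"string"},
#                    "nickname"=>{"nullable"=>true, "type"=>"string"}},
#     "required"=>["name"]}
```

Note that keys outside the copied set (for example `additionalProperties`) are dropped in the Gemini translation, since `transform_schema` only carries over the fields listed above.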
data/lib/llm.rb
CHANGED
@@ -1,11 +1,13 @@
 # frozen_string_literal: true
 
 require "zeitwerk"
+
 loader = Zeitwerk::Loader.for_gem
 loader.inflector.inflect(
   "llm" => "LLM",
   "open_ai" => "OpenAI"
 )
+loader.ignore("#{__dir__}/llm_ruby.rb")
 loader.setup
 
 class LLM
@@ -15,6 +17,7 @@ class LLM
    @provider = model[:provider]
    @client_class = model[:client_class]
    @default_params = model[:additional_default_required_parameters] || {}
+    @supports_structured_outputs = model[:supports_structured_outputs] || false
  end
 
  def client
@@ -26,6 +29,10 @@ class LLM
    :provider,
    :default_params
 
+  def supports_structured_outputs?
+    @supports_structured_outputs
+  end
+
  private
 
  attr_reader :client_class
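Since `info.rb` only sets `supports_structured_outputs` on certain OpenAI and Gemini models, callers can use the new predicate to guard schema usage per model. A hedged usage sketch (schema file name hypothetical, per the README):

```ruby
llm = LLM.from_string!("gpt-4o")

# Only pass a response_format to models that declare support for it.
if llm.supports_structured_outputs?
  response = llm.client.chat(
    [{role: :user, content: "Hello, world!"}],
    response_format: LLM::Schema.from_file("myschema.json")
  )
end
```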
data/lib/llm_ruby.rb
ADDED
@@ -0,0 +1 @@
+require "llm"
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: llm_ruby
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.3.1
 platform: ruby
 authors:
 - Alex Gamble
 bindir: exe
 cert_chain: []
-date: 2025-02-
+date: 2025-02-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -155,8 +155,10 @@ files:
 - lib/llm/info.rb
 - lib/llm/provider.rb
 - lib/llm/response.rb
+- lib/llm/schema.rb
 - lib/llm/stop_reason.rb
 - lib/llm/version.rb
+- lib/llm_ruby.rb
 homepage: https://github.com/agamble/llm_ruby
 licenses:
 - MIT