llm.rb 0.14.2 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +8 -19
- data/lib/llm/buffer.rb +1 -1
- data/lib/llm/message.rb +19 -0
- data/lib/llm/provider.rb +1 -1
- data/lib/llm/providers/openai/response/responds.rb +1 -0
- data/lib/llm/providers/openai/responses/stream_parser.rb +76 -0
- data/lib/llm/providers/openai/responses.rb +8 -2
- data/lib/llm/version.rb +1 -1
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 68bbdddd157d6df71729378a46b4759140d7fe13c44a1e14940a0a3a367d277c
|
4
|
+
data.tar.gz: f24aa6b6042b58857ca419c0039422f678e92355f5fdfd0064d2931108338866
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: e5043707425445ea5709f4eed33b3feab33a376a66569abe46b6ad93274ffd3044eef1099625ec75f24f5ee5fb5387493adc97332c7be1d6fa2e085d6e33281c
|
7
|
+
data.tar.gz: 6f60c60130904dd23c67d65d50a04614c32659d5fe7af5e37ec2f9d6f25ccda1712a22bf872a7ae4c4aae49c2755cb1adff593fe1435c4231f445c40761a6477
|
data/README.md
CHANGED
@@ -5,10 +5,6 @@ includes OpenAI, Gemini, Anthropic, xAI (grok), DeepSeek, Ollama, and
|
|
5
5
|
LlamaCpp. The toolkit includes full support for chat, streaming, tool calling,
|
6
6
|
audio, images, files, and structured outputs (JSON Schema).
|
7
7
|
|
8
|
-
The library provides a common, uniform interface for all the providers and
|
9
|
-
features it supports, in addition to provider-specific features as well. Keep
|
10
|
-
reading to find out more.
|
11
|
-
|
12
8
|
## Quick start
|
13
9
|
|
14
10
|
#### Demo
|
@@ -29,8 +25,8 @@ reading to find out more.
|
|
29
25
|
|
30
26
|
#### Ecosystem
|
31
27
|
|
32
|
-
* [llm-shell](https://github.com/llmrb/llm-shell) –
|
33
|
-
* [llm-spell](https://github.com/llmrb/llm-spell) –
|
28
|
+
* [llm-shell](https://github.com/llmrb/llm-shell) – a developer-oriented console for Large Language Model communication
|
29
|
+
* [llm-spell](https://github.com/llmrb/llm-spell) – a utility that can correct spelling mistakes with a Large Language Model
|
34
30
|
|
35
31
|
## Features
|
36
32
|
|
@@ -138,15 +134,14 @@ require "llm"
|
|
138
134
|
llm = LLM.openai(key: ENV["KEY"])
|
139
135
|
bot = LLM::Bot.new(llm)
|
140
136
|
url = "https://en.wikipedia.org/wiki/Special:FilePath/Cognac_glass.jpg"
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
end
|
137
|
+
|
138
|
+
bot.chat "Your task is to answer all user queries", role: :system
|
139
|
+
bot.chat ["Tell me about this URL", URI(url)], role: :user
|
140
|
+
bot.chat ["Tell me about this PDF", File.open("handbook.pdf", "rb")], role: :user
|
141
|
+
bot.chat "Are the URL and PDF similar to each other?", role: :user
|
147
142
|
|
148
143
|
# At this point, we execute a single request
|
149
|
-
|
144
|
+
bot.messages.each { print "[#{_1.role}] ", _1.content, "\n" }
|
150
145
|
```
|
151
146
|
|
152
147
|
#### Streaming
|
@@ -528,12 +523,6 @@ over or doesn't cover at all. The API reference is available at
|
|
528
523
|
a blog post that implements image editing with Gemini
|
529
524
|
* [docs/](docs/) – the docs directory contains additional guides
|
530
525
|
|
531
|
-
|
532
|
-
## See also
|
533
|
-
|
534
|
-
* [llm-shell](https://github.com/llmrb/llm-shell) – a shell that uses llm.rb to
|
535
|
-
provide a command-line interface to LLMs.
|
536
|
-
|
537
526
|
## Install
|
538
527
|
|
539
528
|
llm.rb can be installed via rubygems.org:
|
data/lib/llm/buffer.rb
CHANGED
@@ -135,7 +135,7 @@ module LLM
|
|
135
135
|
params = [
|
136
136
|
*oldparams,
|
137
137
|
params.merge(input: messages),
|
138
|
-
@response ? {previous_response_id: @response.
|
138
|
+
@response ? {previous_response_id: @response.response_id} : {}
|
139
139
|
].inject({}, &:merge!)
|
140
140
|
@response = @provider.responses.create(message.content, params.merge(role:))
|
141
141
|
@completed.concat([*pendings, message, *@response.outputs[0]])
|
data/lib/llm/message.rb
CHANGED
@@ -127,6 +127,25 @@ module LLM
|
|
127
127
|
extra[:response]
|
128
128
|
end
|
129
129
|
|
130
|
+
##
|
131
|
+
# @note
|
132
|
+
# This method returns token usage for assistant messages,
|
133
|
+
# and it returns an empty object for non-assistant messages
|
134
|
+
# Returns token usage statistics
|
135
|
+
# @return [LLM::Object]
|
136
|
+
def usage
|
137
|
+
@usage ||= if response
|
138
|
+
LLM::Object.from_hash({
|
139
|
+
input_tokens: response.prompt_tokens || 0,
|
140
|
+
output_tokens: response.completion_tokens || 0,
|
141
|
+
total_tokens: response.total_tokens || 0
|
142
|
+
})
|
143
|
+
else
|
144
|
+
LLM::Object.from_hash({})
|
145
|
+
end
|
146
|
+
end
|
147
|
+
alias_method :token_usage, :usage
|
148
|
+
|
130
149
|
##
|
131
150
|
# Returns a string representation of the message
|
132
151
|
# @return [String]
|
data/lib/llm/provider.rb
CHANGED
@@ -268,7 +268,7 @@ class LLM::Provider
|
|
268
268
|
# @raise [SystemCallError]
|
269
269
|
# When there is a network error at the operating system level
|
270
270
|
# @return [Net::HTTPResponse]
|
271
|
-
def execute(request:, stream: nil, &b)
|
271
|
+
def execute(request:, stream: nil, stream_parser: self.stream_parser, &b)
|
272
272
|
res = if stream
|
273
273
|
client.request(request) do |res|
|
274
274
|
handler = event_handler.new stream_parser.new(stream)
|
@@ -0,0 +1,76 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
class LLM::OpenAI
|
4
|
+
##
|
5
|
+
# @private
|
6
|
+
class Responses::StreamParser
|
7
|
+
##
|
8
|
+
# Returns the fully constructed response body
|
9
|
+
# @return [LLM::Object]
|
10
|
+
attr_reader :body
|
11
|
+
|
12
|
+
##
|
13
|
+
# @param [#<<] io An IO-like object
|
14
|
+
# @return [LLM::OpenAI::Responses::StreamParser]
|
15
|
+
def initialize(io)
|
16
|
+
@body = LLM::Object.new(output: []) # Initialize with an empty output array
|
17
|
+
@io = io
|
18
|
+
end
|
19
|
+
|
20
|
+
##
|
21
|
+
# @param [Hash] chunk
|
22
|
+
# @return [LLM::OpenAI::Responses::StreamParser]
|
23
|
+
def parse!(chunk)
|
24
|
+
tap { handle_event(chunk) }
|
25
|
+
end
|
26
|
+
|
27
|
+
private
|
28
|
+
|
29
|
+
def handle_event(chunk)
|
30
|
+
case chunk["type"]
|
31
|
+
when "response.created"
|
32
|
+
chunk.each do |k, v|
|
33
|
+
next if k == "type"
|
34
|
+
@body[k] = v
|
35
|
+
end
|
36
|
+
@body.output ||= []
|
37
|
+
when "response.output_item.added"
|
38
|
+
output_index = chunk["output_index"]
|
39
|
+
item = LLM::Object.from_hash(chunk["item"])
|
40
|
+
@body.output[output_index] = item
|
41
|
+
@body.output[output_index].content ||= []
|
42
|
+
when "response.content_part.added"
|
43
|
+
output_index = chunk["output_index"]
|
44
|
+
content_index = chunk["content_index"]
|
45
|
+
part = LLM::Object.from_hash(chunk["part"])
|
46
|
+
@body.output[output_index] ||= LLM::Object.new(content: [])
|
47
|
+
@body.output[output_index].content ||= []
|
48
|
+
@body.output[output_index].content[content_index] = part
|
49
|
+
when "response.output_text.delta"
|
50
|
+
output_index = chunk["output_index"]
|
51
|
+
content_index = chunk["content_index"]
|
52
|
+
delta_text = chunk["delta"]
|
53
|
+
output_item = @body.output[output_index]
|
54
|
+
if output_item&.content
|
55
|
+
content_part = output_item.content[content_index]
|
56
|
+
if content_part && content_part.type == "output_text"
|
57
|
+
content_part.text ||= ""
|
58
|
+
content_part.text << delta_text
|
59
|
+
@io << delta_text if @io.respond_to?(:<<)
|
60
|
+
end
|
61
|
+
end
|
62
|
+
when "response.output_item.done"
|
63
|
+
output_index = chunk["output_index"]
|
64
|
+
item = LLM::Object.from_hash(chunk["item"])
|
65
|
+
@body.output[output_index] = item
|
66
|
+
when "response.content_part.done"
|
67
|
+
output_index = chunk["output_index"]
|
68
|
+
content_index = chunk["content_index"]
|
69
|
+
part = LLM::Object.from_hash(chunk["part"])
|
70
|
+
@body.output[output_index] ||= LLM::Object.new(content: [])
|
71
|
+
@body.output[output_index].content ||= []
|
72
|
+
@body.output[output_index].content[content_index] = part
|
73
|
+
end
|
74
|
+
end
|
75
|
+
end
|
76
|
+
end
|
@@ -15,6 +15,7 @@ class LLM::OpenAI
|
|
15
15
|
# [res1, res2].each { llm.responses.delete(_1) }
|
16
16
|
class Responses
|
17
17
|
require_relative "response/responds"
|
18
|
+
require_relative "responses/stream_parser"
|
18
19
|
include Format
|
19
20
|
|
20
21
|
##
|
@@ -37,12 +38,13 @@ class LLM::OpenAI
|
|
37
38
|
def create(prompt, params = {})
|
38
39
|
params = {role: :user, model: @provider.default_model}.merge!(params)
|
39
40
|
params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
|
40
|
-
role = params.delete(:role)
|
41
|
+
role, stream = params.delete(:role), params.delete(:stream)
|
42
|
+
params[:stream] = true if stream.respond_to?(:<<) || stream == true
|
41
43
|
req = Net::HTTP::Post.new("/v1/responses", headers)
|
42
44
|
messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
|
43
45
|
body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
|
44
46
|
set_body_stream(req, StringIO.new(body))
|
45
|
-
res = execute(request: req)
|
47
|
+
res = execute(request: req, stream:, stream_parser:)
|
46
48
|
LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
|
47
49
|
end
|
48
50
|
|
@@ -86,5 +88,9 @@ class LLM::OpenAI
|
|
86
88
|
name = "JSONSchema"
|
87
89
|
{text: {format: {type: "json_schema", name:, schema:}}}
|
88
90
|
end
|
91
|
+
|
92
|
+
def stream_parser
|
93
|
+
LLM::OpenAI::Responses::StreamParser
|
94
|
+
end
|
89
95
|
end
|
90
96
|
end
|
data/lib/llm/version.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: llm.rb
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.14.2
|
4
|
+
version: 0.15.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Antar Azri
|
@@ -241,6 +241,7 @@ files:
|
|
241
241
|
- lib/llm/providers/openai/response/moderations.rb
|
242
242
|
- lib/llm/providers/openai/response/responds.rb
|
243
243
|
- lib/llm/providers/openai/responses.rb
|
244
|
+
- lib/llm/providers/openai/responses/stream_parser.rb
|
244
245
|
- lib/llm/providers/openai/stream_parser.rb
|
245
246
|
- lib/llm/providers/openai/vector_stores.rb
|
246
247
|
- lib/llm/providers/xai.rb
|