rubycanusellm 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 7068367ffebce32580d14b9eaf62427bb7bd268fa6a0a0ad3832428e5598e351
4
- data.tar.gz: ed05039faee7b98d6d3a929033484ab5b0a02c6c8fc22cf4b88ed7fe2ba79fa6
3
+ metadata.gz: 4d8c900c65c552170f515c984fd822ee4a004dc45dbfd384106ee98e2f52a6b4
4
+ data.tar.gz: faad0d33ac6759890ba9aea14cdfd45c686f305f4e660dd1a99e25244fb7888a
5
5
  SHA512:
6
- metadata.gz: 253341456facb5a986eede001e24b8d23cacc66de282da73d8f3101221ad497edc33523b18af07bff92c9e1dd025b81a375874fbade5f2f5e82307f87fdb4ec0
7
- data.tar.gz: 2dc6f75656c1f7800f50cb7a55debbd29b8ea3230f74959a222c219d7f4757a57614af61b36447880f1b37375f02e57c225c318305eb0115a4c0f27c1363fd8c
6
+ metadata.gz: 8c56cf78abac614fccc2cb7595d1a356e015bbd4ad6ddc3af3afd6e2364cfa6317d4f45a676c93f869378f00e9a6651729541ae9304a3572aa1c29462040ecc3
7
+ data.tar.gz: cb98a023aa10da202850b5e7783030440384d9646b5ed9ffb2923377296bb07f550ddd057eadd04c96c7e60f4caaa0017c9e272bde08c345bca2cd637e257233
data/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.2.0] - 2026-04-01
4
+
5
+ ### Added
6
+
7
+ - Streaming support for OpenAI and Anthropic providers via `stream: true` option and block interface
8
+ - `RubyCanUseLLM::Chunk` object yielded on each streamed token, with `content` and `role` attributes
9
+
3
10
  ## [0.1.0] - 2025-04-01
4
11
 
5
12
  ### Added
data/README.md CHANGED
@@ -98,6 +98,16 @@ messages = [
98
98
  RubyCanUseLLM.chat(messages, model: "gpt-4o", temperature: 0.5)
99
99
  ```
100
100
 
101
+ ### Streaming
102
+ Pass `stream: true` with a block to receive tokens as they arrive:
103
+ ```ruby
104
+ RubyCanUseLLM.chat(messages, stream: true) do |chunk|
105
+ print chunk.content
106
+ end
107
+ ```
108
+
109
+ Each `chunk` is a `RubyCanUseLLM::Chunk` with `content` (the token text) and `role` (`"assistant"`). Works with both OpenAI and Anthropic.
110
+
101
111
  ### Response
102
112
  ```ruby
103
113
  response.content # "Hello! How can I help?"
@@ -139,7 +149,7 @@ end
139
149
  - [x] `generate:config` command
140
150
  - [x] `generate:completion` command
141
151
  - [x] v0.1.0 release
142
- - [ ] Streaming support
152
+ - [x] Streaming support
143
153
  - [ ] Embeddings + `generate:embedding`
144
154
  - [ ] Mistral and Ollama providers
145
155
  - [ ] Tool calling
@@ -0,0 +1,12 @@
1
# frozen_string_literal: true

module RubyCanUseLLM
  # A single increment of a streamed chat completion.
  #
  # Instances are yielded to the block given to `chat(..., stream: true)`.
  # `content` holds the token text for this delta; `role` is the speaker role,
  # defaulting to "assistant" since providers stream assistant output.
  class Chunk
    attr_reader :content, :role

    # content: the token text carried by this chunk
    # role:    message role label (default "assistant")
    def initialize(content:, role: "assistant")
      @content = content
      @role = role
    end
  end
end
@@ -9,11 +9,16 @@ module RubyCanUseLLM
9
9
  class Anthropic < Base
10
10
  API_URL = "https://api.anthropic.com/v1/messages"
11
11
 
12
- def chat(messages, **options)
12
+ def chat(messages, **options, &block)
13
13
  system, user_messages = extract_system(messages)
14
- body = build_body(system, user_messages, options)
15
- response = request(body)
16
- parse_response(response)
14
+ if options[:stream] && block
15
+ body = build_body(system, user_messages, options.except(:stream)).merge(stream: true)
16
+ stream_request(body, &block)
17
+ else
18
+ body = build_body(system, user_messages, options)
19
+ response = request(body)
20
+ parse_response(response)
21
+ end
17
22
  end
18
23
 
19
24
  private
@@ -66,6 +71,51 @@ module RubyCanUseLLM
66
71
  raise TimeoutError, "Request to Anthropic timed out after #{config.timeout}s"
67
72
  end
68
73
 
74
+ def stream_request(body, &block)
75
+ uri = URI(API_URL)
76
+ http = Net::HTTP.new(uri.host, uri.port)
77
+ http.use_ssl = true
78
+ http.read_timeout = config.timeout
79
+
80
+ req = Net::HTTP::Post.new(uri)
81
+ req["x-api-key"] = config.api_key
82
+ req["anthropic-version"] = "2023-06-01"
83
+ req["Content-Type"] = "application/json"
84
+ req["Accept-Encoding"] = "identity"
85
+ req.body = body.to_json
86
+
87
+ http.request(req) do |response|
88
+ case response.code.to_i
89
+ when 401 then raise AuthenticationError, "Invalid Anthropic API key"
90
+ when 429 then raise RateLimitError, "Anthropic rate limit exceeded"
91
+ end
92
+ raise ProviderError, "Anthropic error (#{response.code})" unless response.code.to_i == 200
93
+
94
+ buffer = ""
95
+ current_event = nil
96
+
97
+ response.read_body do |raw_chunk|
98
+ buffer += raw_chunk
99
+ lines = buffer.split("\n", -1)
100
+ buffer = lines.pop || ""
101
+ lines.each do |line|
102
+ line.chomp!
103
+ if line.start_with?("event: ")
104
+ current_event = line[7..]
105
+ elsif line.start_with?("data: ")
106
+ next unless current_event == "content_block_delta"
107
+
108
+ parsed = JSON.parse(line[6..])
109
+ text = parsed.dig("delta", "text")
110
+ block.call(Chunk.new(content: text)) if text
111
+ end
112
+ end
113
+ end
114
+ end
115
+ rescue Net::ReadTimeout, Net::OpenTimeout
116
+ raise TimeoutError, "Request to Anthropic timed out after #{config.timeout}s"
117
+ end
118
+
69
119
  def handle_response(response)
70
120
  case response.code.to_i
71
121
  when 200
@@ -93,4 +143,4 @@ module RubyCanUseLLM
93
143
  end
94
144
  end
95
145
  end
96
- end
146
+ end
@@ -9,10 +9,15 @@ module RubyCanUseLLM
9
9
  class OpenAI < Base
10
10
  API_URL = "https://api.openai.com/v1/chat/completions"
11
11
 
12
- def chat(messages, **options)
13
- body = build_body(messages, options)
14
- response = request(body)
15
- parse_response(response)
12
+ def chat(messages, **options, &block)
13
+ if options[:stream] && block
14
+ body = build_body(messages, options.except(:stream)).merge(stream: true)
15
+ stream_request(body, &block)
16
+ else
17
+ body = build_body(messages, options)
18
+ response = request(body)
19
+ parse_response(response)
20
+ end
16
21
  end
17
22
 
18
23
  private
@@ -47,6 +52,47 @@ module RubyCanUseLLM
47
52
  raise TimeoutError, "Request to OpenAI timed out after #{config.timeout}s"
48
53
  end
49
54
 
55
+ def stream_request(body, &block)
56
+ uri = URI(API_URL)
57
+ http = Net::HTTP.new(uri.host, uri.port)
58
+ http.use_ssl = true
59
+ http.read_timeout = config.timeout
60
+
61
+ req = Net::HTTP::Post.new(uri)
62
+ req["Authorization"] = "Bearer #{config.api_key}"
63
+ req["Content-Type"] = "application/json"
64
+ req["Accept-Encoding"] = "identity"
65
+ req.body = body.to_json
66
+
67
+ http.request(req) do |response|
68
+ case response.code.to_i
69
+ when 401 then raise AuthenticationError, "Invalid OpenAI API key"
70
+ when 429 then raise RateLimitError, "OpenAI rate limit exceeded"
71
+ end
72
+ raise ProviderError, "OpenAI error (#{response.code})" unless response.code.to_i == 200
73
+
74
+ buffer = ""
75
+ response.read_body do |raw_chunk|
76
+ buffer += raw_chunk
77
+ lines = buffer.split("\n", -1)
78
+ buffer = lines.pop || ""
79
+ lines.each do |line|
80
+ line.chomp!
81
+ next unless line.start_with?("data: ")
82
+
83
+ data = line[6..]
84
+ next if data == "[DONE]"
85
+
86
+ parsed = JSON.parse(data)
87
+ content = parsed.dig("choices", 0, "delta", "content")
88
+ block.call(Chunk.new(content: content)) if content && !content.empty?
89
+ end
90
+ end
91
+ end
92
+ rescue Net::ReadTimeout, Net::OpenTimeout
93
+ raise TimeoutError, "Request to OpenAI timed out after #{config.timeout}s"
94
+ end
95
+
50
96
  def handle_response(response)
51
97
  case response.code.to_i
52
98
  when 200
@@ -74,4 +120,4 @@ module RubyCanUseLLM
74
120
  end
75
121
  end
76
122
  end
77
- end
123
+ end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Rubycanusellm
4
- VERSION = "0.1.0"
4
+ VERSION = "0.2.0"
5
5
  end
data/lib/rubycanusellm.rb CHANGED
@@ -4,6 +4,7 @@ require_relative "rubycanusellm/version"
4
4
  require_relative "rubycanusellm/configuration"
5
5
  require_relative "rubycanusellm/errors"
6
6
  require_relative "rubycanusellm/response"
7
+ require_relative "rubycanusellm/chunk"
7
8
  require_relative "rubycanusellm/providers/base"
8
9
  require_relative "rubycanusellm/providers/openai"
9
10
  require_relative "rubycanusellm/providers/anthropic"
@@ -35,8 +36,8 @@ module RubyCanUseLLM
35
36
  end.new(configuration)
36
37
  end
37
38
 
38
- def chat(messages, **options)
39
- client.chat(messages, **options)
39
+ def chat(messages, **options, &block)
40
+ client.chat(messages, **options, &block)
40
41
  end
41
42
  end
42
43
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: rubycanusellm
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.0
4
+ version: 0.2.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Juan Manuel Guzman Nava
@@ -28,6 +28,7 @@ files:
28
28
  - Rakefile
29
29
  - exe/rubycanusellm
30
30
  - lib/rubycanusellm.rb
31
+ - lib/rubycanusellm/chunk.rb
31
32
  - lib/rubycanusellm/cli.rb
32
33
  - lib/rubycanusellm/configuration.rb
33
34
  - lib/rubycanusellm/errors.rb