llm.rb 8.1.0 → 9.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only. It reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +120 -2
  3. data/README.md +161 -514
  4. data/lib/llm/active_record/acts_as_llm.rb +7 -8
  5. data/lib/llm/agent.rb +36 -16
  6. data/lib/llm/context.rb +30 -26
  7. data/lib/llm/contract/completion.rb +45 -0
  8. data/lib/llm/cost.rb +81 -4
  9. data/lib/llm/error.rb +1 -1
  10. data/lib/llm/function/array.rb +8 -5
  11. data/lib/llm/function/call_group.rb +39 -0
  12. data/lib/llm/function/fork/task.rb +6 -0
  13. data/lib/llm/function/ractor/task.rb +6 -0
  14. data/lib/llm/function/task.rb +10 -0
  15. data/lib/llm/function.rb +1 -0
  16. data/lib/llm/mcp/transport/http.rb +26 -46
  17. data/lib/llm/mcp/transport/stdio.rb +0 -8
  18. data/lib/llm/mcp.rb +6 -23
  19. data/lib/llm/provider.rb +23 -20
  20. data/lib/llm/providers/anthropic/error_handler.rb +6 -7
  21. data/lib/llm/providers/anthropic/files.rb +2 -2
  22. data/lib/llm/providers/anthropic/response_adapter/completion.rb +30 -0
  23. data/lib/llm/providers/anthropic.rb +1 -1
  24. data/lib/llm/providers/bedrock/error_handler.rb +8 -9
  25. data/lib/llm/providers/bedrock/models.rb +13 -13
  26. data/lib/llm/providers/bedrock/response_adapter/completion.rb +30 -0
  27. data/lib/llm/providers/bedrock.rb +1 -1
  28. data/lib/llm/providers/google/error_handler.rb +6 -7
  29. data/lib/llm/providers/google/files.rb +2 -4
  30. data/lib/llm/providers/google/images.rb +1 -1
  31. data/lib/llm/providers/google/models.rb +0 -2
  32. data/lib/llm/providers/google/response_adapter/completion.rb +30 -0
  33. data/lib/llm/providers/google.rb +1 -1
  34. data/lib/llm/providers/ollama/error_handler.rb +6 -7
  35. data/lib/llm/providers/ollama/models.rb +0 -2
  36. data/lib/llm/providers/ollama/response_adapter/completion.rb +30 -0
  37. data/lib/llm/providers/ollama.rb +1 -1
  38. data/lib/llm/providers/openai/audio.rb +3 -3
  39. data/lib/llm/providers/openai/error_handler.rb +6 -7
  40. data/lib/llm/providers/openai/files.rb +2 -2
  41. data/lib/llm/providers/openai/images.rb +3 -3
  42. data/lib/llm/providers/openai/models.rb +1 -1
  43. data/lib/llm/providers/openai/response_adapter/completion.rb +42 -0
  44. data/lib/llm/providers/openai/response_adapter/responds.rb +39 -0
  45. data/lib/llm/providers/openai/responses.rb +2 -2
  46. data/lib/llm/providers/openai/vector_stores.rb +1 -1
  47. data/lib/llm/providers/openai.rb +1 -1
  48. data/lib/llm/response.rb +10 -8
  49. data/lib/llm/sequel/plugin.rb +7 -8
  50. data/lib/llm/stream/queue.rb +15 -42
  51. data/lib/llm/stream.rb +4 -4
  52. data/lib/llm/transport/execution.rb +67 -0
  53. data/lib/llm/transport/http.rb +134 -0
  54. data/lib/llm/transport/persistent_http.rb +152 -0
  55. data/lib/llm/transport/response/http.rb +113 -0
  56. data/lib/llm/transport/response.rb +112 -0
  57. data/lib/llm/{provider/transport/http → transport}/stream_decoder.rb +8 -4
  58. data/lib/llm/transport.rb +139 -0
  59. data/lib/llm/usage.rb +14 -5
  60. data/lib/llm/version.rb +1 -1
  61. data/lib/llm.rb +2 -12
  62. data/llm.gemspec +2 -16
  63. metadata +11 -19
  64. data/lib/llm/provider/transport/http/execution.rb +0 -115
  65. data/lib/llm/provider/transport/http/interruptible.rb +0 -114
  66. data/lib/llm/provider/transport/http.rb +0 -145
  67. data/lib/llm/utils.rb +0 -19
@@ -0,0 +1,152 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Transport
4
+ ##
5
+ # The {LLM::Transport::PersistentHTTP LLM::Transport::PersistentHTTP}
6
+ # transport is the built-in adapter for
7
+ # [Net::HTTP::Persistent](https://github.com/drbrain/net-http-persistent).
8
+ # It manages pooled HTTP connections, tracks active requests by owner,
9
+ # and interrupts in-flight requests when needed.
10
+ #
11
+ # @api private
12
+ class PersistentHTTP < self
13
+ INTERRUPT_ERRORS = [::IOError, ::EOFError, Errno::EBADF].freeze
14
+ Request = Struct.new(:client, :connection, keyword_init: true)
15
+ @registry = {}
16
+ @monitor = Monitor.new
17
+
18
+ ##
19
+ # Returns the process-wide connection pool registry.
20
+ # @return [Hash]
21
+ def self.registry
22
+ @registry
23
+ end
24
+
25
+ def self.lock(&)
26
+ @monitor.synchronize(&)
27
+ end
28
+
29
+ ##
30
+ # @param [String] host
31
+ # @param [Integer] port
32
+ # @param [Integer] timeout
33
+ # @param [Boolean] ssl
34
+ # @return [LLM::Transport::PersistentHTTP]
35
+ def initialize(host:, port:, timeout:, ssl:)
36
+ @host = host
37
+ @port = port
38
+ @timeout = timeout
39
+ @ssl = ssl
40
+ @base_uri = URI("#{ssl ? "https" : "http"}://#{host}:#{port}/")
41
+ @monitor = Monitor.new
42
+ end
43
+
44
+ ##
45
+ # Returns the current request owner.
46
+ # @return [Object]
47
+ def request_owner
48
+ return Fiber.current unless defined?(::Async)
49
+ Async::Task.current? ? Async::Task.current : Fiber.current
50
+ end
51
+
52
+ ##
53
+ # @return [Array<Class<Exception>>]
54
+ def interrupt_errors
55
+ [*INTERRUPT_ERRORS, *optional_interrupt_errors]
56
+ end
57
+
58
+ ##
59
+ # Interrupt an active request, if any.
60
+ # @param [Fiber] owner
61
+ # @return [nil]
62
+ def interrupt!(owner)
63
+ req = request_for(owner) or return
64
+ lock { (@interrupts ||= {})[owner] = true }
65
+ close_socket(req.connection&.http)
66
+ req.client.finish(req.connection)
67
+ owner.stop if owner.respond_to?(:stop)
68
+ rescue *interrupt_errors
69
+ nil
70
+ end
71
+
72
+ ##
73
+ # Returns whether an execution owner was interrupted.
74
+ # @param [Fiber] owner
75
+ # @return [Boolean, nil]
76
+ def interrupted?(owner)
77
+ lock { @interrupts&.delete(owner) }
78
+ end
79
+
80
+ ##
81
+ # Performs a request on the current HTTP transport.
82
+ # @param [Net::HTTPRequest] request
83
+ # @param [Fiber] owner
84
+ # @param [LLM::Object, nil] stream
85
+ # @yieldparam [LLM::Transport::Response] response
86
+ # @return [Object]
87
+ def request(request, owner:, stream: nil, &b)
88
+ client.connection_for(URI.join(base_uri, request.path)) do |connection|
89
+ set_request(Request.new(client:, connection:), owner)
90
+ perform_request(connection.http, request, stream, &b)
91
+ end
92
+ ensure
93
+ clear_request(owner)
94
+ end
95
+
96
+ private
97
+
98
+ attr_reader :host, :port, :timeout, :ssl, :base_uri
99
+
100
+ def client
101
+ self.class.lock do
102
+ if self.class.registry[client_id]
103
+ self.class.registry[client_id]
104
+ else
105
+ LLM.require "net/http/persistent" unless defined?(Net::HTTP::Persistent)
106
+ client = Net::HTTP::Persistent.new(name: self.class.name)
107
+ client.read_timeout = timeout
108
+ client.open_timeout = timeout
109
+ self.class.registry[client_id] = client
110
+ end
111
+ end
112
+ end
113
+
114
+ def client_id
115
+ "#{host}:#{port}:#{timeout}:#{ssl}"
116
+ end
117
+
118
+ def close_socket(http)
119
+ socket = http&.instance_variable_get(:@socket) or return
120
+ socket = socket.io if socket.respond_to?(:io)
121
+ socket.close
122
+ rescue *interrupt_errors
123
+ nil
124
+ end
125
+
126
+ def request_for(owner)
127
+ lock do
128
+ @requests ||= {}
129
+ @requests[owner]
130
+ end
131
+ end
132
+
133
+ def set_request(req, owner)
134
+ lock do
135
+ @requests ||= {}
136
+ @requests[owner] = req
137
+ end
138
+ end
139
+
140
+ def clear_request(owner)
141
+ lock { @requests&.delete(owner) }
142
+ end
143
+
144
+ def lock(&)
145
+ @monitor.synchronize(&)
146
+ end
147
+
148
+ def optional_interrupt_errors
149
+ defined?(::Async::Stop) ? [Async::Stop] : []
150
+ end
151
+ end
152
+ end
@@ -0,0 +1,113 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Transport::Response
4
+ ##
5
+ # {LLM::Transport::Response::HTTP LLM::Transport::Response::HTTP}
6
+ # adapts a {Net::HTTPResponse Net::HTTPResponse} to the
7
+ # {LLM::Transport::Response LLM::Transport::Response} interface.
8
+ #
9
+ # This is the default wrapper for responses produced by the built-in
10
+ # {LLM::Transport::HTTP LLM::Transport::HTTP} transport.
11
+ class HTTP < self
12
+ ##
13
+ # @return [Net::HTTPResponse]
14
+ attr_reader :res
15
+
16
+ ##
17
+ # @param [Net::HTTPResponse] res
18
+ # @return [LLM::Transport::Response::HTTP]
19
+ def initialize(res)
20
+ @res = res
21
+ end
22
+
23
+ ##
24
+ # @return [String]
25
+ def code
26
+ @res.code
27
+ end
28
+
29
+ ##
30
+ # @return [Object]
31
+ def body
32
+ @res.body
33
+ end
34
+
35
+ ##
36
+ # @param [Object] value
37
+ # @return [Object]
38
+ def body=(value)
39
+ @res.body = value
40
+ end
41
+
42
+ ##
43
+ # @param [String] key
44
+ # @return [String, nil]
45
+ def [](key)
46
+ @res[key]
47
+ end
48
+
49
+ ##
50
+ # @param [Object, nil] dest
51
+ # @yieldparam [String] chunk
52
+ # @return [void]
53
+ def read_body(dest = nil, &block)
54
+ if dest && block
55
+ @res.read_body(dest) { block.call(_1) }
56
+ elsif dest
57
+ @res.read_body(dest)
58
+ elsif block
59
+ @res.read_body { block.call(_1) }
60
+ else
61
+ @res.read_body
62
+ end
63
+ end
64
+
65
+ ##
66
+ # @return [Boolean]
67
+ def success?
68
+ Net::HTTPSuccess === @res
69
+ end
70
+
71
+ ##
72
+ # @return [Boolean]
73
+ def ok?
74
+ Net::HTTPOK === @res
75
+ end
76
+
77
+ ##
78
+ # @return [Boolean]
79
+ def bad_request?
80
+ Net::HTTPBadRequest === @res
81
+ end
82
+
83
+ ##
84
+ # @return [Boolean]
85
+ def unauthorized?
86
+ Net::HTTPUnauthorized === @res
87
+ end
88
+
89
+ ##
90
+ # @return [Boolean]
91
+ def forbidden?
92
+ Net::HTTPForbidden === @res
93
+ end
94
+
95
+ ##
96
+ # @return [Boolean]
97
+ def not_found?
98
+ Net::HTTPNotFound === @res
99
+ end
100
+
101
+ ##
102
+ # @return [Boolean]
103
+ def rate_limited?
104
+ Net::HTTPTooManyRequests === @res
105
+ end
106
+
107
+ ##
108
+ # @return [Boolean]
109
+ def server_error?
110
+ Net::HTTPServerError === @res
111
+ end
112
+ end
113
+ end
@@ -0,0 +1,112 @@
1
+ # frozen_string_literal: true
2
+
3
+ class LLM::Transport
4
+ ##
5
+ # {LLM::Transport::Response LLM::Transport::Response} defines the
6
+ # normalized HTTP response interface expected by transports and
7
+ # provider error handlers.
8
+ #
9
+ # Custom transports can execute requests through any underlying HTTP
10
+ # client, then adapt that client's native response object to this
11
+ # interface.
12
+ #
13
+ # This keeps the transport boundary focused on one contract:
14
+ # providers, execution, and error handlers only need a response
15
+ # object that implements
16
+ # {LLM::Transport::Response LLM::Transport::Response}, regardless of
17
+ # how the request was actually performed.
18
+ class Response
19
+ require_relative "response/http"
20
+
21
+ ##
22
+ # @param [Object] res
23
+ # @return [LLM::Transport::Response]
24
+ def self.from(res)
25
+ return res if LLM::Transport::Response === res
26
+ return HTTP.new(res) if Net::HTTPResponse === res
27
+ res
28
+ end
29
+
30
+ ##
31
+ # @return [String]
32
+ def code
33
+ raise NotImplementedError
34
+ end
35
+
36
+ ##
37
+ # @return [Object]
38
+ def body
39
+ raise NotImplementedError
40
+ end
41
+
42
+ ##
43
+ # @param [Object] value
44
+ # @return [Object]
45
+ def body=(value)
46
+ raise NotImplementedError
47
+ end
48
+
49
+ ##
50
+ # @param [String] key
51
+ # @return [String, nil]
52
+ def [](key)
53
+ raise NotImplementedError
54
+ end
55
+
56
+ ##
57
+ # @param [Object, nil] dest
58
+ # @yieldparam [String] chunk
59
+ # @return [void]
60
+ def read_body(dest = nil, &)
61
+ raise NotImplementedError
62
+ end
63
+
64
+ ##
65
+ # @return [Boolean]
66
+ def success?
67
+ raise NotImplementedError
68
+ end
69
+
70
+ ##
71
+ # @return [Boolean]
72
+ def ok?
73
+ raise NotImplementedError
74
+ end
75
+
76
+ ##
77
+ # @return [Boolean]
78
+ def bad_request?
79
+ raise NotImplementedError
80
+ end
81
+
82
+ ##
83
+ # @return [Boolean]
84
+ def unauthorized?
85
+ raise NotImplementedError
86
+ end
87
+
88
+ ##
89
+ # @return [Boolean]
90
+ def forbidden?
91
+ raise NotImplementedError
92
+ end
93
+
94
+ ##
95
+ # @return [Boolean]
96
+ def not_found?
97
+ raise NotImplementedError
98
+ end
99
+
100
+ ##
101
+ # @return [Boolean]
102
+ def rate_limited?
103
+ raise NotImplementedError
104
+ end
105
+
106
+ ##
107
+ # @return [Boolean]
108
+ def server_error?
109
+ raise NotImplementedError
110
+ end
111
+ end
112
+ end
@@ -1,16 +1,20 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- module LLM::Provider::Transport
3
+ class LLM::Transport
4
4
  ##
5
- # @private
6
- class HTTP::StreamDecoder
5
+ # {LLM::Transport::StreamDecoder LLM::Transport::StreamDecoder}
6
+ # incrementally decodes streamed HTTP response bodies into parser
7
+ # events.
8
+ #
9
+ # @api private
10
+ class StreamDecoder
7
11
  ##
8
12
  # @return [Object]
9
13
  attr_reader :parser
10
14
 
11
15
  ##
12
16
  # @param [#parse!, #body] parser
13
- # @return [LLM::Provider::Transport::HTTP::StreamDecoder]
17
+ # @return [LLM::Transport::StreamDecoder]
14
18
  def initialize(parser)
15
19
  @buffer = +""
16
20
  @cursor = 0
@@ -0,0 +1,139 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM
4
+ ##
5
+ # The {LLM::Transport LLM::Transport} class defines the execution
6
+ # interface used by {LLM::Provider}.
7
+ #
8
+ # Custom transports can subclass this class and override {#request} to
9
+ # execute provider requests without changing request adapters or
10
+ # response adapters.
11
+ #
12
+ # Providers currently construct {Net::HTTPRequest Net::HTTPRequest}
13
+ # objects before delegating to a transport. Custom transports are
14
+ # therefore expected to execute those requests directly, or transform
15
+ # them into backend-specific request objects before execution.
16
+ #
17
+ # Only {#request} is required. The remaining methods are optional hooks
18
+ # for features such as interruption, request ownership, or persistence,
19
+ # and only need to be implemented when the underlying adapter can
20
+ # support them.
21
+ #
22
+ # Returned responses should implement the
23
+ # {LLM::Transport::Response LLM::Transport::Response} interface. In
24
+ # practice this can mean adapting another client's response object so
25
+ # existing provider execution, response adapters, and error handlers
26
+ # can rely on one normalized response contract instead of
27
+ # transport-specific classes.
28
+ class Transport
29
+ require_relative "transport/response"
30
+ require_relative "transport/stream_decoder"
31
+ require_relative "transport/http"
32
+ require_relative "transport/persistent_http"
33
+ require_relative "transport/execution"
34
+
35
+ ##
36
+ # Returns the built-in Net::HTTP transport class.
37
+ # @return [Class]
38
+ def self.net_http
39
+ HTTP
40
+ end
41
+
42
+ ##
43
+ # Returns the built-in Net::HTTP::Persistent transport class.
44
+ # @return [Class]
45
+ def self.net_http_persistent
46
+ PersistentHTTP
47
+ end
48
+
49
+ ##
50
+ # Performs a request through the transport.
51
+ # @param [Net::HTTPRequest] request
52
+ # @param [Object] owner
53
+ # @param [LLM::Object, nil] stream
54
+ # @yieldparam [LLM::Transport::Response] response
55
+ # @return [Object]
56
+ def request(request, owner:, stream: nil, &)
57
+ raise NotImplementedError
58
+ end
59
+
60
+ ##
61
+ # Returns the current request owner.
62
+ # @return [Object]
63
+ def request_owner
64
+ return Fiber.current unless defined?(::Async)
65
+ Async::Task.current? ? Async::Task.current : Fiber.current
66
+ end
67
+
68
+ ##
69
+ # Returns the exception classes that indicate an interrupted request.
70
+ # @return [Array<Class<Exception>>]
71
+ def interrupt_errors
72
+ []
73
+ end
74
+
75
+ ##
76
+ # Interrupt an active request, if any.
77
+ # @param [Object] owner
78
+ # @return [nil]
79
+ def interrupt!(owner)
80
+ raise NotImplementedError
81
+ end
82
+
83
+ ##
84
+ # Returns whether an execution owner was interrupted.
85
+ # @param [Object] owner
86
+ # @return [Boolean, nil]
87
+ def interrupted?(owner)
88
+ nil
89
+ end
90
+
91
+ ##
92
+ # @note
93
+ # Custom transports may be able to reuse this helper when they
94
+ # operate on Net::HTTPRequest objects, or implement their own
95
+ # request body preparation path instead.
96
+ # @param [Net::HTTPRequest] request
97
+ # @param [IO] io
98
+ # @return [void]
99
+ def set_body_stream(request, io)
100
+ request.body_stream = io
101
+ request["transfer-encoding"] = "chunked" unless request["content-length"]
102
+ end
103
+
104
+ private
105
+
106
+ ##
107
+ # @api private
108
+ # @note
109
+ # Custom transports may be able to reuse this helper when they
110
+ # execute requests through a Net::HTTP-compatible client, or
111
+ # implement their own request execution path instead.
112
+ def perform_request(client, request, stream, &b)
113
+ if stream
114
+ client.request(request) do |raw|
115
+ res = LLM::Transport::Response.from(raw)
116
+ if res.success?
117
+ parser = stream.decoder.new(stream.parser.new(stream.streamer))
118
+ res.read_body(parser)
119
+ body = parser.body
120
+ res.body = (Hash === body || Array === body) ? LLM::Object.from(body) : body
121
+ else
122
+ body = +""
123
+ res.read_body { body << _1 }
124
+ res.body = body
125
+ end
126
+ ensure
127
+ parser&.free
128
+ end
129
+ elsif b
130
+ client.request(request) do |raw|
131
+ res = LLM::Transport::Response.from(raw)
132
+ res.success? ? b.call(res) : res
133
+ end
134
+ else
135
+ LLM::Transport::Response.from(client.request(request))
136
+ end
137
+ end
138
+ end
139
+ end
data/lib/llm/usage.rb CHANGED
@@ -4,13 +4,22 @@
4
4
  # The {LLM::Usage LLM::Usage} class represents token usage for
5
5
  # a given conversation or completion. As a conversation grows,
6
6
  # so does the number of tokens used. This class helps track
7
- # the number of input, output, reasoning and overall token count.
8
- # It can also help track usage of the context window (which may
9
- # vary by model).
10
- class LLM::Usage < Struct.new(:input_tokens, :output_tokens, :reasoning_tokens, :total_tokens, keyword_init: true)
7
+ # the number of input, output, reasoning, cache, and overall
8
+ # token count. It can also help track usage of the context
9
+ # window (which may vary by model).
10
+ class LLM::Usage < Struct.new(
11
+ :input_tokens, :output_tokens, :reasoning_tokens,
12
+ :input_audio_tokens, :output_audio_tokens, :input_image_tokens,
13
+ :cache_read_tokens, :cache_write_tokens, :total_tokens, keyword_init: true
14
+ )
11
15
  ##
12
16
  # @return [String]
13
17
  def to_json(...)
14
- LLM.json.dump({input_tokens:, output_tokens:, reasoning_tokens:, total_tokens:})
18
+ LLM.json.dump({
19
+ input_tokens:, output_tokens:,
20
+ reasoning_tokens:,
21
+ input_audio_tokens:, output_audio_tokens:, input_image_tokens:,
22
+ cache_read_tokens:, cache_write_tokens:, total_tokens:
23
+ })
15
24
  end
16
25
  end
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
- VERSION = "8.1.0"
4
+ VERSION = "9.0.0"
5
5
  end
data/lib/llm.rb CHANGED
@@ -14,8 +14,8 @@ module LLM
14
14
  require_relative "llm/object"
15
15
  require_relative "llm/model"
16
16
  require_relative "llm/version"
17
- require_relative "llm/utils"
18
17
  require_relative "llm/message"
18
+ require_relative "llm/transport"
19
19
  require_relative "llm/response"
20
20
  require_relative "llm/mime"
21
21
  require_relative "llm/multipart"
@@ -37,22 +37,12 @@ module LLM
37
37
 
38
38
  ##
39
39
  # Thread-safe monitors for different contexts
40
- @monitors = {require: Monitor.new, clients: Monitor.new, inherited: Monitor.new, registry: Monitor.new, mcp: Monitor.new}
40
+ @monitors = {require: Monitor.new, inherited: Monitor.new, registry: Monitor.new, mcp: Monitor.new}
41
41
 
42
42
  ##
43
43
  # Model registry
44
44
  @registry = {}
45
45
 
46
- ##
47
- # Shared HTTP clients used by providers.
48
- @clients = {}
49
-
50
- ##
51
- # @api private
52
- def self.clients
53
- @clients
54
- end
55
-
56
46
  ##
57
47
  # Requires an optional runtime dependency
58
48
  # @raise [LLM::DependencyError]
data/llm.gemspec CHANGED
@@ -8,22 +8,8 @@ Gem::Specification.new do |spec|
8
8
  spec.authors = ["Antar Azri", "0x1eef", "Christos Maris", "Rodrigo Serrano"]
9
9
  spec.email = ["azantar@proton.me", "0x1eef@hardenedbsd.org"]
10
10
 
11
- spec.summary = "Lightweight runtime for building capable AI systems in Ruby."
12
-
13
- spec.description = <<~DESCRIPTION
14
- llm.rb is a lightweight runtime for building capable AI systems in Ruby.
15
- It is not just an API wrapper. llm.rb gives you one runtime for providers,
16
- contexts, agents, tools, MCP servers, streaming, schemas, files, and
17
- persisted state, so real systems can be built out of one coherent
18
- execution model instead of a pile of adapters. It stays close to Ruby, runs
19
- on the standard library by default, loads optional pieces only when needed,
20
- includes built-in ActiveRecord support through acts_as_llm and
21
- acts_as_agent, includes built-in Sequel support through plugin :llm,
22
- and is designed for engineers who want control over long-lived,
23
- tool-capable, stateful AI workflows instead of just request/response
24
- helpers.
25
- DESCRIPTION
26
-
11
+ spec.summary = "Ruby's most capable AI runtime"
12
+ spec.description = spec.summary
27
13
  spec.license = "0BSD"
28
14
  spec.required_ruby_version = ">= 3.3.0"
29
15
 
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 8.1.0
4
+ version: 9.0.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Antar Azri
@@ -264,18 +264,7 @@ dependencies:
264
264
  - - "~>"
265
265
  - !ruby/object:Gem::Version
266
266
  version: '1.5'
267
- description: |
268
- llm.rb is a lightweight runtime for building capable AI systems in Ruby.
269
- It is not just an API wrapper. llm.rb gives you one runtime for providers,
270
- contexts, agents, tools, MCP servers, streaming, schemas, files, and
271
- persisted state, so real systems can be built out of one coherent
272
- execution model instead of a pile of adapters. It stays close to Ruby, runs
273
- on the standard library by default, loads optional pieces only when needed,
274
- includes built-in ActiveRecord support through acts_as_llm and
275
- acts_as_agent, includes built-in Sequel support through plugin :llm,
276
- and is designed for engineers who want control over long-lived,
277
- tool-capable, stateful AI workflows instead of just request/response
278
- helpers.
267
+ description: Ruby's most capable AI runtime
279
268
  email:
280
269
  - azantar@proton.me
281
270
  - 0x1eef@hardenedbsd.org
@@ -315,6 +304,7 @@ files:
315
304
  - lib/llm/file.rb
316
305
  - lib/llm/function.rb
317
306
  - lib/llm/function/array.rb
307
+ - lib/llm/function/call_group.rb
318
308
  - lib/llm/function/fiber_group.rb
319
309
  - lib/llm/function/fork.rb
320
310
  - lib/llm/function/fork/job.rb
@@ -352,10 +342,6 @@ files:
352
342
  - lib/llm/pipe.rb
353
343
  - lib/llm/prompt.rb
354
344
  - lib/llm/provider.rb
355
- - lib/llm/provider/transport/http.rb
356
- - lib/llm/provider/transport/http/execution.rb
357
- - lib/llm/provider/transport/http/interruptible.rb
358
- - lib/llm/provider/transport/http/stream_decoder.rb
359
345
  - lib/llm/providers/anthropic.rb
360
346
  - lib/llm/providers/anthropic/error_handler.rb
361
347
  - lib/llm/providers/anthropic/files.rb
@@ -473,8 +459,14 @@ files:
473
459
  - lib/llm/tracer/logger.rb
474
460
  - lib/llm/tracer/null.rb
475
461
  - lib/llm/tracer/telemetry.rb
462
+ - lib/llm/transport.rb
463
+ - lib/llm/transport/execution.rb
464
+ - lib/llm/transport/http.rb
465
+ - lib/llm/transport/persistent_http.rb
466
+ - lib/llm/transport/response.rb
467
+ - lib/llm/transport/response/http.rb
468
+ - lib/llm/transport/stream_decoder.rb
476
469
  - lib/llm/usage.rb
477
- - lib/llm/utils.rb
478
470
  - lib/llm/version.rb
479
471
  - lib/sequel/plugins/agent.rb
480
472
  - lib/sequel/plugins/llm.rb
@@ -503,5 +495,5 @@ required_rubygems_version: !ruby/object:Gem::Requirement
503
495
  requirements: []
504
496
  rubygems_version: 4.0.3
505
497
  specification_version: 4
506
- summary: Lightweight runtime for building capable AI systems in Ruby.
498
+ summary: Ruby's most capable AI runtime
507
499
  test_files: []