llm.rb 4.7.0 → 4.9.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (95)
  1. checksums.yaml +4 -4
  2. data/README.md +335 -587
  3. data/data/anthropic.json +770 -0
  4. data/data/deepseek.json +75 -0
  5. data/data/google.json +1050 -0
  6. data/data/openai.json +1421 -0
  7. data/data/xai.json +792 -0
  8. data/data/zai.json +330 -0
  9. data/lib/llm/agent.rb +42 -41
  10. data/lib/llm/bot.rb +1 -263
  11. data/lib/llm/buffer.rb +7 -0
  12. data/lib/llm/{session → context}/deserializer.rb +4 -3
  13. data/lib/llm/context.rb +292 -0
  14. data/lib/llm/cost.rb +26 -0
  15. data/lib/llm/error.rb +8 -0
  16. data/lib/llm/eventstream/parser.rb +0 -5
  17. data/lib/llm/function/array.rb +61 -0
  18. data/lib/llm/function/fiber_group.rb +91 -0
  19. data/lib/llm/function/task_group.rb +89 -0
  20. data/lib/llm/function/thread_group.rb +94 -0
  21. data/lib/llm/function.rb +75 -10
  22. data/lib/llm/mcp/command.rb +108 -0
  23. data/lib/llm/mcp/error.rb +31 -0
  24. data/lib/llm/mcp/pipe.rb +82 -0
  25. data/lib/llm/mcp/rpc.rb +118 -0
  26. data/lib/llm/mcp/transport/stdio.rb +85 -0
  27. data/lib/llm/mcp.rb +102 -0
  28. data/lib/llm/message.rb +13 -11
  29. data/lib/llm/model.rb +115 -0
  30. data/lib/llm/prompt.rb +17 -7
  31. data/lib/llm/provider.rb +60 -32
  32. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  33. data/lib/llm/providers/anthropic/files.rb +3 -3
  34. data/lib/llm/providers/anthropic/models.rb +1 -1
  35. data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
  36. data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
  37. data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
  38. data/lib/llm/providers/anthropic.rb +21 -5
  39. data/lib/llm/providers/deepseek.rb +10 -3
  40. data/lib/llm/providers/{gemini → google}/audio.rb +6 -6
  41. data/lib/llm/providers/{gemini → google}/error_handler.rb +20 -5
  42. data/lib/llm/providers/{gemini → google}/files.rb +11 -11
  43. data/lib/llm/providers/{gemini → google}/images.rb +7 -7
  44. data/lib/llm/providers/{gemini → google}/models.rb +5 -5
  45. data/lib/llm/providers/{gemini → google}/request_adapter/completion.rb +7 -3
  46. data/lib/llm/providers/{gemini → google}/request_adapter.rb +1 -1
  47. data/lib/llm/providers/{gemini → google}/response_adapter/completion.rb +7 -7
  48. data/lib/llm/providers/{gemini → google}/response_adapter/embedding.rb +1 -1
  49. data/lib/llm/providers/{gemini → google}/response_adapter/file.rb +1 -1
  50. data/lib/llm/providers/{gemini → google}/response_adapter/files.rb +1 -1
  51. data/lib/llm/providers/{gemini → google}/response_adapter/image.rb +1 -1
  52. data/lib/llm/providers/google/response_adapter/models.rb +13 -0
  53. data/lib/llm/providers/{gemini → google}/response_adapter/web_search.rb +2 -2
  54. data/lib/llm/providers/{gemini → google}/response_adapter.rb +8 -8
  55. data/lib/llm/providers/{gemini → google}/stream_parser.rb +3 -3
  56. data/lib/llm/providers/{gemini.rb → google.rb} +41 -26
  57. data/lib/llm/providers/llamacpp.rb +10 -3
  58. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  59. data/lib/llm/providers/ollama/models.rb +1 -1
  60. data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
  61. data/lib/llm/providers/ollama/response_adapter.rb +2 -0
  62. data/lib/llm/providers/ollama.rb +19 -4
  63. data/lib/llm/providers/openai/error_handler.rb +18 -3
  64. data/lib/llm/providers/openai/files.rb +3 -3
  65. data/lib/llm/providers/openai/images.rb +17 -11
  66. data/lib/llm/providers/openai/models.rb +1 -1
  67. data/lib/llm/providers/openai/response_adapter/completion.rb +9 -1
  68. data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
  69. data/lib/llm/providers/openai/response_adapter/responds.rb +9 -1
  70. data/lib/llm/providers/openai/response_adapter.rb +2 -0
  71. data/lib/llm/providers/openai/responses.rb +16 -1
  72. data/lib/llm/providers/openai/stream_parser.rb +2 -0
  73. data/lib/llm/providers/openai.rb +28 -6
  74. data/lib/llm/providers/xai/images.rb +7 -6
  75. data/lib/llm/providers/xai.rb +10 -3
  76. data/lib/llm/providers/zai.rb +9 -2
  77. data/lib/llm/registry.rb +81 -0
  78. data/lib/llm/schema/enum.rb +16 -0
  79. data/lib/llm/schema/parser.rb +109 -0
  80. data/lib/llm/schema.rb +5 -0
  81. data/lib/llm/server_tool.rb +5 -5
  82. data/lib/llm/session.rb +10 -1
  83. data/lib/llm/tool/param.rb +1 -1
  84. data/lib/llm/tool.rb +86 -5
  85. data/lib/llm/tracer/langsmith.rb +144 -0
  86. data/lib/llm/tracer/logger.rb +9 -1
  87. data/lib/llm/tracer/null.rb +8 -0
  88. data/lib/llm/tracer/telemetry.rb +98 -78
  89. data/lib/llm/tracer.rb +108 -4
  90. data/lib/llm/usage.rb +5 -0
  91. data/lib/llm/version.rb +1 -1
  92. data/lib/llm.rb +40 -6
  93. data/llm.gemspec +45 -8
  94. metadata +87 -28
  95. data/lib/llm/providers/gemini/response_adapter/models.rb +0 -15
@@ -41,7 +41,8 @@ class LLM::OpenAI
  role, stream = params.delete(:role), params.delete(:stream)
  params[:stream] = true if stream.respond_to?(:<<) || stream == true
  req = Net::HTTP::Post.new("/v1/responses", headers)
- messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
+ messages = build_complete_messages(prompt, params, role)
+ @provider.tracer.set_request_metadata(user_input: extract_user_input(messages, fallback: prompt))
  body = LLM.json.dump({input: [adapt(messages, mode: :response)].flatten}.merge!(params))
  set_body_stream(req, StringIO.new(body))
  res, span, tracer = execute(request: req, stream:, stream_parser:, operation: "chat", model: params[:model])
@@ -88,6 +89,14 @@ class LLM::OpenAI
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
  end

+ def build_complete_messages(prompt, params, role)
+   if LLM::Prompt === prompt
+     [*(params.delete(:input) || []), *prompt]
+   else
+     [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
+   end
+ end
+
  def adapt_schema(params)
  return {} unless params && params[:schema]
  schema = params.delete(:schema)
@@ -99,5 +108,11 @@ class LLM::OpenAI
  def stream_parser
  LLM::OpenAI::Responses::StreamParser
  end
+
+ def extract_user_input(messages, fallback:)
+   message = messages.reverse.find(&:user?) || messages.last
+   value = message&.content || fallback
+   value.is_a?(String) ? value : LLM.json.dump(value)
+ end
  end
  end
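The Responses path above (and the identical chat-completions path further down) reduces to one rule: an LLM::Prompt is spliced in as a list of messages, anything else is wrapped in a single message with the given role, and tracing records the most recent user message. A standalone sketch of that logic, where Msg and Prompt are hypothetical stand-ins for LLM::Message and LLM::Prompt:

  # Hypothetical stand-ins, for illustration only
  Msg = Struct.new(:role, :content) do
    def user? = role == :user
  end
  Prompt = Class.new(Array)

  def build_messages(prompt, history, role)
    if Prompt === prompt
      [*history, *prompt]               # a prompt carries its own messages
    else
      [*history, Msg.new(role, prompt)] # anything else becomes one message
    end
  end

  def extract_user_input(messages, fallback:)
    message = messages.reverse.find(&:user?) || messages.last
    value = message&.content || fallback
    value.is_a?(String) ? value : value.inspect
  end

  history = [Msg.new(:system, "Be brief")]
  prompt  = Prompt[Msg.new(:user, "Hi"), Msg.new(:user, "Name a prime")]
  msgs    = build_messages(prompt, history, :user)
  extract_user_input(msgs, fallback: nil) # => "Name a prime"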
@@ -43,6 +43,7 @@ class LLM::OpenAI
  target_message = @body["choices"][index]["message"]
  delta = choice["delta"] || {}
  delta.each do |key, value|
+ next if value.nil?
  if key == "content"
  target_message[key] ||= +""
  target_message[key] << value
@@ -57,6 +58,7 @@ class LLM::OpenAI
  message_hash = {"role" => "assistant"}
  @body["choices"][index] = {"message" => message_hash}
  (choice["delta"] || {}).each do |key, value|
+ next if value.nil?
  if key == "content"
  @io << value if @io.respond_to?(:<<)
  message_hash[key] = value
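The nil guard matters because some OpenAI-compatible backends emit delta chunks with null-valued keys (for example a final chunk with "content": null), and appending nil to a String raises TypeError. A minimal sketch of the merge pattern, using plain hashes instead of the gem's stream parser:

  # Merge streamed deltas into one message; the chunks are made up for illustration
  message = {"role" => "assistant"}
  chunks = [
    {"content" => "Hel"},
    {"content" => "lo"},
    {"content" => nil} # tolerated thanks to the new guard
  ]
  chunks.each do |delta|
    delta.each do |key, value|
      next if value.nil?
      message[key] ||= +""
      message[key] << value
    end
  end
  message["content"] # => "Hello"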
@@ -10,9 +10,9 @@ module LLM
  # require "llm"
  #
  # llm = LLM.openai(key: ENV["KEY"])
- # ses = LLM::Session.new(llm)
- # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+ # ctx = LLM::Context.new(llm)
+ # ctx.talk ["Tell me about this photo", ctx.local_file("/images/photo.png")]
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class OpenAI < Provider
  require_relative "openai/error_handler"
  require_relative "openai/request_adapter"
@@ -36,6 +36,13 @@ module LLM
  super(host: HOST, **)
  end

+ ##
+ # @return [Symbol]
+ #  Returns the provider's name
+ def name
+   :openai
+ end
+
  ##
  # Provides an embedding
  # @see https://platform.openai.com/docs/api-reference/embeddings/create OpenAI docs
@@ -65,7 +72,8 @@ module LLM
  # @return (see LLM::Provider#complete)
  def complete(prompt, params = {})
  params, stream, tools, role = normalize_complete_params(params)
- req = build_complete_request(prompt, params, role)
+ req, messages = build_complete_request(prompt, params, role)
+ tracer.set_request_metadata(user_input: extract_user_input(messages, fallback: prompt))
  res, span, tracer = execute(request: req, stream: stream, operation: "chat", model: params[:model])
  res = ResponseAdapter.adapt(res, type: :completion)
  .extend(Module.new { define_method(:__tools__) { tools } })
@@ -212,11 +220,25 @@ module LLM
  end

  def build_complete_request(prompt, params, role)
- messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
+ messages = build_complete_messages(prompt, params, role)
  body = LLM.json.dump({messages: adapt(messages, mode: :complete).flatten}.merge!(params))
  req = Net::HTTP::Post.new(completions_path, headers)
  set_body_stream(req, StringIO.new(body))
- req
+ [req, messages]
+ end
+
+ def build_complete_messages(prompt, params, role)
+   if LLM::Prompt === prompt
+     [*(params.delete(:messages) || []), *prompt]
+   else
+     [*(params.delete(:messages) || []), Message.new(role, prompt)]
+   end
+ end
+
+ def extract_user_input(messages, fallback:)
+   message = messages.reverse.find(&:user?) || messages.last
+   value = message&.content || fallback
+   value.is_a?(String) ? value : LLM.json.dump(value)
  end
  end
  end
@@ -5,7 +5,7 @@ class LLM::XAI
  # The {LLM::XAI::Images LLM::XAI::Images} class provides an interface
  # for [xAI's images API](https://docs.x.ai/docs/guides/image-generations).
  # xAI supports multiple response formats: temporary URLs, or binary strings
- # encoded in base64. The default is to return temporary URLs.
+ # encoded in base64. The default is to return base64-encoded image data.
  #
  # @example Temporary URLs
  # #!/usr/bin/env ruby
@@ -14,7 +14,8 @@ class LLM::XAI
  # require "fileutils"
  #
  # llm = LLM.xai(key: ENV["KEY"])
- # res = llm.images.create prompt: "A dog on a rocket to the moon"
+ # res = llm.images.create prompt: "A dog on a rocket to the moon",
+ #                         response_format: "url"
  # FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
  #              "rocket.png"
  #
@@ -32,26 +33,26 @@ class LLM::XAI
  # @example
  # llm = LLM.xai(key: ENV["KEY"])
  # res = llm.images.create prompt: "A dog on a rocket to the moon"
- # res.urls.each { print _1, "\n"}
+ # IO.copy_stream res.images[0], "rocket.png"
  # @see https://docs.x.ai/docs/guides/image-generations xAI docs
  # @param [String] prompt The prompt
  # @param [String] model The model to use
  # @param [Hash] params Other parameters (see xAI docs)
  # @raise (see LLM::Provider#request)
  # @return [LLM::Response]
- def create(prompt:, model: "grok-2-image-1212", **params)
+ def create(prompt:, model: "grok-imagine-image", **params)
  super
  end

  ##
  # @raise [NotImplementedError]
- def edit(model: "grok-2-image-1212", **)
+ def edit(model: "grok-imagine-image", **)
  raise NotImplementedError
  end

  ##
  # @raise [NotImplementedError]
- def create_variation(model: "grok-2-image-1212", **)
+ def create_variation(model: "grok-imagine-image", **)
  raise NotImplementedError
  end
  end
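Because the default response format is now base64 rather than a URL, code that relied on res.urls must either request URLs explicitly or consume the decoded image IO. A sketch that mirrors the doc examples above:

  #!/usr/bin/env ruby
  require "llm"

  llm = LLM.xai(key: ENV["KEY"])

  # Default in 4.9.0: base64-encoded image data, exposed as an IO
  res = llm.images.create(prompt: "A dog on a rocket to the moon")
  IO.copy_stream res.images[0], "rocket.png"

  # Opt back in to temporary URLs
  res = llm.images.create(prompt: "A dog on a rocket to the moon",
                          response_format: "url")
  print res.urls[0], "\n"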
@@ -11,9 +11,9 @@ module LLM
  # require "llm"
  #
  # llm = LLM.xai(key: ENV["KEY"])
- # ses = LLM::Session.new(llm)
- # ses.talk ["Tell me about this photo", ses.local_file("/images/photo.png")]
- # ses.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
+ # ctx = LLM::Context.new(llm)
+ # ctx.talk ["Tell me about this photo", ctx.local_file("/images/photo.png")]
+ # ctx.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
  class XAI < OpenAI
  require_relative "xai/images"

@@ -25,6 +25,13 @@ module LLM
  super
  end

+ ##
+ # @return [Symbol]
+ #  Returns the provider's name
+ def name
+   :xai
+ end
+
  ##
  # @raise [NotImplementedError]
  def files
@@ -11,8 +11,8 @@ module LLM
  # require "llm"
  #
  # llm = LLM.zai(key: ENV["KEY"])
- # ses = LLM::Session.new(llm, stream: $stdout)
- # ses.talk "Hello"
+ # ctx = LLM::Context.new(llm, stream: $stdout)
+ # ctx.talk "Hello"
  class ZAI < OpenAI
  ##
  # @param [String] host A regional host or the default ("api.z.ai")
@@ -21,6 +21,13 @@ module LLM
  super
  end

+ ##
+ # @return [Symbol]
+ #  Returns the provider's name
+ def name
+   :zai
+ end
+
  ##
  # @raise [NotImplementedError]
  def files
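Each OpenAI-compatible provider now reports its identity as a Symbol. A small sketch of how this presumably pairs with the registry data files added under data/data/:

  require "llm"

  llm = LLM.xai(key: ENV["KEY"])
  llm.name # => :xai

  # Assumption: the name matches the bundled model metadata files
  LLM::Registry.for(llm.name) # loads the bundled xai.json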
@@ -0,0 +1,81 @@
+ # frozen_string_literal: true
+
+ ##
+ # The {LLM::Registry LLM::Registry} class provides a small API over
+ # provider model data. It exposes model metadata such as pricing,
+ # capabilities, modalities, and limits from the registry files
+ # stored under `data/`. The data is provided by https://models.dev
+ # and shipped with llm.rb.
+ class LLM::Registry
+   @root = File.join(__dir__, "..", "..")
+
+   ##
+   # @raise [LLM::Error]
+   #  Might raise an error
+   # @param [Symbol] name
+   #  A provider name
+   # @return [LLM::Registry]
+   def self.for(name)
+     path = File.join @root, "data", "#{name}.json"
+     if File.file?(path)
+       new LLM.json.load(File.binread(path))
+     else
+       raise LLM::NoSuchRegistryError, "no registry found for #{name}"
+     end
+   end
+
+   ##
+   # @param [Hash] blob
+   #  A model registry
+   # @return [LLM::Registry]
+   def initialize(blob)
+     @registry = LLM::Object.from(blob)
+     @models = @registry.models
+   end
+
+   ##
+   # @return [LLM::Object]
+   #  Returns model costs
+   def cost(model:)
+     lookup(model:).cost
+   end
+
+   ##
+   # @return [LLM::Object]
+   #  Returns model modalities
+   def modalities(model:)
+     lookup(model:).modalities
+   end
+
+   ##
+   # @return [LLM::Object]
+   #  Returns model limits such as the context window size
+   def limit(model:)
+     lookup(model:).limit
+   end
+
+   private
+
+   def lookup(model:)
+     if @models.key?(model)
+       @models[model]
+     else
+       patterns = {/-\d{4}-\d{2}-\d{2}$/ => "", /\A(gpt-.*)-\d{4}$/ => "\\1"}
+       fallback = find_map(patterns) { model.dup.sub!(_1, _2) } || "none"
+       if @models.key?(fallback)
+         @models[fallback]
+       else
+         raise LLM::NoSuchModelError, "no such model: #{model} (fallback: #{fallback})"
+       end
+     end
+   end
+
+   ##
+   # Similar to Enumerable#find but returns the block's return value
+   # @return [Object, nil]
+   def find_map(pair)
+     result = nil
+     pair.each_pair { break if result = yield(_1, _2) }
+     result
+   end
+ end
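A short usage sketch of the registry API defined above; the model name is an assumption about what the bundled models.dev data contains:

  require "llm"

  registry = LLM::Registry.for(:openai)  # reads the bundled openai.json
  registry.cost(model: "gpt-4o")         # => pricing metadata (LLM::Object)
  registry.limit(model: "gpt-4o")        # => limits, e.g. context window size
  registry.modalities(model: "gpt-4o")   # => input/output modalities

  # Dated names fall back to a base name via the patterns in #lookup,
  # e.g. "gpt-4o-2024-08-06" is looked up as "gpt-4o".
  registry.cost(model: "gpt-4o-2024-08-06")

  # LLM::Registry.for(:unknown)     raises LLM::NoSuchRegistryError
  # registry.cost(model: "unknown") raises LLM::NoSuchModelError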
@@ -0,0 +1,16 @@
+ # frozen_string_literal: true
+
+ class LLM::Schema
+   ##
+   # The {LLM::Schema::Enum LLM::Schema::Enum} class represents a
+   # string value constrained to one of a fixed set of values.
+   class Enum
+     ##
+     # Returns a string leaf constrained to the given values
+     # @param [Array<String>] values
+     # @return [LLM::Schema::String]
+     def self.[](*values)
+       LLM::Schema::String.new.enum(*values)
+     end
+   end
+ end
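The new shorthand delegates to a string leaf, so these two expressions are equivalent:

  require "llm"

  LLM::Schema::Enum["celsius", "fahrenheit"]
  # is the same as:
  LLM::Schema::String.new.enum("celsius", "fahrenheit")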
@@ -0,0 +1,109 @@
+ # frozen_string_literal: true
+
+ class LLM::Schema
+   ##
+   # The {LLM::Schema::Parser LLM::Schema::Parser} module provides
+   # methods for parsing a JSON schema into {LLM::Schema::Leaf}
+   # objects. It is used by {LLM::Schema LLM::Schema} to convert
+   # external JSON schema definitions into the schema objects used
+   # throughout llm.rb.
+   module Parser
+     ##
+     # Parses a JSON schema into an {LLM::Schema::Leaf}.
+     # @param [Hash] schema
+     #  The JSON schema to parse
+     # @raise [TypeError]
+     #  When the schema is not supported
+     # @return [LLM::Schema::Leaf]
+     def parse(schema, root = nil)
+       schema = normalize_schema(schema)
+       root ||= schema
+       schema = resolve_ref(schema, root)
+       case schema["type"]
+       when "object" then apply(parse_object(schema, root), schema)
+       when "array" then apply(parse_array(schema, root), schema)
+       when "string" then apply(parse_string(schema), schema)
+       when "integer" then apply(parse_integer(schema), schema)
+       when "number" then apply(parse_number(schema), schema)
+       when "boolean" then apply(schema().boolean, schema)
+       when "null" then apply(schema().null, schema)
+       else raise TypeError, "unsupported schema type #{schema["type"].inspect}"
+       end
+     end
+
+     private
+
+     def parse_object(schema, root)
+       properties = (schema["properties"] || {})
+         .transform_keys(&:to_s)
+         .transform_values { parse(_1, root) }
+       required = schema["required"] || []
+       required.each do |key|
+         next unless properties[key]
+         properties[key].required
+       end
+       schema().object(properties)
+     end
+
+     def parse_array(schema, root)
+       items = schema["items"] ? parse(schema["items"], root) : schema().null
+       schema().array(items)
+     end
+
+     def parse_string(schema)
+       leaf = schema().string
+       leaf.min(schema["minLength"]) if schema.key?("minLength")
+       leaf.max(schema["maxLength"]) if schema.key?("maxLength")
+       leaf
+     end
+
+     def parse_integer(schema)
+       leaf = schema().integer
+       leaf.min(schema["minimum"]) if schema.key?("minimum")
+       leaf.max(schema["maximum"]) if schema.key?("maximum")
+       leaf.multiple_of(schema["multipleOf"]) if schema.key?("multipleOf")
+       leaf
+     end
+
+     def parse_number(schema)
+       leaf = schema().number
+       leaf.min(schema["minimum"]) if schema.key?("minimum")
+       leaf.max(schema["maximum"]) if schema.key?("maximum")
+       leaf.multiple_of(schema["multipleOf"]) if schema.key?("multipleOf")
+       leaf
+     end
+
+     def apply(leaf, schema)
+       leaf.description(schema["description"]) if schema.key?("description")
+       leaf.default(schema["default"]) if schema.key?("default")
+       leaf.enum(*schema["enum"]) if schema.key?("enum")
+       leaf.const(schema["const"]) if schema.key?("const")
+       leaf
+     end
+
+     def normalize_schema(schema)
+       case schema
+       when LLM::Object
+         normalize_schema(schema.to_h)
+       when Hash
+         schema.each_with_object({}) do |(key, value), out|
+           out[key.to_s] = normalize_schema(value)
+         end
+       when Array
+         schema.map { normalize_schema(_1) }
+       else
+         schema
+       end
+     end
+
+     def resolve_ref(schema, root)
+       return schema unless schema.key?("$ref")
+       ref = schema["$ref"]
+       raise TypeError, "unsupported schema ref #{ref.inspect}" unless ref.start_with?("#/")
+       target = ref.delete_prefix("#/").split("/").reduce(root) { |node, key| node.fetch(key) }
+       normalize_schema(target).merge(schema.except("$ref"))
+     rescue KeyError
+       raise TypeError, "unresolvable schema ref #{ref.inspect}"
+     end
+   end
+ end
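Since schema.rb (next hunk) extends LLM::Schema with this module, an external JSON schema can presumably be converted directly, including local $ref resolution:

  require "llm"

  schema = {
    "type" => "object",
    "properties" => {
      "name" => {"type" => "string", "minLength" => 1},
      "unit" => {"$ref" => "#/$defs/unit"}
    },
    "required" => ["name"],
    "$defs" => {
      "unit" => {"type" => "string", "enum" => ["celsius", "fahrenheit"]}
    }
  }

  leaf = LLM::Schema.parse(schema) # => an LLM::Schema::Object leaf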
data/lib/llm/schema.rb CHANGED
@@ -31,15 +31,20 @@
  # end
  class LLM::Schema
  require_relative "schema/version"
+ require_relative "schema/parser"
  require_relative "schema/leaf"
  require_relative "schema/object"
  require_relative "schema/array"
  require_relative "schema/string"
+ require_relative "schema/enum"
  require_relative "schema/number"
  require_relative "schema/integer"
  require_relative "schema/boolean"
  require_relative "schema/null"

+ @__monitor = Monitor.new
+ extend LLM::Schema::Parser
+
  ##
  # Configures a monitor for a subclass
  # @return [void]
@@ -8,10 +8,10 @@
  #
  # @example
  # #!/usr/bin/env ruby
- # llm = LLM.gemini ENV["KEY"]
- # ses = LLM::Session.new(llm, tools: [LLM::ServerTool.new(:google_search)])
- # ses.talk("Summarize today's news", role: :user)
- # print ses.messages.find(&:assistant?).content, "\n"
+ # llm = LLM.google ENV["KEY"]
+ # ctx = LLM::Context.new(llm, tools: [LLM::ServerTool.new(:google_search)])
+ # ctx.talk("Summarize today's news", role: :user)
+ # print ctx.messages.find(&:assistant?).content, "\n"
  class LLM::ServerTool < Struct.new(:name, :options, :provider)
  ##
  # @return [String]
@@ -24,7 +24,7 @@ class LLM::ServerTool < Struct.new(:name, :options, :provider)
  def to_h
  case provider.class.to_s
  when "LLM::Anthropic" then options.merge("name" => name.to_s)
- when "LLM::Gemini" then {name => options}
+ when "LLM::Google" then {name => options}
  else options.merge("type" => name.to_s)
  end
  end
data/lib/llm/session.rb CHANGED
@@ -1,3 +1,12 @@
  # frozen_string_literal: true

- require_relative "bot"
+ require_relative "context"
+
+ module LLM
+   # Backward-compatible alias for LLM::Context
+   # @deprecated Use {LLM::Context} instead. Scheduled for removal in v6.0.
+   Session = Context
+
+   # Scheduled for removal in v6.0
+   deprecate_constant :Session
+ end
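The rename stays source-compatible: LLM::Session still resolves to LLM::Context, but accessing the constant warns when deprecation warnings are enabled:

  require "llm"

  Warning[:deprecated] = true

  llm = LLM.openai(key: ENV["KEY"])
  ctx = LLM::Session.new(llm) # warning: constant LLM::Session is deprecated
  ctx.class                   # => LLM::Context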
@@ -65,7 +65,7 @@ class LLM::Tool
  leaf.required if required
  leaf.description(description) if description
  leaf.default(default) if default
- leaf.enum(enum) if enum
+ leaf.enum(*enum) if enum
  leaf
  end
  end
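The splat fixes an array-in-array bug, assuming Leaf#enum collects its varargs (as the splats in Schema::Enum and Schema::Parser suggest):

  values = ["celsius", "fahrenheit"]
  leaf = LLM::Schema::String.new
  leaf.enum(values)  # old behaviour: the emitted enum nested the array
  leaf.enum(*values) # fixed: enum contains ["celsius", "fahrenheit"]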
data/lib/llm/tool.rb CHANGED
@@ -22,7 +22,7 @@ class LLM::Tool
  extend LLM::Tool::Param

  types = [
- :Leaf, :String, :Array,
+ :Leaf, :String, :Enum, :Array,
  :Object, :Integer, :Number,
  :Boolean, :Null
  ]
@@ -30,14 +30,87 @@ class LLM::Tool
  const_set constant, LLM::Schema.const_get(constant)
  end

+ ##
+ # @param [LLM::MCP] mcp
+ #  The MCP client that will execute the tool call
+ # @param [Hash] tool
+ #  A tool (as a raw Hash)
+ # @return [Class<LLM::Tool>]
+ #  Returns a subclass of LLM::Tool
+ def self.mcp(mcp, tool)
+   klass = Class.new(LLM::Tool) do
+     name tool["name"]
+     description tool["description"]
+     params { tool["inputSchema"] || {type: "object", properties: {}} }
+
+     define_singleton_method(:inspect) do
+       "<LLM::Tool:0x#{object_id.to_s(16)} name=#{tool["name"]} (mcp)>"
+     end
+     singleton_class.alias_method :to_s, :inspect
+
+     define_singleton_method(:mcp?) do
+       true
+     end
+
+     define_method(:call) do |**args|
+       mcp.call_tool(tool["name"], args)
+     end
+   end
+   unregister(klass)
+ end
+
+ ##
+ # Returns all subclasses of LLM::Tool
+ # @note
+ #  This method excludes tools that haven't defined a name,
+ #  as well as tools defined via MCP.
+ # @return [Array<LLM::Tool>]
+ def self.registry
+   lock do
+     @registry.select(&:name)
+   end
+ end
+ @registry = []
+
+ ##
+ # Clear the registry
+ # @return [void]
+ def self.clear_registry!
+   lock do
+     @registry.clear
+     nil
+   end
+ end
+
+ ##
+ # Register a tool in the registry
+ # @param [LLM::Tool] tool
+ # @api private
+ def self.register(tool)
+   lock do
+     @registry << tool
+   end
+ end
+
+ ##
+ # Unregister a tool from the registry
+ # @param [LLM::Tool] tool
+ # @api private
+ def self.unregister(tool)
+   lock do
+     @registry.delete(tool)
+   end
+ end
+
  ##
  # Registers the tool as a function when inherited
  # @param [Class] klass The subclass
  # @return [void]
- def self.inherited(klass)
+ def self.inherited(tool)
  LLM.lock(:inherited) do
- klass.instance_eval { @__monitor ||= Monitor.new }
- klass.function.register(klass)
+ tool.instance_eval { @__monitor ||= Monitor.new }
+ tool.function.register(tool)
+ LLM::Tool.register(tool)
  end
  end

@@ -75,7 +148,7 @@ class LLM::Tool
  # @api private
  def self.function
  lock do
- @function ||= LLM::Function.new(self)
+ @function ||= LLM::Function.new(nil)
  end
  end

@@ -84,4 +157,12 @@ class LLM::Tool
  def self.lock(&)
  @__monitor.synchronize(&)
  end
+ @__monitor = Monitor.new
+
+ ##
+ # Returns true if the tool is an MCP tool
+ # @return [Boolean]
+ def self.mcp?
+   false
+ end
  end
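A hedged sketch of the new registry behaviour: defining a named subclass registers it, LLM::Tool.registry lists it, and MCP-derived tools stay out of the registry (self.mcp unregisters the anonymous class it returns) while reporting mcp? as true. MyTool is hypothetical:

  require "llm"

  # Hypothetical tool, for illustration only
  class MyTool < LLM::Tool
    name "my_tool"
    description "Does something"
  end

  LLM::Tool.registry.include?(MyTool) # => true
  MyTool.mcp?                         # => false
  LLM::Tool.clear_registry!           # empties the registry again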