anthropic 1.13.0 → 1.14.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +19 -0
  3. data/README.md +31 -1
  4. data/lib/anthropic/helpers/input_schema/base_model.rb +6 -3
  5. data/lib/anthropic/helpers/input_schema/json_schema_converter.rb +9 -3
  6. data/lib/anthropic/helpers/input_schema/supported_schemas.rb +106 -0
  7. data/lib/anthropic/helpers/input_schema/union_of.rb +3 -1
  8. data/lib/anthropic/helpers/messages.rb +107 -0
  9. data/lib/anthropic/helpers/streaming/message_stream.rb +54 -43
  10. data/lib/anthropic/helpers/tools/base_tool.rb +82 -0
  11. data/lib/anthropic/helpers/tools/runner.rb +156 -0
  12. data/lib/anthropic/helpers/tools.rb +5 -0
  13. data/lib/anthropic/internal/transport/base_client.rb +7 -1
  14. data/lib/anthropic/internal/transport/pooled_net_requester.rb +6 -2
  15. data/lib/anthropic/models/beta/beta_tool_use_block.rb +14 -0
  16. data/lib/anthropic/models/tool_use_block.rb +6 -6
  17. data/lib/anthropic/resources/beta/messages.rb +23 -5
  18. data/lib/anthropic/resources/messages.rb +7 -81
  19. data/lib/anthropic/version.rb +1 -1
  20. data/lib/anthropic.rb +15 -10
  21. data/manifest.yaml +1 -0
  22. data/rbi/anthropic/helpers/input_schema/base_model.rbi +7 -2
  23. data/rbi/anthropic/helpers/tools/base_tool.rbi +51 -0
  24. data/rbi/anthropic/helpers/tools/runner.rbi +40 -0
  25. data/rbi/anthropic/helpers/tools.rbi +5 -0
  26. data/rbi/anthropic/internal/transport/base_client.rbi +5 -0
  27. data/rbi/anthropic/internal/transport/pooled_net_requester.rbi +6 -2
  28. data/rbi/anthropic/internal/type/base_model.rbi +8 -4
  29. data/rbi/anthropic/models/tool_use_block.rbi +3 -0
  30. data/rbi/anthropic/resources/beta/messages.rbi +296 -0
  31. data/sig/anthropic/internal/transport/base_client.rbs +2 -0
  32. data/sig/anthropic/internal/transport/pooled_net_requester.rbs +4 -1
  33. metadata +11 -4
  34. data/lib/anthropic/helpers/input_schema/property_mapping.rb +0 -47
  35. data/rbi/anthropic/helpers/{structured_output.rbi → input_schema.rbi} +0 -0
data/rbi/anthropic/internal/type/base_model.rbi CHANGED
@@ -31,7 +31,7 @@ module Anthropic
  #
  # Assumes superclass fields are totally defined before fields are accessed /
  # defined on subclasses.
- sig { params(child: T.self_type).void }
+ sig { params(child: Anthropic::Internal::Type::BaseModel).void }
  def inherited(child)
  end

@@ -276,9 +276,13 @@ module Anthropic

  # Create a new instance of a model.
  sig do
- params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns(
- T.attached_class
- )
+ params(
+ data:
+ T.any(
+ T::Hash[Symbol, T.anything],
+ Anthropic::Internal::Type::BaseModel
+ )
+ ).returns(T.attached_class)
  end
  def self.new(data = {})
  end
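The widened `.new` signature above says a model can now be constructed either from a `Symbol`-keyed hash or from another `BaseModel` instance. A minimal sketch using a public model class from this gem; the attribute values are illustrative only:

```ruby
require "anthropic"

# Construct from a Symbol-keyed hash, as before...
block = Anthropic::ToolUseBlock.new(
  id: "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
  name: "get_stock_price",
  input: { ticker: "^GSPC" },
  type: :tool_use
)

# ...or, per the widened parameter type, from another model instance.
copy = Anthropic::ToolUseBlock.new(block)
```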
data/rbi/anthropic/models/tool_use_block.rbi CHANGED
@@ -20,6 +20,9 @@ module Anthropic
  sig { returns(Symbol) }
  attr_accessor :type

+ sig { returns(T.nilable(T.anything)) }
+ attr_accessor :parsed
+
  sig do
  params(
  id: String,
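The new `parsed` accessor is typed as `T.nilable(T.anything)`; its beta counterpart is added in `beta_tool_use_block.rb` above. A small sketch of reading it off a response, assuming (this diff does not say so explicitly) that it holds the tool input coerced by the input-schema helpers:

```ruby
# `message` is assumed to be an Anthropic::Message returned by the client.
message.content.each do |block|
  next unless block.is_a?(Anthropic::ToolUseBlock)

  puts block.name           # tool name chosen by the model
  puts block.input.inspect  # raw tool input as a Hash
  puts block.parsed.inspect # nil, or the parsed/typed input (assumption)
end
```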
data/rbi/anthropic/resources/beta/messages.rbi CHANGED
@@ -7,6 +7,302 @@ module Anthropic
  sig { returns(Anthropic::Resources::Beta::Messages::Batches) }
  attr_reader :batches

+ sig do
+ params(
+ max_tokens: Integer,
+ messages: T::Array[Anthropic::Beta::BetaMessageParam::OrHash],
+ model: T.any(Anthropic::Model::OrSymbol, String),
+ container:
+ T.nilable(
+ T.any(Anthropic::Beta::BetaContainerParams::OrHash, String)
+ ),
+ context_management:
+ T.nilable(Anthropic::Beta::BetaContextManagementConfig::OrHash),
+ mcp_servers:
+ T::Array[
+ Anthropic::Beta::BetaRequestMCPServerURLDefinition::OrHash
+ ],
+ metadata: Anthropic::Beta::BetaMetadata::OrHash,
+ service_tier:
+ Anthropic::Beta::MessageCreateParams::ServiceTier::OrSymbol,
+ stop_sequences: T::Array[String],
+ system_: Anthropic::Beta::MessageCreateParams::System::Variants,
+ temperature: Float,
+ thinking:
+ T.any(
+ Anthropic::Beta::BetaThinkingConfigEnabled::OrHash,
+ Anthropic::Beta::BetaThinkingConfigDisabled::OrHash
+ ),
+ tool_choice:
+ T.any(
+ Anthropic::Beta::BetaToolChoiceAuto::OrHash,
+ Anthropic::Beta::BetaToolChoiceAny::OrHash,
+ Anthropic::Beta::BetaToolChoiceTool::OrHash,
+ Anthropic::Beta::BetaToolChoiceNone::OrHash
+ ),
+ tools:
+ T::Array[
+ T.any(
+ Anthropic::Beta::BetaTool::OrHash,
+ Anthropic::Beta::BetaToolBash20241022::OrHash,
+ Anthropic::Beta::BetaToolBash20250124::OrHash,
+ Anthropic::Beta::BetaCodeExecutionTool20250522::OrHash,
+ Anthropic::Beta::BetaCodeExecutionTool20250825::OrHash,
+ Anthropic::Beta::BetaToolComputerUse20241022::OrHash,
+ Anthropic::Beta::BetaMemoryTool20250818::OrHash,
+ Anthropic::Beta::BetaToolComputerUse20250124::OrHash,
+ Anthropic::Beta::BetaToolTextEditor20241022::OrHash,
+ Anthropic::Beta::BetaToolTextEditor20250124::OrHash,
+ Anthropic::Beta::BetaToolTextEditor20250429::OrHash,
+ Anthropic::Beta::BetaToolTextEditor20250728::OrHash,
+ Anthropic::Beta::BetaWebSearchTool20250305::OrHash,
+ Anthropic::Beta::BetaWebFetchTool20250910::OrHash
+ )
+ ],
+ top_k: Integer,
+ top_p: Float,
+ betas: T::Array[T.any(String, Anthropic::AnthropicBeta::OrSymbol)],
+ stream: T.noreturn,
+ request_options: Anthropic::RequestOptions::OrHash
+ ).returns(Anthropic::Helpers::Tools::Runner)
+ end
+ def tool_runner(
+ # Body param: The maximum number of tokens to generate before stopping.
+ #
+ # Note that our models may stop _before_ reaching this maximum. This parameter
+ # only specifies the absolute maximum number of tokens to generate.
+ #
+ # Different models have different maximum values for this parameter. See
+ # [models](https://docs.claude.com/en/docs/models-overview) for details.
+ max_tokens:,
+ # Body param: Input messages.
+ #
+ # Our models are trained to operate on alternating `user` and `assistant`
+ # conversational turns. When creating a new `Message`, you specify the prior
+ # conversational turns with the `messages` parameter, and the model then generates
+ # the next `Message` in the conversation. Consecutive `user` or `assistant` turns
+ # in your request will be combined into a single turn.
+ #
+ # Each input message must be an object with a `role` and `content`. You can
+ # specify a single `user`-role message, or you can include multiple `user` and
+ # `assistant` messages.
+ #
+ # If the final message uses the `assistant` role, the response content will
+ # continue immediately from the content in that message. This can be used to
+ # constrain part of the model's response.
+ #
+ # Example with a single `user` message:
+ #
+ # ```json
+ # [{ "role": "user", "content": "Hello, Claude" }]
+ # ```
+ #
+ # Example with multiple conversational turns:
+ #
+ # ```json
+ # [
+ # { "role": "user", "content": "Hello there." },
+ # { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
+ # { "role": "user", "content": "Can you explain LLMs in plain English?" }
+ # ]
+ # ```
+ #
+ # Example with a partially-filled response from Claude:
+ #
+ # ```json
+ # [
+ # {
+ # "role": "user",
+ # "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ # },
+ # { "role": "assistant", "content": "The best answer is (" }
+ # ]
+ # ```
+ #
+ # Each input message `content` may be either a single `string` or an array of
+ # content blocks, where each block has a specific `type`. Using a `string` for
+ # `content` is shorthand for an array of one content block of type `"text"`. The
+ # following input messages are equivalent:
+ #
+ # ```json
+ # { "role": "user", "content": "Hello, Claude" }
+ # ```
+ #
+ # ```json
+ # { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
+ # ```
+ #
+ # See [input examples](https://docs.claude.com/en/api/messages-examples).
+ #
+ # Note that if you want to include a
+ # [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ # top-level `system` parameter — there is no `"system"` role for input messages in
+ # the Messages API.
+ #
+ # There is a limit of 100,000 messages in a single request.
+ messages:,
+ # Body param: The model that will complete your prompt.\n\nSee
+ # [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ # details and options.
+ model:,
+ # Body param: Container identifier for reuse across requests.
+ container: nil,
+ # Body param: Context management configuration.
+ #
+ # This allows you to control how Claude manages context across multiple requests,
+ # such as whether to clear function results or not.
+ context_management: nil,
+ # Body param: MCP servers to be utilized in this request
+ mcp_servers: nil,
+ # Body param: An object describing metadata about the request.
+ metadata: nil,
+ # Body param: Determines whether to use priority capacity (if available) or
+ # standard capacity for this request.
+ #
+ # Anthropic offers different levels of service for your API requests. See
+ # [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
+ service_tier: nil,
+ # Body param: Custom text sequences that will cause the model to stop generating.
+ #
+ # Our models will normally stop when they have naturally completed their turn,
+ # which will result in a response `stop_reason` of `"end_turn"`.
+ #
+ # If you want the model to stop generating when it encounters custom strings of
+ # text, you can use the `stop_sequences` parameter. If the model encounters one of
+ # the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
+ # and the response `stop_sequence` value will contain the matched stop sequence.
+ stop_sequences: nil,
+ # Body param: System prompt.
+ #
+ # A system prompt is a way of providing context and instructions to Claude, such
+ # as specifying a particular goal or role. See our
+ # [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
+ system_: nil,
+ # Body param: Amount of randomness injected into the response.
+ #
+ # Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ # for analytical / multiple choice, and closer to `1.0` for creative and
+ # generative tasks.
+ #
+ # Note that even with `temperature` of `0.0`, the results will not be fully
+ # deterministic.
+ temperature: nil,
+ # Body param: Configuration for enabling Claude's extended thinking.
+ #
+ # When enabled, responses include `thinking` content blocks showing Claude's
+ # thinking process before the final answer. Requires a minimum budget of 1,024
+ # tokens and counts towards your `max_tokens` limit.
+ #
+ # See
+ # [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
+ # for details.
+ thinking: nil,
+ # Body param: How the model should use the provided tools. The model can use a
+ # specific tool, any available tool, decide by itself, or not use tools at all.
+ tool_choice: nil,
+ # Body param: Definitions of tools that the model may use.
+ #
+ # If you include `tools` in your API request, the model may return `tool_use`
+ # content blocks that represent the model's use of those tools. You can then run
+ # those tools using the tool input generated by the model and then optionally
+ # return results back to the model using `tool_result` content blocks.
+ #
+ # There are two types of tools: **client tools** and **server tools**. The
+ # behavior described below applies to client tools. For
+ # [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ # see their individual documentation as each has its own behavior (e.g., the
+ # [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ #
+ # Each tool definition includes:
+ #
+ # - `name`: Name of the tool.
+ # - `description`: Optional, but strongly-recommended description of the tool.
+ # - `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
+ # tool `input` shape that the model will produce in `tool_use` output content
+ # blocks.
+ #
+ # For example, if you defined `tools` as:
+ #
+ # ```json
+ # [
+ # {
+ # "name": "get_stock_price",
+ # "description": "Get the current stock price for a given ticker symbol.",
+ # "input_schema": {
+ # "type": "object",
+ # "properties": {
+ # "ticker": {
+ # "type": "string",
+ # "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ # }
+ # },
+ # "required": ["ticker"]
+ # }
+ # }
+ # ]
+ # ```
+ #
+ # And then asked the model "What's the S&P 500 at today?", the model might produce
+ # `tool_use` content blocks in the response like this:
+ #
+ # ```json
+ # [
+ # {
+ # "type": "tool_use",
+ # "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ # "name": "get_stock_price",
+ # "input": { "ticker": "^GSPC" }
+ # }
+ # ]
+ # ```
+ #
+ # You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ # input, and return the following back to the model in a subsequent `user`
+ # message:
+ #
+ # ```json
+ # [
+ # {
+ # "type": "tool_result",
+ # "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ # "content": "259.75 USD"
+ # }
+ # ]
+ # ```
+ #
+ # Tools can be used for workflows that include running client-side tools and
+ # functions, or more generally whenever you want the model to produce a particular
+ # JSON structure of output.
+ #
+ # See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
+ tools: nil,
+ # Body param: Only sample from the top K options for each subsequent token.
+ #
+ # Used to remove "long tail" low probability responses.
+ # [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ #
+ # Recommended for advanced use cases only. You usually only need to use
+ # `temperature`.
+ top_k: nil,
+ # Body param: Use nucleus sampling.
+ #
+ # In nucleus sampling, we compute the cumulative distribution over all the options
+ # for each subsequent token in decreasing probability order and cut it off once it
+ # reaches a particular probability specified by `top_p`. You should either alter
+ # `temperature` or `top_p`, but not both.
+ #
+ # Recommended for advanced use cases only. You usually only need to use
+ # `temperature`.
+ top_p: nil,
+ # Header param: Optional header to specify the beta version(s) you want to use.
+ betas: nil,
+ # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
+ # for streaming and non-streaming use cases, respectively.
+ stream: false,
+ request_options: {}
+ )
+ end
+
  # See {Anthropic::Resources::Beta::Messages#stream_raw} for streaming counterpart.
  #
  # Send a structured list of input messages with text and/or image content, and the
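Together with the new `helpers/tools/base_tool.rb` and `helpers/tools/runner.rb` in the file list, this `tool_runner` method appears to be the entry point for the tool-running helper introduced in 1.14.0. A rough sketch of a call under that assumption; the `Runner` iteration API and the way `Anthropic::Helpers::Tools::BaseTool` subclasses are declared are not visible in this diff, so only the plain hash tool shape allowed by the signature above is used:

```ruby
require "anthropic"

client = Anthropic::Client.new # reads ANTHROPIC_API_KEY from the environment

runner = client.beta.messages.tool_runner(
  model: "claude-sonnet-4-5",
  max_tokens: 1024,
  messages: [{ role: "user", content: "What's the S&P 500 at today?" }],
  # A plain tool definition, as permitted by the signature above. Presumably
  # Anthropic::Helpers::Tools::BaseTool subclasses can be supplied here too,
  # but that part of the interface is not shown in this diff.
  tools: [
    {
      name: "get_stock_price",
      description: "Get the current stock price for a given ticker symbol.",
      input_schema: {
        type: "object",
        properties: {
          ticker: { type: "string", description: "Ticker symbol, e.g. AAPL" }
        },
        required: ["ticker"]
      }
    }
  ]
)

# `runner` is an Anthropic::Helpers::Tools::Runner; it is expected to drive the
# tool_use -> run tool -> tool_result loop described in the comments above.
```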
data/sig/anthropic/internal/transport/base_client.rbs CHANGED
@@ -87,6 +87,8 @@ module Anthropic

  private def auth_headers: -> ::Hash[String, String]

+ private def user_agent: -> String
+
  private def generate_idempotency_key: -> String

  private def build_request: (
data/sig/anthropic/internal/transport/pooled_net_requester.rbs CHANGED
@@ -17,7 +17,10 @@ module Anthropic

  DEFAULT_MAX_CONNECTIONS: Integer

- def self.connect: (URI::Generic url) -> top
+ def self.connect: (
+ cert_store: OpenSSL::X509::Store,
+ url: URI::Generic
+ ) -> top

  def self.calibrate_socket_timeout: (top conn, Float deadline) -> void

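The `PooledNetRequester.connect` change means the requester now receives an explicit `cert_store:` alongside the URL, matching the `base_client` changes earlier in the file list. For reference, a store of the kind this keyword expects can be built with stdlib OpenSSL alone; how the SDK itself constructs and threads its store through is not shown here:

```ruby
require "openssl"

# System default CA certificates, optionally extended with a private bundle.
cert_store = OpenSSL::X509::Store.new
cert_store.set_default_paths
cert_store.add_file("internal-ca.pem") if File.exist?("internal-ca.pem")
```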
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: anthropic
  version: !ruby/object:Gem::Version
- version: 1.13.0
+ version: 1.14.0
  platform: ruby
  authors:
  - Anthropic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-10-29 00:00:00.000000000 Z
+ date: 2025-11-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: connection_pool
@@ -48,11 +48,15 @@ files:
  - lib/anthropic/helpers/input_schema/enum_of.rb
  - lib/anthropic/helpers/input_schema/json_schema_converter.rb
  - lib/anthropic/helpers/input_schema/parsed_json.rb
- - lib/anthropic/helpers/input_schema/property_mapping.rb
+ - lib/anthropic/helpers/input_schema/supported_schemas.rb
  - lib/anthropic/helpers/input_schema/union_of.rb
+ - lib/anthropic/helpers/messages.rb
  - lib/anthropic/helpers/streaming.rb
  - lib/anthropic/helpers/streaming/events.rb
  - lib/anthropic/helpers/streaming/message_stream.rb
+ - lib/anthropic/helpers/tools.rb
+ - lib/anthropic/helpers/tools/base_tool.rb
+ - lib/anthropic/helpers/tools/runner.rb
  - lib/anthropic/helpers/vertex/client.rb
  - lib/anthropic/input_schema.rb
  - lib/anthropic/internal.rb
@@ -434,6 +438,7 @@ files:
  - rbi/anthropic/errors.rbi
  - rbi/anthropic/file_part.rbi
  - rbi/anthropic/helpers/bedrock/client.rbi
+ - rbi/anthropic/helpers/input_schema.rbi
  - rbi/anthropic/helpers/input_schema/array_of.rbi
  - rbi/anthropic/helpers/input_schema/base_model.rbi
  - rbi/anthropic/helpers/input_schema/boolean.rbi
@@ -442,7 +447,9 @@ files:
  - rbi/anthropic/helpers/input_schema/union_of.rbi
  - rbi/anthropic/helpers/streaming/events.rbi
  - rbi/anthropic/helpers/streaming/message_stream.rbi
- - rbi/anthropic/helpers/structured_output.rbi
+ - rbi/anthropic/helpers/tools.rbi
+ - rbi/anthropic/helpers/tools/base_tool.rbi
+ - rbi/anthropic/helpers/tools/runner.rbi
  - rbi/anthropic/helpers/vertex/client.rbi
  - rbi/anthropic/input_schema.rbi
  - rbi/anthropic/internal.rbi
data/lib/anthropic/helpers/input_schema/property_mapping.rb DELETED
@@ -1,47 +0,0 @@
- # frozen_string_literal: true
-
- module Anthropic
- module Helpers
- module InputSchema
- # rubocop:disable Style/MutableConstant
- PROPERTY_MAPPING = {
- # Numeric properties:
- minimum: :minimum,
- maximum: :maximum,
- exclusive_minimum: :exclusiveMinimum,
- exclusive_maximum: :exclusiveMaximum,
- multiple_of: :multipleOf,
-
- # String properties:
- min_length: :minLength,
- max_length: :maxLength,
- pattern: :pattern,
- format: :format,
- content_media_type: :contentMediaType,
- content_encoding: :contentEncoding,
-
- # Array properties:
- min_items: :minItems,
- max_items: :maxItems,
- unique_items: :uniqueItems,
- prefix_items: :prefixItems,
- contains: :contains,
- min_contains: :minContains,
- max_contains: :maxContains,
-
- # Object properties:
- pattern_properties: :patternProperties,
- dependent_schemas: :dependentSchemas,
- dependent_required: :dependentRequired,
- property_names: :propertyNames,
-
- # Metadata:
- default: :default,
- examples: :examples,
-
- doc: :description
- }
- # rubocop:enable Style/MutableConstant
- end
- end
- end
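For context, the deleted `PROPERTY_MAPPING` table translated Ruby-style keyword names into their JSON Schema spellings; its role is taken over by the new `supported_schemas.rb`, which this diff does not show. A small illustration of the kind of conversion such a table drives, using a few entries from the removed constant (the helper method here is hypothetical, not the gem's actual converter):

```ruby
# A few entries from the removed constant: snake_case name -> JSON Schema keyword.
PROPERTY_MAPPING = { min_length: :minLength, max_length: :maxLength, doc: :description }.freeze

# Hypothetical helper: rewrite constraint keys into JSON Schema form.
def to_json_schema_fragment(type:, **constraints)
  constraints.each_with_object({ type: type }) do |(key, value), fragment|
    fragment[PROPERTY_MAPPING.fetch(key, key)] = value # unknown keys pass through
  end
end

to_json_schema_fragment(type: "string", min_length: 1, doc: "Ticker symbol")
# => {:type=>"string", :minLength=>1, :description=>"Ticker symbol"}
```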