ruby-anthropic 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.circleci/config.yml +45 -0
- data/.github/ISSUE_TEMPLATE/bug_report.md +38 -0
- data/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
- data/.github/dependabot.yml +15 -0
- data/.gitignore +16 -0
- data/.rspec +3 -0
- data/.rubocop.yml +30 -0
- data/CHANGELOG.md +63 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/CONTRIBUTING.md +3 -0
- data/Gemfile +13 -0
- data/Gemfile.lock +88 -0
- data/LICENSE.txt +21 -0
- data/README.md +413 -0
- data/Rakefile +19 -0
- data/anthropic.gemspec +31 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/lib/anthropic/client.rb +113 -0
- data/lib/anthropic/compatibility.rb +10 -0
- data/lib/anthropic/http.rb +200 -0
- data/lib/anthropic/http_headers.rb +38 -0
- data/lib/anthropic/messages/batches.rb +112 -0
- data/lib/anthropic/messages/client.rb +13 -0
- data/lib/anthropic/version.rb +3 -0
- data/lib/anthropic.rb +72 -0
- data/lib/ruby/anthropic.rb +2 -0
- data/pull_request_template.md +5 -0
- metadata +123 -0
data/README.md
ADDED
@@ -0,0 +1,413 @@
|
|
1
|
+
# Anthropic
|
2
|
+
|
3
|
+
[](https://badge.fury.io/rb/ruby-anthropic)
|
4
|
+
[](https://github.com/alexrudall/ruby-anthropic/blob/main/LICENSE.txt)
|
5
|
+
[](https://circleci.com/gh/alexrudall/ruby-anthropic)
|
6
|
+
|
7
|
+
> [!IMPORTANT]
|
8
|
+
> This gem has been renamed from `anthropic` to `ruby-anthropic`, to make way for the new [official Anthropic Ruby SDK](https://github.com/anthropics/anthropic-sdk-ruby) 🎉
|
9
|
+
> If you wish to keep using this gem, you just need to update your Gemfile from `anthropic` to `ruby-anthropic` and version `0.4.2` or greater. No other changes are needed and it will continue to work as normal.
|
10
|
+
|
11
|
+
Use the [Anthropic API](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) with Ruby! 🤖🌌
|
12
|
+
|
13
|
+
You can get access to the API [here](https://docs.anthropic.com/claude/docs/getting-access-to-claude).
|
14
|
+
|
15
|
+
[🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🤖 OpenAI Gem](https://github.com/alexrudall/ruby-openai) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
|
16
|
+
|
17
|
+
### Bundler
|
18
|
+
|
19
|
+
Add this line to your application's Gemfile:
|
20
|
+
|
21
|
+
```ruby
|
22
|
+
gem "ruby-anthropic"
|
23
|
+
```
|
24
|
+
|
25
|
+
And then execute:
|
26
|
+
|
27
|
+
$ bundle install
|
28
|
+
|
29
|
+
### Gem install
|
30
|
+
|
31
|
+
Or install with:
|
32
|
+
|
33
|
+
$ gem install ruby-anthropic
|
34
|
+
|
35
|
+
and require with:
|
36
|
+
|
37
|
+
```ruby
|
38
|
+
require "anthropic"
|
39
|
+
```
|
40
|
+
|
41
|
+
## Usage
|
42
|
+
|
43
|
+
- Get your API key from [https://console.anthropic.com/account/keys](https://console.anthropic.com/account/keys)
|
44
|
+
|
45
|
+
### Quickstart
|
46
|
+
|
47
|
+
For a quick test you can pass your token directly to a new client:
|
48
|
+
|
49
|
+
```ruby
|
50
|
+
client = Anthropic::Client.new(
|
51
|
+
access_token: "access_token_goes_here",
|
52
|
+
log_errors: true # Highly recommended in development, so you can see what errors Anthropic is returning. Not recommended in production because it could leak private data to your logs.
|
53
|
+
)
|
54
|
+
```
|
55
|
+
|
56
|
+
### With Config
|
57
|
+
|
58
|
+
For a more robust setup, you can configure the gem with your API keys, for example in an `anthropic.rb` initializer file. Never hardcode secrets into your codebase - instead use something like [dotenv](https://github.com/motdotla/dotenv) to pass the keys safely into your environments or rails credentials if you are using this in a rails project.
|
59
|
+
|
60
|
+
```ruby
|
61
|
+
Anthropic.configure do |config|
|
62
|
+
# With dotenv
|
63
|
+
config.access_token = ENV.fetch("ANTHROPIC_API_KEY"),
|
64
|
+
# OR
|
65
|
+
# With Rails credentials
|
66
|
+
config.access_token = Rails.application.credentials.dig(:anthropic, :api_key),
|
67
|
+
config.log_errors = true # Highly recommended in development, so you can see what errors Anthropic is returning. Not recommended in production because it could leak private data to your logs.
|
68
|
+
end
|
69
|
+
```
|
70
|
+
|
71
|
+
Then you can create a client like this:
|
72
|
+
|
73
|
+
```ruby
|
74
|
+
client = Anthropic::Client.new
|
75
|
+
```
|
76
|
+
|
77
|
+
#### Change version or timeout
|
78
|
+
|
79
|
+
You can change to a different dated version (different from the URL version which is just `v1`) of Anthropic's API by passing `anthropic_version` when initializing the client. If you don't the default latest will be used, which is "2023-06-01". [More info](https://docs.anthropic.com/claude/reference/versioning)
|
80
|
+
|
81
|
+
The default timeout for any request using this library is 120 seconds. You can change that by passing a number of seconds to the `request_timeout` when initializing the client.
|
82
|
+
|
83
|
+
```ruby
|
84
|
+
client = Anthropic::Client.new(
|
85
|
+
access_token: "access_token_goes_here",
|
86
|
+
log_errors: true, # Optional
|
87
|
+
anthropic_version: "2023-01-01", # Optional
|
88
|
+
request_timeout: 240 # Optional
|
89
|
+
)
|
90
|
+
```
|
91
|
+
|
92
|
+
You can also set these keys when configuring the gem:
|
93
|
+
|
94
|
+
```ruby
|
95
|
+
Anthropic.configure do |config|
|
96
|
+
config.access_token = ENV.fetch("ANTHROPIC_API_KEY")
|
97
|
+
config.anthropic_version = "2023-01-01" # Optional
|
98
|
+
config.request_timeout = 240 # Optional
|
99
|
+
end
|
100
|
+
```
|
101
|
+
|
102
|
+
#### Logging
|
103
|
+
|
104
|
+
##### Errors
|
105
|
+
By default, the `anthropic` gem does not log any `Faraday::Error`s encountered while executing a network request to avoid leaking data (e.g. 400s, 500s, SSL errors and more - see [here](https://www.rubydoc.info/github/lostisland/faraday/Faraday/Error) for a complete list of subclasses of `Faraday::Error` and what can cause them).
|
106
|
+
|
107
|
+
If you would like to enable this functionality, you can set `log_errors` to `true` when configuring the client:
|
108
|
+
```ruby
|
109
|
+
client = Anthropic::Client.new(log_errors: true)
|
110
|
+
```
|
111
|
+
|
112
|
+
### Models
|
113
|
+
|
114
|
+
Available Models:
|
115
|
+
|
116
|
+
| Name | API Name |
|
117
|
+
| --------------- | ------------------------ |
|
118
|
+
| Claude 3 Opus | claude-3-opus-20240229 |
|
119
|
+
| Claude 3 Sonnet | claude-3-sonnet-20240229 |
|
120
|
+
| Claude 3 Haiku | claude-3-haiku-20240307 |
|
121
|
+
|
122
|
+
You can find the latest model names in the [Anthropic API documentation](https://docs.anthropic.com/claude/docs/models-overview#model-recommendations).
|
123
|
+
|
124
|
+
### Messages
|
125
|
+
|
126
|
+
```
|
127
|
+
POST https://api.anthropic.com/v1/messages
|
128
|
+
```
|
129
|
+
|
130
|
+
Send a sequence of messages (user or assistant) to the API and receive a message in response.
|
131
|
+
|
132
|
+
```ruby
|
133
|
+
response = client.messages(
|
134
|
+
parameters: {
|
135
|
+
model: "claude-3-haiku-20240307", # claude-3-opus-20240229, claude-3-sonnet-20240229
|
136
|
+
system: "Respond only in Spanish.",
|
137
|
+
messages: [
|
138
|
+
{"role": "user", "content": "Hello, Claude!"}
|
139
|
+
],
|
140
|
+
max_tokens: 1000
|
141
|
+
}
|
142
|
+
)
|
143
|
+
# => {
|
144
|
+
# => "id" => "msg_0123MiRVCgSG2PaQZwCGbgmV",
|
145
|
+
# => "type" => "message",
|
146
|
+
# => "role" => "assistant",
|
147
|
+
# => "content" => [{"type"=>"text", "text"=>"¡Hola! Es un gusto saludarte. ¿En qué puedo ayudarte hoy?"}],
|
148
|
+
# => "model" => "claude-3-haiku-20240307",
|
149
|
+
# => "stop_reason" => "end_turn",
|
150
|
+
# => "stop_sequence" => nil,
|
151
|
+
# => "usage" => {"input_tokens"=>17, "output_tokens"=>32}
|
152
|
+
# => }
|
153
|
+
```
|
154
|
+
|
155
|
+
### Batches
|
156
|
+
The Batches API can be used to process multiple Messages API requests at once. Once a Message Batch is created, it begins processing as soon as possible.
|
157
|
+
|
158
|
+
Batches can contain up to 100,000 requests or 256 MB in total size, whichever limit is reached first.
|
159
|
+
|
160
|
+
Create a batch of message requests:
|
161
|
+
```ruby
|
162
|
+
response = client.messages.batches.create(
|
163
|
+
parameters: {
|
164
|
+
requests: [
|
165
|
+
{
|
166
|
+
custom_id: "my-first-request",
|
167
|
+
params: {
|
168
|
+
model: "claude-3-haiku-20240307",
|
169
|
+
max_tokens: 1024,
|
170
|
+
messages: [
|
171
|
+
{ role: "user", content: "Hello, world" }
|
172
|
+
]
|
173
|
+
}
|
174
|
+
},
|
175
|
+
{
|
176
|
+
custom_id: "my-second-request",
|
177
|
+
params: {
|
178
|
+
model: "claude-3-haiku-20240307",
|
179
|
+
max_tokens: 1024,
|
180
|
+
messages: [
|
181
|
+
{ role: "user", content: "Hi again, friend" }
|
182
|
+
]
|
183
|
+
}
|
184
|
+
}
|
185
|
+
]
|
186
|
+
}
|
187
|
+
)
|
188
|
+
batch_id = response["id"]
|
189
|
+
```
|
190
|
+
|
191
|
+
You can retrieve information about a specific batch:
|
192
|
+
```ruby
|
193
|
+
batch = client.messages.batches.get(id: batch_id)
|
194
|
+
```
|
195
|
+
|
196
|
+
To cancel a batch that is in progress:
|
197
|
+
```ruby
|
198
|
+
client.messages.batches.cancel(id: batch_id)
|
199
|
+
```
|
200
|
+
|
201
|
+
List all batches:
|
202
|
+
```ruby
|
203
|
+
client.messages.batches.list
|
204
|
+
```
|
205
|
+
|
206
|
+
#### Notes (as of 31 March 2025)
|
207
|
+
|
208
|
+
- If individual batch items have errors, you will not be billed for those specific items.
|
209
|
+
- Batches will be listed in the account indefinitely.
|
210
|
+
- Results are fetchable only within 29 days after batch creation.
|
211
|
+
- When you cancel a batch, any unprocessed items will not be billed.
|
212
|
+
- Batches in other workspaces are not accessible with API keys from a different workspace.
|
213
|
+
- If a batch item takes more than 24 hours to process, it will expire and not be billed.
|
214
|
+
|
215
|
+
Once processing ends, you can fetch the results:
|
216
|
+
```ruby
|
217
|
+
results = client.messages.batches.results(id: batch_id)
|
218
|
+
```
|
219
|
+
|
220
|
+
Results are returned as a .jsonl file, with each line containing the result of a single request. Results may be in any order - use the custom_id field to match results to requests.
|
221
|
+
|
222
|
+
### Vision
|
223
|
+
|
224
|
+
The Claude 3 family of models comes with vision capabilities that allow Claude to understand and analyze images.
|
225
|
+
|
226
|
+
Transform an image to base64 and send to the API.
|
227
|
+
|
228
|
+
```ruby
|
229
|
+
require 'base64'
|
230
|
+
|
231
|
+
image = File.open(FILE_PATH, 'rb') { |file| file.read }
|
232
|
+
|
233
|
+
imgbase64 = Base64.strict_encode64(image)
|
234
|
+
|
235
|
+
response = client.messages(
|
236
|
+
parameters: {
|
237
|
+
model: "claude-3-haiku-20240307", # claude-3-opus-20240229, claude-3-sonnet-20240229
|
238
|
+
system: "Respond only in Spanish.",
|
239
|
+
messages: [
|
240
|
+
{"role": "user", "content": [
|
241
|
+
{
|
242
|
+
"type":"image","source":
|
243
|
+
{"type":"base64","media_type":"image/png","data":imgbase64}
|
244
|
+
},
|
245
|
+
{"type":"text","text":"What is this"}
|
246
|
+
]
|
247
|
+
}
|
248
|
+
],
|
249
|
+
max_tokens: 1000
|
250
|
+
}
|
251
|
+
)
|
252
|
+
|
253
|
+
# => {
|
254
|
+
# => "id" => "msg_0123MiRVCgSG2PaQZwCGbgmV",
|
255
|
+
# => "type" => "message",
|
256
|
+
# => "role" => "assistant",
|
257
|
+
# => "content" => [{"type"=>"text", "text"=>"This
|
258
|
+
# => image depicts a person, presumably a student or young adult, holding a tablet
|
259
|
+
# => device. "}],
|
260
|
+
# => "model" => "claude-3-haiku-20240307",
|
261
|
+
# => "stop_reason" => "end_turn",
|
262
|
+
# => "stop_sequence" => nil,
|
263
|
+
# => "usage" => {"input_tokens"=>17, "output_tokens"=>32}
|
264
|
+
# => }
|
265
|
+
```
|
266
|
+
|
267
|
+
|
268
|
+
### Additional parameters
|
269
|
+
|
270
|
+
You can add other parameters to the parameters hash, like `temperature` and even `top_k` or `top_p`. They will just be passed to the Anthropic server. You
|
271
|
+
can read more about the supported parameters [here](https://docs.anthropic.com/claude/reference/messages_post).
|
272
|
+
|
273
|
+
There are two special parameters, though, to do with... streaming. Keep reading to find out more.
|
274
|
+
|
275
|
+
### JSON
|
276
|
+
|
277
|
+
If you want your output to be json, it is recommended to provide an additional message like this:
|
278
|
+
|
279
|
+
```ruby
|
280
|
+
[{ role: "user", content: "Give me the heights of the 3 tallest mountains. Answer in the provided JSON format. Only include JSON." },
|
281
|
+
{ role: "assistant", content: '[{"name": "Mountain Name", "height": "height in km"}]' }]
|
282
|
+
```
|
283
|
+
|
284
|
+
Then Claude v3, even Haiku, might respond with:
|
285
|
+
|
286
|
+
```ruby
|
287
|
+
[{"name"=>"Mount Everest", "height"=>"8.85 km"}, {"name"=>"K2", "height"=>"8.61 km"}, {"name"=>"Kangchenjunga", "height"=>"8.58 km"}]
|
288
|
+
```
|
289
|
+
|
290
|
+
### Streaming
|
291
|
+
|
292
|
+
There are two modes of streaming: raw and preprocessed. The default is raw. You can call it like this:
|
293
|
+
|
294
|
+
```ruby
|
295
|
+
client.messages(
|
296
|
+
parameters: {
|
297
|
+
model: "claude-3-haiku-20240307",
|
298
|
+
messages: [{ role: "user", content: "How high is the sky?" }],
|
299
|
+
max_tokens: 50,
|
300
|
+
stream: Proc.new { |chunk| print chunk }
|
301
|
+
}
|
302
|
+
)
|
303
|
+
```
|
304
|
+
|
305
|
+
This still returns a regular response at the end, but also gives you direct access to every single chunk returned by Anthropic as they come in. Even if you don't want to
|
306
|
+
use the streaming, you may find this useful to avoid timeouts, which can happen if you send Opus a large input context, and expect a long response... It has been known to take
|
307
|
+
several minutes to compile the full response - which is longer than our 120 second default timeout. But when streaming, the connection does not time out.
|
308
|
+
|
309
|
+
Here is an example of a stream you might get back:
|
310
|
+
|
311
|
+
```ruby
|
312
|
+
{"type"=>"message_start", "message"=>{"id"=>"msg_01WMWvcZq5JEMLf6Jja4Bven", "type"=>"message", "role"=>"assistant", "model"=>"claude-3-haiku-20240307", "stop_sequence"=>nil, "usage"=>{"input_tokens"=>13, "output_tokens"=>1}, "content"=>[], "stop_reason"=>nil}}
|
313
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>"There"}}
|
314
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" is"}}
|
315
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" no"}}
|
316
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" single"}}
|
317
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" defin"}}
|
318
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>"itive"}}
|
319
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" \""}}
|
320
|
+
...
|
321
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>"'s"}}
|
322
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" atmosphere"}}
|
323
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" extends"}}
|
324
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" up"}}
|
325
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" to"}}
|
326
|
+
{"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" about"}}
|
327
|
+
{"type"=>"content_block_stop", "index"=>0}
|
328
|
+
{"type"=>"message_delta", "delta"=>{"stop_reason"=>"max_tokens", "stop_sequence"=>nil}, "usage"=>{"output_tokens"=>50}}
|
329
|
+
{"type"=>"message_stop"}
|
330
|
+
```
|
331
|
+
|
332
|
+
Now, you may find this... somewhat less than practical. Surely, the vast majority of developers will not want to deal with so much
|
333
|
+
boilerplate json.
|
334
|
+
|
335
|
+
Luckily, you can ask the anthropic gem to preprocess things for you!
|
336
|
+
|
337
|
+
First, if you expect simple text output, you can receive it delta by delta:
|
338
|
+
|
339
|
+
```ruby
|
340
|
+
client.messages(
|
341
|
+
parameters: {
|
342
|
+
model: "claude-3-haiku-20240307",
|
343
|
+
messages: [{ role: "user", content: "How high is the sky?" }],
|
344
|
+
max_tokens: 50,
|
345
|
+
stream: Proc.new { |incremental_response, delta| process_your(incremental_response, delta) },
|
346
|
+
preprocess_stream: :text
|
347
|
+
}
|
348
|
+
)
|
349
|
+
```
|
350
|
+
|
351
|
+
The first block argument, `incremental_response`, accrues everything that's been returned so far, so you don't have to. If you just want the last bit,
|
352
|
+
then use the second, `delta` argument.
|
353
|
+
|
354
|
+
But what if you want to stream JSON?
|
355
|
+
|
356
|
+
Partial JSON is not very useful. But it is common enough to request a collection of JSON objects as a response, as in our earlier example of asking for the heights of the 3 tallest mountains.
|
357
|
+
|
358
|
+
If you ask it to, this gem will also do its best to sort this out for you:
|
359
|
+
|
360
|
+
```ruby
|
361
|
+
client.messages(
|
362
|
+
parameters: {
|
363
|
+
model: "claude-3-haiku-20240307",
|
364
|
+
messages: [{ role: "user", content: "How high is the sky?" }],
|
365
|
+
max_tokens: 50,
|
366
|
+
stream: Proc.new { |json_object| process_your(json_object) },
|
367
|
+
preprocess_stream: :json
|
368
|
+
}
|
369
|
+
)
|
370
|
+
```
|
371
|
+
|
372
|
+
Each time a `}` is reached in the stream, the preprocessor will take what it has in the preprocessing stack, pick out whatever's between the widest `{` and `}`, and try to parse it into a JSON object.
|
373
|
+
If it succeeds, it will pass you the json object, reset its preprocessing stack, and carry on.
|
374
|
+
|
375
|
+
If the parsing fails despite reaching a `}`, currently, it will catch the Error, log it to `$stdout`, ignore the malformed object, reset the preprocessing stack and carry on. This does mean that it is possible,
|
376
|
+
if the AI is sending some malformed JSON (which can happen, albeit rarely), that some objects will be lost.
|
377
|
+
|
378
|
+
## Development
|
379
|
+
|
380
|
+
After checking out the repo, run `bin/setup` to install dependencies. You can run `bin/console` for an interactive prompt that will allow you to experiment.
|
381
|
+
|
382
|
+
To install this gem onto your local machine, run `bundle exec rake install`.
|
383
|
+
|
384
|
+
To run all tests, execute the command `bundle exec rake`, which will also run the linter (Rubocop). This repository uses [VCR](https://github.com/vcr/vcr) to log API requests.
|
385
|
+
|
386
|
+
> [!WARNING]
|
387
|
+
> If you have an `ANTHROPIC_API_KEY` in your `ENV`, running the specs will use this to run the specs against the actual API, which will be slow and cost you money - 2 cents or more! Remove it from your environment with `unset` or similar if you just want to run the specs against the stored VCR responses.
|
388
|
+
|
389
|
+
### Warning
|
390
|
+
|
391
|
+
If you have an `ANTHROPIC_API_KEY` in your `ENV`, running the specs will use this to run the specs against the actual API, which will be slow and cost you money - 2 cents or more! Remove it from your environment with `unset` or similar if you just want to run the specs against the stored VCR responses.
|
392
|
+
|
393
|
+
## Release
|
394
|
+
|
395
|
+
First run the specs without VCR so they actually hit the API. This will cost 2 cents or more. Set ANTHROPIC_API_KEY in your environment or pass it in like this:
|
396
|
+
|
397
|
+
```
|
398
|
+
ANTHROPIC_API_KEY=123abc bundle exec rspec
|
399
|
+
```
|
400
|
+
|
401
|
+
Then update the version number in `version.rb`, update `CHANGELOG.md`, run `bundle install` to update Gemfile.lock, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
|
402
|
+
|
403
|
+
## Contributing
|
404
|
+
|
405
|
+
Bug reports and pull requests are welcome on GitHub at <https://github.com/alexrudall/ruby-anthropic>. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [code of conduct](https://github.com/alexrudall/ruby-anthropic/blob/main/CODE_OF_CONDUCT.md).
|
406
|
+
|
407
|
+
## License
|
408
|
+
|
409
|
+
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
410
|
+
|
411
|
+
## Code of Conduct
|
412
|
+
|
413
|
+
Everyone interacting in the Ruby Anthropic project's codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/alexrudall/ruby-anthropic/blob/main/CODE_OF_CONDUCT.md).
|
data/Rakefile
ADDED
@@ -0,0 +1,19 @@
|
|
1
|
+
# frozen_string_literal: true

require "bundler/gem_tasks"
require "rspec/core/rake_task"
require "rubocop/rake_task"

# Define both tool tasks at the top level (not lazily inside another task
# body) so they show up in `rake -T` and exist no matter which task runs
# first. The original defined RuboCop::RakeTask inside `task :lint`, which
# works but hides the task and re-defines it on every lint run.
RSpec::Core::RakeTask.new(:spec)
RuboCop::RakeTask.new(:rubocop)

# Default task: run the test suite, then the linter.
task default: %i[test lint]

# Run the RSpec suite.
task :test do
  Rake::Task["spec"].invoke
end

# Run RuboCop.
task :lint do
  Rake::Task["rubocop"].invoke
end
|
data/anthropic.gemspec
ADDED
@@ -0,0 +1,31 @@
|
|
1
|
+
require_relative "lib/anthropic/version"

Gem::Specification.new do |spec|
  # --- Identity -----------------------------------------------------------
  spec.name    = "ruby-anthropic"
  spec.version = Anthropic::VERSION
  spec.authors = ["Alex"]
  spec.email   = ["alexrudall@users.noreply.github.com"]

  spec.summary = "Anthropic API + Ruby! 🤖🌌"
  spec.homepage = "https://github.com/alexrudall/ruby-anthropic"
  spec.license = "MIT"
  spec.required_ruby_version = Gem::Requirement.new(">= 2.6.0")

  # --- RubyGems metadata --------------------------------------------------
  spec.metadata["homepage_uri"] = spec.homepage
  spec.metadata["source_code_uri"] = "https://github.com/alexrudall/ruby-anthropic"
  spec.metadata["changelog_uri"] = "https://github.com/alexrudall/ruby-anthropic/blob/main/CHANGELOG.md"
  spec.metadata["rubygems_mfa_required"] = "true"

  # --- Packaged files -----------------------------------------------------
  # Ship everything tracked by git, except the test directories.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    `git ls-files -z`.split("\x0").reject { |path| path.match(%r{^(test|spec|features)/}) }
  end
  spec.bindir = "exe"
  spec.executables = spec.files.grep(%r{^exe/}) { |path| File.basename(path) }
  spec.require_paths = ["lib"]

  # --- Runtime dependencies -----------------------------------------------
  spec.add_dependency "event_stream_parser", ">= 0.3.0", "< 2.0.0"
  spec.add_dependency "faraday", ">= 1"
  spec.add_dependency "faraday-multipart", ">= 1"
end
|
data/bin/console
ADDED
@@ -0,0 +1,14 @@
|
|
1
|
+
#!/usr/bin/env ruby

require "bundler/setup"
require "anthropic"

# Interactive console with the gem preloaded, for experimenting with
# Anthropic::Client. Add any fixtures or setup code you need here.

# Prefer Pry? Add pry to your Gemfile, then swap the IRB lines below for:
#   require "pry"
#   Pry.start

require "irb"
IRB.start(__FILE__)
|
data/lib/anthropic/client.rb
ADDED
@@ -0,0 +1,113 @@
|
|
1
|
+
require_relative "messages/batches"
require_relative "messages/client"

module Anthropic
  # HTTP client for the Anthropic API. Configure globally via
  # Anthropic.configure, or pass per-instance overrides to +new+.
  class Client
    include Anthropic::HTTP

    # Settings that can be supplied per-client or fall back to the global
    # Anthropic.configuration.
    CONFIG_KEYS = %i[
      access_token
      anthropic_version
      api_version
      log_errors
      uri_base
      request_timeout
      extra_headers
    ].freeze
    attr_reader(*CONFIG_KEYS)

    # @param config [Hash] per-client overrides for any key in CONFIG_KEYS.
    # @param faraday_middleware [Proc] optional block handed to the Faraday
    #   connection builder (see Anthropic::HTTP).
    def initialize(config = {}, &faraday_middleware)
      CONFIG_KEYS.each do |key|
        # Set instance variables like api_type & access_token. Fall back to
        # global config if not present. Uses `nil?` rather than `||` so an
        # explicit `false` (e.g. log_errors: false) still wins over the
        # global setting.
        instance_variable_set(
          "@#{key}",
          config[key].nil? ? Anthropic.configuration.send(key) : config[key]
        )
      end
      @faraday_middleware = faraday_middleware
    end

    # @deprecated (but still works while Anthropic API responds to it)
    def complete(parameters: {})
      parameters[:prompt] = wrap_prompt(prompt: parameters[:prompt])
      json_post(path: "/complete", parameters: parameters)
    end

    # Anthropic API Parameters as of 2024-05-07:
    # @see https://docs.anthropic.com/claude/reference/messages_post
    #
    # When called without parameters, returns a Messages::Client instance for
    # batch operations. When called with parameters, creates a single message.
    #
    # @param [Hash] parameters
    # @option parameters [Array] :messages - Required. An array of messages to send to the API. Each
    #   message should have a role and content. Single message example:
    #   +[{ role: "user", content: "Hello, Claude!" }]+
    # @option parameters [String] :model - see https://docs.anthropic.com/claude/docs/models-overview
    # @option parameters [Integer] :max_tokens - Required, must be less than 4096 - @see https://docs.anthropic.com/claude/docs/models-overview
    # @option parameters [String] :system - Optional but recommended. @see https://docs.anthropic.com/claude/docs/system-prompts
    # @option parameters [Float] :temperature - Optional, defaults to 1.0
    # @option parameters [Proc] :stream - Optional, if present, must be a Proc that will receive the
    #   content fragments as they come in
    # @option parameters [String] :preprocess_stream - If true, the streaming Proc will be pre-
    #   processed. Specifically, instead of being passed a raw Hash like:
    #   {"type"=>"content_block_delta", "index"=>0, "delta"=>{"type"=>"text_delta", "text"=>" of"}}
    #   the Proc will instead be passed something nicer. If +preprocess_stream+ is set to +"json"+
    #   or +:json+, then the Proc will only receive full json objects, one at a time.
    #   If +preprocess_stream+ is set to +"text"+ or +:text+ then the Proc will receive two
    #   arguments: the first will be the text accrued so far, and the second will be the delta
    #   just received in the current chunk.
    #
    # @returns [Hash, Messages::Client] Returns a Hash response from the API when creating a message
    #   with parameters, or a Messages::Client instance when called without parameters
    # @example Accessing batches:
    #   client.messages.batches.create(requests: [...])
    #   client.messages.batches.get(id: "batch_123")
    def messages(**args)
      # `**args` always binds a Hash (never nil), so only the :parameters
      # presence check is needed; the original `args && args[:parameters]`
      # guard was redundant.
      return @messages ||= Messages::Client.new(self) unless args[:parameters]

      json_post(path: "/messages", parameters: args[:parameters])
    end

    # Adds Anthropic beta features to API requests. Can be used in two ways:
    #
    # 1. Multiple betas in one call with comma-separated string:
    #    client.beta("feature1,feature2").messages
    #
    # 2. Chaining multiple beta calls:
    #    client.beta("feature1").beta("feature2").messages
    #
    # @param version [String] The beta version(s) to enable
    # @return [Client] A new client instance with the beta header(s)
    def beta(version)
      dup.tap do |client|
        # NOTE(review): assumes extra_headers is always a Hash (presumably the
        # global configuration defaults it to {}); a nil value here would
        # raise NoMethodError — confirm against Anthropic.configuration.
        existing_beta = client.extra_headers["anthropic-beta"]
        combined_beta = [existing_beta, version].compact.join(",")
        client.add_headers("anthropic-beta" => combined_beta)
      end
    end

    private

    # Used only by @deprecated +complete+ method.
    # NOTE: mutates the caller's prompt string in place via prepend/concat,
    # then returns it wrapped in the Human:/Assistant: turn markers.
    def wrap_prompt(prompt:, prefix: "\n\nHuman: ", suffix: "\n\nAssistant:")
      return if prompt.nil?

      prompt.prepend(prefix) unless prompt.start_with?(prefix)
      prompt.concat(suffix) unless prompt.end_with?(suffix)
      prompt
    end
  end
end
|