ruby-openai 7.0.0 → 7.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 8334c2f0658ff33f39e96e8316b5c7122ca8810f5bfaca945c8d5f1add38b85a
-  data.tar.gz: b761ed842f7c27ba7f3e6e3137f6d60f2d51cb1a32f585989fd2b1992d256b46
+  metadata.gz: a86dc627f27eeea7cf3eb1bf2eec2b0209d0bb8c11fef0eb6fd6518f7f10cfe9
+  data.tar.gz: 712ab627670853d680c8858a9d27aef5a82be09de8d53e5f7156ee608ba8d939
 SHA512:
-  metadata.gz: 7c53448def0a2a9849744a09c10f91fb4c16eb23fbf0d52be748574a9a96b05cb5641821c7728b9d95764cf50399f23c48bfdf3444d86ab80e65602e2264f6ba
-  data.tar.gz: 8806bbe03d2cde1b1fdfb691bcc2c4d803837faed6f001027135bdf9ed54a4bdf4eb01128c36d9d84b35622876ec087c494980e67b01a72a8ecb01040180cc72
+  metadata.gz: 72e14dc39495046b71ca147953582a24f8c9261955f2ca2a8d898ca7f8e136b459c31583620c16db3fa80c39da61c1f3a4cc932c5b3f2e71741fed42719eaeaf
+  data.tar.gz: 82db19d40f9b44fedb73d8f310771af71096d3d7e8e56f96d000a70f4c61abb1f21cbe98187d70fec1c3d639921eabe8cd8e456cac92dbcfa6e2efcb655f865b
data/.gitignore CHANGED
@@ -10,11 +10,6 @@
 /test/tmp/
 /test/version_tmp/
 /tmp/
-/.bundle/
-/.yardoc
-/_yardoc/
-/doc/
-
 
 # Used by dotenv library to load environment variables.
 .env
data/CHANGELOG.md CHANGED
@@ -5,6 +5,26 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [7.1.0] - 2024-06-10
+
+### Added
+
+- Add new Vector Store endpoints - thanks to [@willywg](https://github.com/willywg) for this PR!
+- Add parameters to batches.list endpoint so you can for example use `after` - thanks to [@marckohlbrugge](https://github.com/marckohlbrugge)!
+- Add vision as permitted purpose for files - thanks again to [@willywg](https://github.com/willywg) for the PR.
+- Add improved README example of tool calling - thanks [@krschacht](https://github.com/krschacht) - check out his project [HostedGPT](https://github.com/AllYourBot/hostedgpt)!
+
+### Fixed
+
+- Fix broken link in README table of contents - thanks to [@garrettgsb](https://github.com/garrettgsb)!
+- Skip sending nil headers - thanks to [@drnic](https://github.com/drnic)!
+
+## [7.0.1] - 2024-04-30
+
+### Fixed
+
+- Update to v2 of Assistants in Messages, Runs, RunSteps and Threads - thanks to [@willywg](https://github.com/willywg) and others for pointing this out.
+
 ## [7.0.0] - 2024-04-27
 
 ### Added
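The Vector Store endpoints are the headline addition in this release. As a quick orientation before the file-by-file diff, here is a minimal sketch of the new surface, based on the `client.vector_stores` accessor and `OpenAI::VectorStores` class added below (the file ID is a placeholder):

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# "file-abc123" is a placeholder for an ID returned by client.files.upload.
store = client.vector_stores.create(
  parameters: { name: "docs", file_ids: ["file-abc123"] }
)

client.vector_stores.retrieve(id: store["id"])
```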
data/Gemfile CHANGED
@@ -5,8 +5,8 @@ gemspec
 
 gem "byebug", "~> 11.1.3"
 gem "dotenv", "~> 2.8.1"
-gem "rake", "~> 13.1"
+gem "rake", "~> 13.2"
 gem "rspec", "~> 3.13"
 gem "rubocop", "~> 1.50.2"
 gem "vcr", "~> 6.1.0"
-gem "webmock", "~> 3.19.1"
+gem "webmock", "~> 3.23.1"
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    ruby-openai (7.0.0)
+    ruby-openai (7.1.0)
       event_stream_parser (>= 0.3.0, < 2.0.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
@@ -9,12 +9,14 @@ PATH
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.5)
+    addressable (2.8.6)
       public_suffix (>= 2.0.2, < 6.0)
     ast (2.4.2)
     base64 (0.2.0)
+    bigdecimal (3.1.8)
     byebug (11.1.3)
-    crack (0.4.5)
+    crack (1.0.0)
+      bigdecimal
       rexml
     diff-lcs (1.5.1)
     dotenv (2.8.1)
@@ -26,17 +28,18 @@ GEM
     faraday-multipart (1.0.4)
       multipart-post (~> 2)
     faraday-net_http (3.0.2)
-    hashdiff (1.0.1)
+    hashdiff (1.1.0)
     json (2.6.3)
     multipart-post (2.3.0)
     parallel (1.22.1)
     parser (3.2.2.0)
       ast (~> 2.4.1)
-    public_suffix (5.0.3)
+    public_suffix (5.0.5)
     rainbow (3.1.1)
-    rake (13.1.0)
+    rake (13.2.1)
     regexp_parser (2.8.0)
-    rexml (3.2.6)
+    rexml (3.2.9)
+      strscan
     rspec (3.13.0)
       rspec-core (~> 3.13.0)
       rspec-expectations (~> 3.13.0)
@@ -64,9 +67,10 @@ GEM
       parser (>= 3.2.1.0)
     ruby-progressbar (1.13.0)
     ruby2_keywords (0.0.5)
+    strscan (3.1.0)
     unicode-display_width (2.4.2)
     vcr (6.1.0)
-    webmock (3.19.1)
+    webmock (3.23.1)
       addressable (>= 2.8.0)
       crack (>= 0.3.2)
       hashdiff (>= 0.4.0, < 2.0.0)
@@ -77,12 +81,12 @@ PLATFORMS
 DEPENDENCIES
   byebug (~> 11.1.3)
   dotenv (~> 2.8.1)
-  rake (~> 13.1)
+  rake (~> 13.2)
   rspec (~> 3.13)
   rubocop (~> 1.50.2)
   ruby-openai!
   vcr (~> 6.1.0)
-  webmock (~> 3.19.1)
+  webmock (~> 3.23.1)
 
 BUNDLED WITH
    2.4.5
data/README.md CHANGED
@@ -4,13 +4,13 @@
 [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/alexrudall/ruby-openai/blob/main/LICENSE.txt)
 [![CircleCI Build Status](https://circleci.com/gh/alexrudall/ruby-openai.svg?style=shield)](https://circleci.com/gh/alexrudall/ruby-openai)
 
-Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖🩵
+Use the [OpenAI API](https://openai.com/blog/openai-api/) with Ruby! 🤖❤️
 
-Stream text with GPT-4, transcribe and translate audio with Whisper, or create images with DALL·E...
+Stream text with GPT-4o, transcribe and translate audio with Whisper, or create images with DALL·E...
 
 [🚢 Hire me](https://peaceterms.com?utm_source=ruby-openai&utm_medium=readme&utm_id=26072023) | [🎮 Ruby AI Builders Discord](https://discord.gg/k4Uc224xVD) | [🐦 Twitter](https://twitter.com/alexrudall) | [🧠 Anthropic Gem](https://github.com/alexrudall/anthropic) | [🚂 Midjourney Gem](https://github.com/alexrudall/midjourney)
 
-# Table of Contents
+## Contents
 
 - [Ruby OpenAI](#ruby-openai)
   - [Table of Contents](#table-of-contents)
@@ -27,6 +27,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
   - [Faraday middleware](#faraday-middleware)
   - [Azure](#azure)
   - [Ollama](#ollama)
+  - [Groq](#groq)
   - [Counting Tokens](#counting-tokens)
   - [Models](#models)
   - [Chat](#chat)
@@ -34,14 +35,20 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
   - [Vision](#vision)
   - [JSON Mode](#json-mode)
   - [Functions](#functions)
-  - [Edits](#edits)
+  - [Completions](#completions)
   - [Embeddings](#embeddings)
   - [Batches](#batches)
   - [Files](#files)
+    - [For fine-tuning purposes](#for-fine-tuning-purposes)
+    - [For assistant purposes](#for-assistant-purposes)
   - [Finetunes](#finetunes)
+  - [Vector Stores](#vector-stores)
+  - [Vector Store Files](#vector-store-files)
+  - [Vector Store File Batches](#vector-store-file-batches)
   - [Assistants](#assistants)
  - [Threads and Messages](#threads-and-messages)
   - [Runs](#runs)
+    - [Create and Run](#create-and-run)
   - [Runs involving function tools](#runs-involving-function-tools)
   - [Image Generation](#image-generation)
   - [DALL·E 2](#dalle-2)
@@ -53,7 +60,7 @@ Stream text with GPT-4, transcribe and translate audio with Whisper, or create i
   - [Translate](#translate)
   - [Transcribe](#transcribe)
   - [Speech](#speech)
-  - [Errors](#errors)
+  - [Errors](#errors-1)
   - [Development](#development)
   - [Release](#release)
   - [Contributing](#contributing)
@@ -100,7 +107,10 @@ require "openai"
 For a quick test you can pass your token directly to a new client:
 
 ```ruby
-client = OpenAI::Client.new(access_token: "access_token_goes_here")
+client = OpenAI::Client.new(
+  access_token: "access_token_goes_here",
+  log_errors: true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
+)
 ```
 
 ### With Config
@@ -109,8 +119,9 @@ For a more robust setup, you can configure the gem with your API keys, for examp
 
 ```ruby
 OpenAI.configure do |config|
-  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
-  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional.
+  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
+  config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
+  config.log_errors = true # Highly recommended in development, so you can see what errors OpenAI is returning. Not recommended in production because it could leak private data to your logs.
 end
 ```
 
@@ -239,6 +250,27 @@ client.chat(
 # => Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat?
 ```
 
+#### Groq
+
+[Groq API Chat](https://console.groq.com/docs/quickstart) is broadly compatible with the OpenAI API, with a [few minor differences](https://console.groq.com/docs/openai). Get an access token from [here](https://console.groq.com/keys), then:
+
+```ruby
+client = OpenAI::Client.new(
+  access_token: "groq_access_token_goes_here",
+  uri_base: "https://api.groq.com/openai"
+)
+
+client.chat(
+  parameters: {
+    model: "llama3-8b-8192", # Required.
+    messages: [{ role: "user", content: "Hello!"}], # Required.
+    temperature: 0.7,
+    stream: proc do |chunk, _bytesize|
+      print chunk.dig("choices", 0, "delta", "content")
+    end
+  })
+```
+
 ### Counting Tokens
 
 OpenAI parses prompt text into [tokens](https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them), which are words or portions of words. (These tokens are unrelated to your API access_token.) Counting tokens can help you estimate your [costs](https://openai.com/pricing). It can also help you ensure your prompt text size is within the max-token limits of your model's context window, and choose an appropriate [`max_tokens`](https://platform.openai.com/docs/api-reference/chat/create#chat/create-max_tokens) completion parameter so your response will fit as well.
@@ -257,7 +289,7 @@ There are different models that can be used to generate text. For a full list an
 
 ```ruby
 client.models.list
-client.models.retrieve(id: "gpt-3.5-turbo")
+client.models.retrieve(id: "gpt-4o")
 ```
 
 ### Chat
@@ -267,7 +299,7 @@ GPT is a model that can be used to generate text in a conversational style. You
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-3.5-turbo", # Required.
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Hello!"}], # Required.
     temperature: 0.7,
   })
@@ -284,7 +316,7 @@ You can stream from the API in realtime, which can be much faster and used to cr
 ```ruby
 client.chat(
   parameters: {
-    model: "gpt-3.5-turbo", # Required.
+    model: "gpt-4o", # Required.
     messages: [{ role: "user", content: "Describe a character called Anna!"}], # Required.
     temperature: 0.7,
     stream: proc do |chunk, _bytesize|
@@ -320,12 +352,12 @@ puts response.dig("choices", 0, "message", "content")
 
 #### JSON Mode
 
-You can set the response_format to ask for responses in JSON (at least for `gpt-3.5-turbo-1106`):
+You can set the response_format to ask for responses in JSON:
 
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-3.5-turbo-1106",
+    model: "gpt-4o",
     response_format: { type: "json_object" },
     messages: [{ role: "user", content: "Hello! Give me some JSON please."}],
     temperature: 0.7,
@@ -345,7 +377,7 @@ You can stream it as well!
 ```ruby
 response = client.chat(
   parameters: {
-    model: "gpt-3.5-turbo-1106",
+    model: "gpt-4o",
     messages: [{ role: "user", content: "Can I have some JSON please?"}],
     response_format: { type: "json_object" },
     stream: proc do |chunk, _bytesize|
@@ -376,26 +408,29 @@ You can describe and pass in functions and the model will intelligently choose t
 ```ruby
 
 def get_current_weather(location:, unit: "fahrenheit")
-  # use a weather api to fetch weather
+  # Here you could use a weather api to fetch the weather.
+  "The weather in #{location} is nice 🌞 #{unit}"
 end
 
+messages = [
+  {
+    "role": "user",
+    "content": "What is the weather like in San Francisco?",
+  },
+]
+
 response =
   client.chat(
     parameters: {
-      model: "gpt-3.5-turbo",
-      messages: [
-        {
-          "role": "user",
-          "content": "What is the weather like in San Francisco?",
-        },
-      ],
+      model: "gpt-4o",
+      messages: messages, # Defined above because we'll use it again
       tools: [
        {
          type: "function",
          function: {
            name: "get_current_weather",
            description: "Get the current weather in a given location",
-            parameters: {
+            parameters: { # Format: https://json-schema.org/understanding-json-schema
              type: :object,
              properties: {
                location: {
@@ -412,31 +447,51 @@ response =
          },
        }
      ],
-      tool_choice: {
-        type: "function",
-        function: {
-          name: "get_current_weather"
-        }
-      }
+      tool_choice: "required" # Optional, defaults to "auto"
+      # Can also put "none" or specific functions, see docs
    },
  )
 
 message = response.dig("choices", 0, "message")
 
 if message["role"] == "assistant" && message["tool_calls"]
-  function_name = message.dig("tool_calls", 0, "function", "name")
-  args =
-    JSON.parse(
-      message.dig("tool_calls", 0, "function", "arguments"),
+  message["tool_calls"].each do |tool_call|
+    tool_call_id = tool_call.dig("id")
+    function_name = tool_call.dig("function", "name")
+    function_args = JSON.parse(
+      tool_call.dig("function", "arguments"),
      { symbolize_names: true },
    )
+    function_response = case function_name
+    when "get_current_weather"
+      get_current_weather(**function_args) # => "The weather is nice 🌞"
+    else
+      # decide how to handle
+    end
+
+    # For a subsequent message with the role "tool", OpenAI requires the preceding message to have a tool_calls argument.
+    messages << message
 
-  case function_name
-  when "get_current_weather"
-    get_current_weather(**args)
+    messages << {
+      tool_call_id: tool_call_id,
+      role: "tool",
+      name: function_name,
+      content: function_response
+    } # Extend the conversation with the results of the functions
  end
+
+  second_response = client.chat(
+    parameters: {
+      model: "gpt-4o",
+      messages: messages
+    })
+
+  puts second_response.dig("choices", 0, "message", "content")
+
+  # At this point, the model has decided to call functions, you've called the functions
+  # and provided the response back, and the model has considered this and responded.
 end
-# => "The weather is nice 🌞"
+# => "It looks like the weather is nice and sunny in San Francisco! If you're planning to go out, it should be a pleasant day."
 ```
 
 ### Completions
@@ -446,7 +501,7 @@ Hit the OpenAI API for a completion using other GPT-3 models:
 ```ruby
 response = client.completions(
   parameters: {
-    model: "gpt-3.5-turbo",
+    model: "gpt-4o",
     prompt: "Once upon a time",
     max_tokens: 5
   })
@@ -471,18 +526,16 @@ puts response.dig("data", 0, "embedding")
 ```
 
 ### Batches
-
-The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently, only the `/v1/chat/completions` endpoint is supported for batches.
+The Batches endpoint allows you to create and manage large batches of API requests to run asynchronously. Currently, the supported endpoints for batches are `/v1/chat/completions` (Chat Completions API) and `/v1/embeddings` (Embeddings API).
 
 To use the Batches endpoint, you need to first upload a JSONL file containing the batch requests using the Files endpoint. The file must be uploaded with the purpose set to `batch`. Each line in the JSONL file represents a single request and should have the following format:
-
 ```json
 {
   "custom_id": "request-1",
   "method": "POST",
   "url": "/v1/chat/completions",
   "body": {
-    "model": "gpt-3.5-turbo",
+    "model": "gpt-4o",
    "messages": [
      { "role": "system", "content": "You are a helpful assistant." },
      { "role": "user", "content": "What is 2+2?" }
@@ -542,7 +595,7 @@ These files are in JSONL format, with each line representing the output or error
   "id": "chatcmpl-abc123",
   "object": "chat.completion",
   "created": 1677858242,
-  "model": "gpt-3.5-turbo-0301",
+  "model": "gpt-4o",
  "choices": [
    {
      "index": 0,
@@ -559,7 +612,7 @@ These files are in JSONL format, with each line representing the output or error
 If a request fails with a non-HTTP error, the error object will contain more information about the cause of the failure.
 
 ### Files
-
+#### For fine-tuning purposes
 Put your data in a `.jsonl` file like this:
 
 ```json
@@ -577,6 +630,24 @@ client.files.content(id: "file-123")
 client.files.delete(id: "file-123")
 ```
 
+#### For assistant purposes
+
+You can send a file path:
+
+```ruby
+client.files.upload(parameters: { file: "path/to/file.pdf", purpose: "assistants" })
+```
+
+or a File object
+
+```ruby
+my_file = File.open("path/to/file.pdf", "rb")
+client.files.upload(parameters: { file: my_file, purpose: "assistants" })
+```
+
+
+See supported file types on [API documentation](https://platform.openai.com/docs/assistants/tools/file-search/supported-files).
+
 ### Finetunes
 
 Upload your fine-tuning data in a `.jsonl` file as above and get its ID:
@@ -592,7 +663,7 @@ You can then use this file ID to create a fine tuning job:
 response = client.finetunes.create(
   parameters: {
     training_file: file_id,
-    model: "gpt-3.5-turbo"
+    model: "gpt-4o"
  })
 fine_tune_id = response["id"]
 ```
@@ -629,6 +700,135 @@ You can also capture the events for a job:
 client.finetunes.list_events(id: fine_tune_id)
 ```
 
+### Vector Stores
+Vector Store objects give the File Search tool the ability to search your files.
+
+You can create a new vector store:
+
+```ruby
+response = client.vector_stores.create(
+  parameters: {
+    name: "my vector store",
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+vector_store_id = response["id"]
+```
+
+Given a `vector_store_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_stores.retrieve(id: vector_store_id)
+```
+
+You can get a `list` of all vector stores currently available under the organization:
+
+```ruby
+client.vector_stores.list
+```
+
+You can modify an existing vector store, except for the `file_ids`:
+
+```ruby
+response = client.vector_stores.modify(
+  id: vector_store_id,
+  parameters: {
+    name: "Modified Test Vector Store",
+  }
+)
+```
+
+You can delete vector stores:
+
+```ruby
+client.vector_stores.delete(id: vector_store_id)
+```
+
+### Vector Store Files
+Vector store files represent files inside a vector store.
+
+You can create a new vector store file by attaching a File to a vector store.
+
+```ruby
+response = client.vector_store_files.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_id: "file-abc123"
+  }
+)
+
+vector_store_file_id = response["id"]
+```
+
+Given a `vector_store_file_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_files.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+
+You can get a `list` of all vector store files currently available under the vector store:

+```ruby
+client.vector_store_files.list(vector_store_id: "vector-store-abc123")
+```
+
+You can delete a vector store file:
+
+```ruby
+client.vector_store_files.delete(
+  vector_store_id: "vector-store-abc123",
+  id: vector_store_file_id
+)
+```
+Note: This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the delete file endpoint.
+
+### Vector Store File Batches
+Vector store file batches represent operations to add multiple files to a vector store.
+
+You can create a new vector store file batch by attaching multiple Files to a vector store.
+
+```ruby
+response = client.vector_store_file_batches.create(
+  vector_store_id: "vector-store-abc123",
+  parameters: {
+    file_ids: ["file-abc123", "file-def456"]
+  }
+)
+
+file_batch_id = response["id"]
+```
+
+Given a `file_batch_id` you can `retrieve` the current field values:
+
+```ruby
+client.vector_store_file_batches.retrieve(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can get a `list` of all vector store files in a batch currently available under the vector store:
+
+```ruby
+client.vector_store_file_batches.list(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
+You can cancel a vector store file batch (This attempts to cancel the processing of files in this batch as soon as possible):
+
+```ruby
+client.vector_store_file_batches.cancel(
+  vector_store_id: "vector-store-abc123",
+  id: file_batch_id
+)
+```
+
 ### Assistants
 
 Assistants are stateful actors that can have many conversations and use tools to perform tasks (see [Assistant Overview](https://platform.openai.com/docs/assistants/overview)).
@@ -638,16 +838,23 @@ To create a new assistant:
 ```ruby
 response = client.assistants.create(
  parameters: {
-    model: "gpt-3.5-turbo-1106", # Retrieve via client.models.list. Assistants need 'gpt-3.5-turbo-1106' or later.
+    model: "gpt-4o",
    name: "OpenAI-Ruby test assistant",
    description: nil,
-    instructions: "You are a helpful assistant for coding a OpenAI API client using the OpenAI-Ruby gem.",
+    instructions: "You are a Ruby dev bot. When asked a question, write and run Ruby code to answer the question",
    tools: [
-      { type: 'retrieval' }, # Allow access to files attached using file_ids
-      { type: 'code_interpreter' }, # Allow access to Python code interpreter
+      { type: "code_interpreter" },
+      { type: "file_search" }
    ],
-    "file_ids": ["file-123"], # See Files section above for how to upload files
-    "metadata": { my_internal_version_id: '1.0.0' }
+    tool_resources: {
+      code_interpreter: {
+        file_ids: [] # See Files section above for how to upload files
+      },
+      file_search: {
+        vector_store_ids: [] # See Vector Stores section above for how to add vector stores
+      }
+    },
+    "metadata": { my_internal_version_id: "1.0.0" }
  })
 assistant_id = response["id"]
 ```
@@ -829,11 +1036,7 @@ client.runs.list(thread_id: thread_id, parameters: { order: "asc", limit: 3 })
 You can also create a thread and run in one call like this:
 
 ```ruby
-response = client.threads.create_and_run(
-  parameters: {
-    model: 'gpt-3.5-turbo',
-    messages: [{ role: 'user', content: "What's deep learning?"}]
-  })
+response = client.runs.create_thread_and_run(parameters: { assistant_id: assistant_id })
 run_id = response['id']
 thread_id = response['thread_id']
 ```
@@ -970,7 +1173,7 @@ response = client.audio.transcribe(
  parameters: {
    model: "whisper-1",
    file: File.open("path_to_file", "rb"),
-    language: "en" # Optional.
+    language: "en" # Optional
  })
 puts response["text"]
 # => "Transcription of the text"
@@ -985,7 +1188,9 @@ response = client.audio.speech(
  parameters: {
    model: "tts-1",
    input: "This is a speech test!",
-    voice: "alloy"
+    voice: "alloy",
+    response_format: "mp3", # Optional
+    speed: 1.0 # Optional
  }
 )
 File.binwrite('demo.mp3', response)
@@ -998,7 +1203,7 @@ HTTP errors can be caught like this:
 
 ```
 begin
-  OpenAI::Client.new.models.retrieve(id: "gpt-3.5-turbo")
+  OpenAI::Client.new.models.retrieve(id: "gpt-4o")
 rescue Faraday::Error => e
   raise "Got a Faraday error: #{e}"
 end
data/lib/openai/assistants.rb CHANGED
@@ -1,7 +1,9 @@
 module OpenAI
   class Assistants
+    BETA_VERSION = "v2".freeze
+
     def initialize(client:)
-      @client = client.beta(assistants: "v2")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
     def list
data/lib/openai/batches.rb CHANGED
@@ -1,11 +1,11 @@
 module OpenAI
   class Batches
     def initialize(client:)
-      @client = client.beta(assistants: "v1")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
-    def list
-      @client.get(path: "/batches")
+    def list(parameters: {})
+      @client.get(path: "/batches", parameters: parameters)
     end
 
     def retrieve(id:)
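The reworked `list` forwards arbitrary query parameters, which is what enables the `after` pagination cursor mentioned in the changelog. A minimal sketch (the batch ID is a placeholder):

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# Fetch up to 10 batches created after a given batch; "batch_abc123" is a
# hypothetical ID taken from an earlier client.batches.create response.
client.batches.list(parameters: { after: "batch_abc123", limit: 10 })
```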
data/lib/openai/client.rb CHANGED
@@ -78,6 +78,18 @@ module OpenAI
       @run_steps ||= OpenAI::RunSteps.new(client: self)
     end
 
+    def vector_stores
+      @vector_stores ||= OpenAI::VectorStores.new(client: self)
+    end
+
+    def vector_store_files
+      @vector_store_files ||= OpenAI::VectorStoreFiles.new(client: self)
+    end
+
+    def vector_store_file_batches
+      @vector_store_file_batches ||= OpenAI::VectorStoreFileBatches.new(client: self)
+    end
+
     def batches
       @batches ||= OpenAI::Batches.new(client: self)
     end
data/lib/openai/files.rb CHANGED
@@ -4,6 +4,7 @@ module OpenAI
       assistants
       batch
       fine-tune
+      vision
     ].freeze
 
     def initialize(client:)
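With `vision` added to `PURPOSES`, image uploads for vision-enabled Assistants now pass validation. A sketch using the `files.upload` call documented in the README above (the path is a placeholder):

```ruby
client = OpenAI::Client.new(access_token: ENV.fetch("OPENAI_ACCESS_TOKEN"))

# "path/to/image.png" is a placeholder for a local image file.
client.files.upload(parameters: { file: "path/to/image.png", purpose: "vision" })
```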
data/lib/openai/http_headers.rb CHANGED
@@ -19,7 +19,7 @@
         "Content-Type" => "application/json",
         "Authorization" => "Bearer #{@access_token}",
         "OpenAI-Organization" => @organization_id
-      }
+      }.compact
     end
 
     def azure_headers
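The `.compact` here is the changelog's "skip sending nil headers" fix: `Hash#compact` drops nil-valued entries, so a client configured without an `organization_id` no longer sends an empty `OpenAI-Organization` header. In plain Ruby:

```ruby
headers = {
  "Content-Type" => "application/json",
  "Authorization" => "Bearer sk-...", # truncated placeholder token
  "OpenAI-Organization" => nil        # organization_id was never configured
}

headers.compact
# => {"Content-Type"=>"application/json", "Authorization"=>"Bearer sk-..."}
```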
data/lib/openai/messages.rb CHANGED
@@ -1,7 +1,7 @@
 module OpenAI
   class Messages
     def initialize(client:)
-      @client = client.beta(assistants: "v1")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
     def list(thread_id:, parameters: {})
data/lib/openai/run_steps.rb CHANGED
@@ -1,7 +1,7 @@
 module OpenAI
   class RunSteps
     def initialize(client:)
-      @client = client.beta(assistants: "v1")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
     def list(thread_id:, run_id:, parameters: {})
data/lib/openai/runs.rb CHANGED
@@ -1,7 +1,7 @@
 module OpenAI
   class Runs
     def initialize(client:)
-      @client = client.beta(assistants: "v1")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
     def list(thread_id:, parameters: {})
data/lib/openai/threads.rb CHANGED
@@ -1,7 +1,7 @@
 module OpenAI
   class Threads
     def initialize(client:)
-      @client = client.beta(assistants: "v1")
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
     end
 
     def retrieve(id:)
data/lib/openai/vector_store_file_batches.rb ADDED
@@ -0,0 +1,29 @@
+module OpenAI
+  class VectorStoreFileBatches
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, id:, parameters: {})
+      @client.get(
+        path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/files",
+        parameters: parameters
+      )
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(
+        path: "/vector_stores/#{vector_store_id}/file_batches",
+        parameters: parameters
+      )
+    end
+
+    def cancel(vector_store_id:, id:)
+      @client.post(path: "/vector_stores/#{vector_store_id}/file_batches/#{id}/cancel")
+    end
+  end
+end
data/lib/openai/vector_store_files.rb ADDED
@@ -0,0 +1,23 @@
+module OpenAI
+  class VectorStoreFiles
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(vector_store_id:, parameters: {})
+      @client.get(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def retrieve(vector_store_id:, id:)
+      @client.get(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+
+    def create(vector_store_id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{vector_store_id}/files", parameters: parameters)
+    end
+
+    def delete(vector_store_id:, id:)
+      @client.delete(path: "/vector_stores/#{vector_store_id}/files/#{id}")
+    end
+  end
+end
data/lib/openai/vector_stores.rb ADDED
@@ -0,0 +1,27 @@
+module OpenAI
+  class VectorStores
+    def initialize(client:)
+      @client = client.beta(assistants: OpenAI::Assistants::BETA_VERSION)
+    end
+
+    def list(parameters: {})
+      @client.get(path: "/vector_stores", parameters: parameters)
+    end
+
+    def retrieve(id:)
+      @client.get(path: "/vector_stores/#{id}")
+    end
+
+    def create(parameters: {})
+      @client.json_post(path: "/vector_stores", parameters: parameters)
+    end
+
+    def modify(id:, parameters: {})
+      @client.json_post(path: "/vector_stores/#{id}", parameters: parameters)
+    end
+
+    def delete(id:)
+      @client.delete(path: "/vector_stores/#{id}")
+    end
+  end
+end
data/lib/openai/version.rb CHANGED
@@ -1,3 +1,3 @@
 module OpenAI
-  VERSION = "7.0.0".freeze
+  VERSION = "7.1.0".freeze
 end
data/lib/openai.rb CHANGED
@@ -12,6 +12,9 @@ require_relative "openai/threads"
 require_relative "openai/messages"
 require_relative "openai/runs"
 require_relative "openai/run_steps"
+require_relative "openai/vector_stores"
+require_relative "openai/vector_store_files"
+require_relative "openai/vector_store_file_batches"
 require_relative "openai/audio"
 require_relative "openai/version"
 require_relative "openai/batches"
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version: 7.0.0
+  version: 7.1.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-04-27 00:00:00.000000000 Z
+date: 2024-06-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: event_stream_parser
@@ -102,6 +102,9 @@ files:
 - lib/openai/run_steps.rb
 - lib/openai/runs.rb
 - lib/openai/threads.rb
+- lib/openai/vector_store_file_batches.rb
+- lib/openai/vector_store_files.rb
+- lib/openai/vector_stores.rb
 - lib/openai/version.rb
 - lib/ruby/openai.rb
 - pull_request_template.md
@@ -130,7 +133,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.9
+rubygems_version: 3.5.11
 signing_key:
 specification_version: 4
 summary: "OpenAI API + Ruby! \U0001F916❤️"