ruby-openai 4.2.0 → 4.3.0
- checksums.yaml +4 -4
- data/.gitignore +65 -7
- data/CHANGELOG.md +6 -0
- data/Gemfile.lock +2 -2
- data/README.md +76 -4
- data/lib/openai/client.rb +3 -1
- data/lib/openai/http.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +2 -1
- metadata +2 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 666cf605fbc981b59131ef5005ec32fd0eabd604f924d907ae0ee0d707b739bb
+  data.tar.gz: fe938931bad97952bd8829ab1ec336896c12ed247c18bc62f3df116917919498
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2799064a7bbfc79dc47ea4168b0a7755e53aad89e3f3d55a79defba569592141aca70bdfd86747d1690b4ab72acf0d542844d6dd7f1fc8e235c6e3bf7f911854
+  data.tar.gz: d81441ee5b5a1b4070c438ae02f363b95f133d571971c9876bd1cc239225cbd193955c4ce23efbba0c45af6d2726d06bd6d27b418a2a682edcf9de5c1c701b7f
data/.gitignore
CHANGED

@@ -1,16 +1,74 @@
-
-
-
+### Ruby ###
+*.gem
+*.rbc
+/.config
 /coverage/
-/
+/InstalledFiles
 /pkg/
 /spec/reports/
+/spec/examples.txt
+/test/tmp/
+/test/version_tmp/
 /tmp/
+/.bundle/
+/.yardoc
+/_yardoc/
+/doc/
+
+
+# Used by dotenv library to load environment variables.
+.env
+
+# Ignore Byebug command history file.
+.byebug_history
+
+## Specific to RubyMotion:
+.dat*
+.repl_history
+build/
+*.bridgesupport
+build-iPhoneOS/
+build-iPhoneSimulator/
+
+## Specific to RubyMotion (use of CocoaPods):
+#
+# We recommend against adding the Pods directory to your .gitignore. However
+# you should judge for yourself, the pros and cons are mentioned at:
+# https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
+# vendor/Pods/
+
+## Documentation cache and generated files:
+/.yardoc/
+/_yardoc/
+/doc/
+/rdoc/
+
+## Environment normalization:
+/.bundle/
+/vendor/bundle
+/lib/bundler/man/
+
+# for a library or gem, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# Gemfile.lock
+# .ruby-version
+# .ruby-gemset
+
+# unless supporting rvm < 1.11.0 or doing something fancy, ignore this:
+.rvmrc
+
+# Used by RuboCop. Remote config files pulled in from inherit_from directive.
+# .rubocop-https?--*
 
 # rspec failure tracking
 .rspec_status
 
-
-.
+# IDE
+.idea
+.idea/
+.idea/*
+.vscode
+.vs/
 
-
+# Mac
+.DS_Store
data/CHANGELOG.md
CHANGED

@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [4.3.0] - 2023-06-20
+
+### Added
+
+- Add `extra_headers` to config to allow setting the openai-caching-proxy-worker TTL, Helicone Auth, and anything else you need. Thanks to [@deltaguita](https://github.com/deltaguita) and [@marckohlbrugge](https://github.com/marckohlbrugge) for the PR!
+
 ## [4.2.0] - 2023-06-20
 
 ### Added
data/Gemfile.lock
CHANGED

@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    ruby-openai (4.
+    ruby-openai (4.3.0)
       faraday (>= 1)
       faraday-multipart (>= 1)
 
@@ -16,7 +16,7 @@ GEM
       rexml
     diff-lcs (1.5.0)
     dotenv (2.8.1)
-    faraday (2.7.
+    faraday (2.7.10)
       faraday-net_http (>= 2.0, < 3.1)
       ruby2_keywords (>= 0.0.4)
     faraday-multipart (1.0.4)
data/README.md
CHANGED

@@ -70,13 +70,18 @@ client = OpenAI::Client.new
 
 #### Custom timeout or base URI
 
-The default timeout for any request using this library is 120 seconds. You can change that by passing a number of seconds to the `request_timeout` when initializing the client. You can also change the base URI used for all requests, eg. to use observability tools like [Helicone](https://docs.helicone.ai/quickstart/integrate-in-one-line-of-code):
+The default timeout for any request using this library is 120 seconds. You can change that by passing a number of seconds to `request_timeout` when initializing the client. You can also change the base URI used for all requests, e.g. to use observability tools like [Helicone](https://docs.helicone.ai/quickstart/integrate-in-one-line-of-code), and add arbitrary other headers, e.g. for [openai-caching-proxy-worker](https://github.com/6/openai-caching-proxy-worker):
 
 ```ruby
 client = OpenAI::Client.new(
     access_token: "access_token_goes_here",
     uri_base: "https://oai.hconeai.com/",
-    request_timeout: 240
+    request_timeout: 240,
+    extra_headers: {
+      "X-Proxy-TTL" => "43200", # For https://github.com/6/openai-caching-proxy-worker#specifying-a-cache-ttl
+      "X-Proxy-Refresh": "true", # For https://github.com/6/openai-caching-proxy-worker#refreshing-the-cache
+      "Helicone-Auth": "Bearer HELICONE_API_KEY" # For https://docs.helicone.ai/getting-started/integration-method/openai-proxy
+    }
 )
 ```
 
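Note: one Ruby subtlety in the `extra_headers` example above is easy to copy without noticing — `"X-Proxy-TTL" => "43200"` creates a String key, while the `"X-Proxy-Refresh": "true"` shorthand creates a Symbol key. Faraday appears to accept header names in either form, but a quick illustration of the difference:

```ruby
h = { "X-Proxy-TTL" => "43200", "X-Proxy-Refresh": "true" }
h.keys              # => ["X-Proxy-TTL", :"X-Proxy-Refresh"]
h.keys.map(&:class) # => [String, Symbol]
```
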
@@ -88,10 +93,15 @@ OpenAI.configure do |config|
   config.organization_id = ENV.fetch("OPENAI_ORGANIZATION_ID") # Optional
   config.uri_base = "https://oai.hconeai.com/" # Optional
   config.request_timeout = 240 # Optional
+  config.extra_headers = {
+    "X-Proxy-TTL" => "43200", # For https://github.com/6/openai-caching-proxy-worker#specifying-a-cache-ttl
+    "X-Proxy-Refresh": "true", # For https://github.com/6/openai-caching-proxy-worker#refreshing-the-cache
+    "Helicone-Auth": "Bearer HELICONE_API_KEY" # For https://docs.helicone.ai/getting-started/integration-method/openai-proxy
+  } # Optional
 end
 ```
 
-
+#### Azure
 
 To use the [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) API, you can configure the gem like this:
 
@@ -164,6 +174,68 @@ client.chat(
 # => "Anna is a young woman in her mid-twenties, with wavy chestnut hair that falls to her shoulders..."
 ```
 
+Note: the API docs state that token usage is included in the streamed chat chunk objects, but this doesn't currently appear to be the case. If you need to work out how many tokens are being used while streaming, try [tiktoken_ruby](https://github.com/IAPark/tiktoken_ruby).
+
+### Functions
+
+You can describe and pass in functions, and the model will intelligently choose to output a JSON object containing arguments to call them. For example, if you want the model to use your method `get_current_weather` to get the current weather in a given location:
+
+```ruby
+def get_current_weather(location:, unit: "fahrenheit")
+  # use a weather api to fetch weather
+end
+
+response =
+  client.chat(
+    parameters: {
+      model: "gpt-3.5-turbo-0613",
+      messages: [
+        {
+          "role": "user",
+          "content": "What is the weather like in San Francisco?",
+        },
+      ],
+      functions: [
+        {
+          name: "get_current_weather",
+          description: "Get the current weather in a given location",
+          parameters: {
+            type: :object,
+            properties: {
+              location: {
+                type: :string,
+                description: "The city and state, e.g. San Francisco, CA",
+              },
+              unit: {
+                type: "string",
+                enum: %w[celsius fahrenheit],
+              },
+            },
+            required: ["location"],
+          },
+        },
+      ],
+    },
+  )
+
+message = response.dig("choices", 0, "message")
+
+if message["role"] == "assistant" && message["function_call"]
+  function_name = message.dig("function_call", "name")
+  args =
+    JSON.parse(
+      message.dig("function_call", "arguments"),
+      { symbolize_names: true },
+    )
+
+  case function_name
+  when "get_current_weather"
+    get_current_weather(**args)
+  end
+end
+# => "The weather is nice 🌞"
+```
+
 ### Completions
 
 Hit the OpenAI API for a completion using other GPT-3 models:
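Regarding the streaming note added above: a rough sketch of counting tokens client-side with tiktoken_ruby might look like the following. The `Tiktoken.encoding_for_model` and `encode` calls are from that gem's documented API, and the stream proc signature matches this README's streaming examples; treat it as a sketch rather than a definitive recipe.

```ruby
require "tiktoken_ruby"

enc = Tiktoken.encoding_for_model("gpt-3.5-turbo")
total_tokens = 0

client.chat(
  parameters: {
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Describe a character called Anna!" }],
    stream: proc do |chunk, _bytesize|
      content = chunk.dig("choices", 0, "delta", "content").to_s
      # Counting each delta separately only approximates the true total:
      # BPE can tokenize the joined text slightly differently.
      total_tokens += enc.encode(content).length
      print(content)
    end
  }
)

total_tokens # rough count of completion tokens streamed
```
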
@@ -202,7 +274,7 @@ You can use the embeddings endpoint to get a vector of numbers representing an i
 ```ruby
 response = client.embeddings(
     parameters: {
-        model: "
+        model: "text-embedding-ada-002",
         input: "The food was delicious and the waiter..."
     }
 )
data/lib/openai/client.rb
CHANGED

@@ -2,11 +2,13 @@ module OpenAI
   class Client
     extend OpenAI::HTTP
 
-    def initialize(access_token: nil, organization_id: nil, uri_base: nil, request_timeout: nil
+    def initialize(access_token: nil, organization_id: nil, uri_base: nil, request_timeout: nil,
+                   extra_headers: {})
       OpenAI.configuration.access_token = access_token if access_token
       OpenAI.configuration.organization_id = organization_id if organization_id
       OpenAI.configuration.uri_base = uri_base if uri_base
       OpenAI.configuration.request_timeout = request_timeout if request_timeout
+      OpenAI.configuration.extra_headers = extra_headers
     end
 
     def chat(parameters: {})
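Note that, unlike the other keyword arguments, `extra_headers` is written to the shared configuration unconditionally — there is no `if extra_headers` guard — so constructing a client without the argument resets any globally configured headers to `{}`. A sketch of the behavior this hunk implies, assuming the global configuration below is set first:

```ruby
OpenAI.configure do |config|
  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
  config.extra_headers = { "Helicone-Auth" => "Bearer HELICONE_API_KEY" }
end

client = OpenAI::Client.new(extra_headers: { "X-Proxy-TTL" => "43200" })
OpenAI.configuration.extra_headers # => { "X-Proxy-TTL" => "43200" }

plain = OpenAI::Client.new            # no extra_headers argument...
OpenAI.configuration.extra_headers    # => {} — the global setting is overwritten
```
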
data/lib/openai/http.rb
CHANGED

@@ -85,7 +85,7 @@ module OpenAI
         "Content-Type" => "application/json",
         "Authorization" => "Bearer #{OpenAI.configuration.access_token}",
         "OpenAI-Organization" => OpenAI.configuration.organization_id
-      }
+      }.merge(OpenAI.configuration.extra_headers)
     end
 
     def azure_headers
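Because the defaults are combined with `Hash#merge`, a key in `extra_headers` that collides with a default wins, so the setting can override the standard headers as well as extend them. A standalone illustration of Ruby's merge semantics (the header values here are made up):

```ruby
defaults = {
  "Content-Type"  => "application/json",
  "Authorization" => "Bearer sk-..."
}
extra = { "Authorization" => "Bearer other-token", "X-Proxy-TTL" => "43200" }

# The argument's value wins on key collision:
defaults.merge(extra)
# => {"Content-Type"=>"application/json",
#     "Authorization"=>"Bearer other-token",
#     "X-Proxy-TTL"=>"43200"}
```
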
data/lib/openai/version.rb
CHANGED

@@ -1,3 +1,3 @@
 module OpenAI
-  VERSION = "4.2.0".freeze
+  VERSION = "4.3.0".freeze
 end
data/lib/openai.rb
CHANGED

@@ -15,7 +15,8 @@ module OpenAI
 
   class Configuration
     attr_writer :access_token
-    attr_accessor :api_type, :api_version, :organization_id, :uri_base, :request_timeout
+    attr_accessor :api_type, :api_version, :organization_id, :uri_base, :request_timeout,
+                  :extra_headers
 
     DEFAULT_API_VERSION = "v1".freeze
     DEFAULT_URI_BASE = "https://api.openai.com/".freeze
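One thing this hunk does not add is a default value: `Configuration` only gains the accessor, so `OpenAI.configuration.extra_headers` stays `nil` until something assigns it (constructing an `OpenAI::Client` does, via its `extra_headers: {}` default). If that observation holds for the rest of the file, a configure-only setup should set an explicit empty hash, since `headers.merge(nil)` raises a `TypeError`:

```ruby
OpenAI.configure do |config|
  config.access_token = ENV.fetch("OPENAI_ACCESS_TOKEN")
  config.extra_headers = {} # explicit default, in case no Client.new ever runs
end
```
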
metadata
CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: ruby-openai
 version: !ruby/object:Gem::Version
-  version: 4.
+  version: 4.3.0
 platform: ruby
 authors:
 - Alex
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-
+date: 2023-08-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: faraday
|