openai.rb 0.0.0 → 0.0.1
- checksums.yaml +4 -4
- data/.ruby-version +1 -1
- data/Gemfile +4 -4
- data/Gemfile.lock +17 -14
- data/README.md +401 -0
- data/bin/codegen +64 -55
- data/bin/console +7 -1
- data/lib/openai/api/cache.rb +137 -0
- data/lib/openai/api/client.rb +86 -0
- data/lib/openai/api/resource.rb +235 -0
- data/lib/openai/api/response.rb +352 -0
- data/lib/openai/api.rb +61 -0
- data/lib/openai/chat.rb +75 -0
- data/lib/openai/tokenizer.rb +50 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +29 -358
- data/openai.gemspec +7 -3
- data/spec/data/sample_french.mp3 +0 -0
- data/spec/data/sample_image.png +0 -0
- data/spec/data/sample_image_mask.png +0 -0
- data/spec/shared/api_resource_context.rb +22 -0
- data/spec/spec_helper.rb +4 -0
- data/spec/unit/openai/api/audio_spec.rb +78 -0
- data/spec/unit/openai/api/cache_spec.rb +115 -0
- data/spec/unit/openai/api/chat_completions_spec.rb +116 -0
- data/spec/unit/openai/api/completions_spec.rb +119 -0
- data/spec/unit/openai/api/edits_spec.rb +40 -0
- data/spec/unit/openai/api/embeddings_spec.rb +45 -0
- data/spec/unit/openai/api/files_spec.rb +163 -0
- data/spec/unit/openai/api/fine_tunes_spec.rb +322 -0
- data/spec/unit/openai/api/images_spec.rb +137 -0
- data/spec/unit/openai/api/models_spec.rb +98 -0
- data/spec/unit/openai/api/moderations_spec.rb +61 -0
- data/spec/unit/openai/api/response_spec.rb +203 -0
- data/spec/unit/openai/tokenizer_spec.rb +45 -0
- data/spec/unit/openai_spec.rb +47 -736
- metadata +83 -2
data/spec/unit/openai/api/chat_completions_spec.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+RSpec.describe OpenAI::API, '#chat_completions' do
+  include_context 'an API Resource'
+
+  let(:resource) { api.chat_completions }
+  let(:response_body) do
+    {
+      "id": 'chatcmpl-123',
+      "object": 'chat.completion',
+      "created": 1_677_652_288,
+      "choices": [
+        {
+          "index": 0,
+          "message": {
+            "role": 'assistant',
+            "content": "\n\nHello there, how may I assist you today?"
+          },
+          "finish_reason": 'stop'
+        }
+      ],
+      "usage": {
+        "prompt_tokens": 9,
+        "completion_tokens": 12,
+        "total_tokens": 21
+      }
+    }
+  end
+
+  it 'can create a chat completion' do
+    messages = [
+      { "text": 'Hello there!', "user": 'customer' },
+      { "text": 'Can you help me with my order?', "user": 'customer' },
+      { "text": 'Sure, what would you like to do?', "user": 'assistant' }
+    ]
+    completion = resource.create(model: 'text-davinci-002', messages: messages)
+
+    expect(completion.id).to eql('chatcmpl-123')
+    expect(completion.choices.first.index).to eql(0)
+    expect(completion.choices.first.message.role).to eql('assistant')
+    expect(completion.choices.first.message.content).to eql("\n\nHello there, how may I assist you today?")
+    expect(completion.choices.first.finish_reason).to eql('stop')
+    expect(completion.usage.prompt_tokens).to eql(9)
+    expect(completion.usage.completion_tokens).to eql(12)
+    expect(completion.usage.total_tokens).to eql(21)
+  end
+
+  it 'raises when a block is given for a non-streaming request' do
+    expect { resource.create(model: 'text-davinci-002', messages: []) { print 'noop' } }
+      .to raise_error('Non-streaming responses do not support blocks')
+  end
+
+  context 'when streaming is enabled' do
+    let(:response_chunks) do
+      [
+        chunk(role: 'assistant'),
+        chunk(content: 'He'),
+        chunk(content: 'llo,'),
+        chunk(content: ' world'),
+        chunk({ content: '!' }, finish_reason: 'stop')
+      ]
+    end
+
+    let(:response) do
+      instance_double(
+        HTTP::Response,
+        status: HTTP::Response::Status.new(response_status_code),
+        body: response_body
+      )
+    end
+
+    let(:response_body) do
+      instance_double(HTTP::Response::Body).tap do |double|
+        allow(double).to receive(:each)
+          .and_yield(response_chunks.first)
+          .and_yield(response_chunks[1])
+          .and_yield(response_chunks[2])
+          .and_yield(response_chunks[3])
+          .and_yield(response_chunks[4])
+          .and_yield('data: [DONE]')
+      end
+    end
+
+    before do
+      allow(http).to receive(:persistent).and_yield(http)
+    end
+
+    def chunk(delta, finish_reason: nil)
+      data = {
+        id: 'chatcmpl-6y5rBH2fvMeGqAAH81Wkp8QdqESEx',
+        object: 'chat.completion.chunk',
+        created: 1_679_780_213,
+        model: 'gpt-3.5-turbo-0301',
+        choices: [delta: delta, index: 0, finish_reason: finish_reason]
+      }
+
+      "data: #{JSON.dump(data)}"
+    end
+
+    it 'yields chunks as they are served' do
+      chunks = []
+      resource.create(model: 'text-davinci-002', messages: [], stream: true) do |chunk|
+        chunks << chunk
+      end
+
+      expect(chunks).to all(be_an_instance_of(OpenAI::API::Response::ChatCompletionChunk))
+      texts = chunks.map { |chunk| chunk.choices.first.delta.content }
+      expect(texts.join('')).to eql('Hello, world!')
+    end
+
+    it 'raises when a block is not given' do
+      expect { resource.create(model: 'text-davinci-002', messages: [], stream: true) }
+        .to raise_error('Streaming responses require a block')
+    end
+  end
+end
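The spec above pins down the chat completions surface: create returns a single response object, while stream: true requires a block that receives ChatCompletionChunk objects. A minimal consumer-side sketch follows; the api setup and model name are assumptions (the shared 'an API Resource' context that provides api is not part of this diff), and only the resource calls themselves are taken from the spec.

# Sketch only: assume `api` is an OpenAI::API instance wired to an
# authenticated client; the shared 'an API Resource' context that builds it
# in the spec is not shown in this diff, so the setup is omitted here.
messages = [{ role: 'user', content: 'Hello there!' }]

# Non-streaming: returns a single response object exposing #choices and #usage.
completion = api.chat_completions.create(model: 'gpt-3.5-turbo', messages: messages)
puts completion.choices.first.message.content

# Streaming: stream: true requires a block and yields chunks as they arrive;
# calling it without a block raises, as asserted above.
api.chat_completions.create(model: 'gpt-3.5-turbo', messages: messages, stream: true) do |chunk|
  print chunk.choices.first.delta.content
end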
data/spec/unit/openai/api/completions_spec.rb
@@ -0,0 +1,119 @@
+# frozen_string_literal: true
+
+RSpec.describe OpenAI::API, '#completions' do
+  include_context 'an API Resource'
+
+  let(:resource) { api.completions }
+
+  let(:response_body) do
+    {
+      "id": 'cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7',
+      "object": 'text_completion',
+      "created": 1_589_478_378,
+      "model": 'text-davinci-003',
+      "choices": [
+        {
+          "text": "\n\nThis is indeed a test",
+          "index": 0,
+          "logprobs": nil,
+          "finish_reason": 'length'
+        }
+      ],
+      "usage": {
+        "prompt_tokens": 5,
+        "completion_tokens": 7,
+        "total_tokens": 12
+      }
+    }
+  end
+
+  it 'can create a completion' do
+    completion = resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
+
+    expect(http)
+      .to have_received(:post)
+      .with('https://api.openai.com/v1/completions', hash_including(:json))
+
+    expect(completion.id).to eql('cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7')
+    expect(completion.model).to eql('text-davinci-003')
+    expect(completion.choices.first.text).to eql("\n\nThis is indeed a test")
+    expect(completion.choices.first.index).to eql(0)
+    expect(completion.choices.first.logprobs).to be_nil
+    expect(completion.choices.first.finish_reason).to eql('length')
+    expect(completion.usage.prompt_tokens).to eql(5)
+    expect(completion.usage.completion_tokens).to eql(7)
+    expect(completion.usage.total_tokens).to eql(12)
+  end
+
+  it 'raises when a block is given for a non-streaming request' do
+    expect { resource.create(model: 'text-davinci-002', prompt: 'Hello, world!') { print 'noop' } }
+      .to raise_error('Non-streaming responses do not support blocks')
+  end
+
+  context 'when streaming is enabled' do
+    let(:response_chunks) do
+      [
+        chunk('He'),
+        chunk('llo,'),
+        chunk(' world'),
+        chunk('!', finish_reason: 'stop')
+      ]
+    end
+
+    let(:response) do
+      instance_double(
+        HTTP::Response,
+        status: HTTP::Response::Status.new(response_status_code),
+        body: response_body
+      )
+    end
+
+    let(:response_body) do
+      instance_double(HTTP::Response::Body).tap do |double|
+        allow(double).to receive(:each)
+          .and_yield(response_chunks.first)
+          .and_yield(response_chunks[1])
+          .and_yield(response_chunks[2])
+          .and_yield(response_chunks[3])
+          .and_yield('data: [DONE]')
+      end
+    end
+
+    before do
+      allow(http).to receive(:persistent).and_yield(http)
+    end
+
+    def chunk(text, finish_reason: nil)
+      data = {
+        "id": 'cmpl-6y5B6Ak8wBk2nKsqVtSlFeJAG1dUM',
+        "object": 'text_completion',
+        "created": 1_679_777_604,
+        "choices": [{
+          "text": text,
+          "index": 0,
+          "logprobs": nil,
+          "finish_reason": finish_reason
+        }],
+        "model": 'text-davinci-002'
+      }
+
+      "data: #{JSON.dump(data)}"
+    end
+
+    it 'yields chunks as they are served' do
+      chunks = []
+      resource.create(model: 'text-davinci-002', prompt: 'Hello, world!', stream: true) do |chunk|
+        chunks << chunk
+      end
+
+      expect(chunks).to all(be_an_instance_of(OpenAI::API::Response::Completion))
+      texts = chunks.map { |chunk| chunk.choices.first.text }
+      expect(texts.join('')).to eql('Hello, world!')
+    end
+
+    it 'raises when a block is not given' do
+      expect { resource.create(model: 'text-davinci-002', prompt: 'Hello, world!', stream: true) }
+        .to raise_error('Streaming responses require a block')
+    end
+  end
+end
data/spec/unit/openai/api/edits_spec.rb
@@ -0,0 +1,40 @@
+# frozen_string_literal: true
+
+RSpec.describe OpenAI::API, '#edits' do
+  include_context 'an API Resource'
+
+  let(:resource) { api.edits }
+  let(:response_body) do
+    {
+      "object": 'edit',
+      "created": 1_589_478_378,
+      "choices": [
+        {
+          "text": 'What day of the week is it?',
+          "index": 0
+        }
+      ],
+      "usage": {
+        "prompt_tokens": 25,
+        "completion_tokens": 32,
+        "total_tokens": 57
+      }
+    }
+  end
+
+  it 'can create an edit' do
+    edit = resource.create(model: 'text-davinci-002',
+                           instruction: 'Change "world" to "solar system" in the following text: "Hello, world!"')
+
+    expect(http)
+      .to have_received(:post)
+      .with('https://api.openai.com/v1/edits', hash_including(:json))
+
+    expect(edit.object).to eql('edit')
+    expect(edit.choices.first.text).to eql('What day of the week is it?')
+    expect(edit.choices.first.index).to eql(0)
+    expect(edit.usage.prompt_tokens).to eql(25)
+    expect(edit.usage.completion_tokens).to eql(32)
+    expect(edit.usage.total_tokens).to eql(57)
+  end
+end
data/spec/unit/openai/api/embeddings_spec.rb
@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+RSpec.describe OpenAI::API, '#embeddings' do
+  include_context 'an API Resource'
+
+  let(:resource) { api.embeddings }
+  let(:response_body) do
+    {
+      "object": 'list',
+      "data": [
+        {
+          "object": 'embedding',
+          "embedding": [
+            0.0023064255,
+            -0.009327292,
+            -0.0028842222
+          ],
+          "index": 0
+        }
+      ],
+      "model": 'text-embedding-ada-002',
+      "usage": {
+        "prompt_tokens": 8,
+        "total_tokens": 8
+      }
+    }
+  end
+
+  it 'can create an embedding' do
+    embedding = resource.create(model: 'text-embedding-ada-002', input: 'Hello, world!')
+
+    expect(http)
+      .to have_received(:post)
+      .with('https://api.openai.com/v1/embeddings', hash_including(:json))
+
+    expect(embedding.object).to eql('list')
+    expect(embedding.data.first.object).to eql('embedding')
+    expect(embedding.data.first.embedding.length).to eql(3)
+    expect(embedding.data.first.embedding.first).to eql(0.0023064255)
+    expect(embedding.data.first.index).to eql(0)
+    expect(embedding.model).to eql('text-embedding-ada-002')
+    expect(embedding.usage.prompt_tokens).to eql(8)
+    expect(embedding.usage.total_tokens).to eql(8)
+  end
+end
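For reference, a consumer-side sketch of the same call, showing where the vector lives in the response; the api object is assumed to come from an authenticated client, as in the shared context.

# Sketch only: assume `api` is an authenticated OpenAI::API instance.
# The vector is nested under data.first.embedding, as asserted above.
response = api.embeddings.create(model: 'text-embedding-ada-002', input: 'Hello, world!')
vector = response.data.first.embedding
puts "#{vector.length} dimensions, first component #{vector.first}"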
data/spec/unit/openai/api/files_spec.rb
@@ -0,0 +1,163 @@
+# frozen_string_literal: true
+
+RSpec.describe OpenAI::API, '#files' do
+  include_context 'an API Resource'
+
+  let(:resource) { api.files }
+  let(:sample_file) { OpenAISpec::SPEC_ROOT.join('data/sample.jsonl') }
+
+  context 'when creating a file' do
+    let(:response_body) do
+      {
+        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
+        "object": 'file',
+        "bytes": 140,
+        "created_at": 1_613_779_121,
+        "filename": 'sample.jsonl',
+        "purpose": 'fine-tune'
+      }
+    end
+
+    it 'can create a file' do
+      file = resource.create(
+        file: sample_file,
+        purpose: 'fine-tune'
+      )
+
+      expect(http)
+        .to have_received(:post)
+        .with(
+          'https://api.openai.com/v1/files',
+          hash_including(
+            form: hash_including(
+              {
+                file: instance_of(HTTP::FormData::File),
+                purpose: 'fine-tune'
+              }
+            )
+          )
+        )
+
+      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
+      expect(file.object).to eql('file')
+      expect(file.bytes).to eql(140)
+      expect(file.created_at).to eql(1_613_779_121)
+      expect(file.filename).to eql('sample.jsonl')
+      expect(file.purpose).to eql('fine-tune')
+      expect(file.deleted?).to be(nil)
+    end
+  end
+
+  context 'when listing a file' do
+    let(:response_body) do
+      {
+        "data": [
+          {
+            "id": 'file-ccdDZrC3iZVNiQVeEA6Z66wf',
+            "object": 'file',
+            "bytes": 175,
+            "created_at": 1_613_677_385,
+            "filename": 'train.jsonl',
+            "purpose": 'search'
+          },
+          {
+            "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
+            "object": 'file',
+            "bytes": 140,
+            "created_at": 1_613_779_121,
+            "filename": 'puppy.jsonl',
+            "purpose": 'search'
+          }
+        ],
+        "object": 'list'
+      }
+    end
+
+    it 'can get a list of files' do
+      files = resource.list
+
+      expect(http)
+        .to have_received(:get)
+        .with('https://api.openai.com/v1/files')
+
+      expect(files.data.size).to eql(2)
+      expect(files.data.first.id).to eql('file-ccdDZrC3iZVNiQVeEA6Z66wf')
+      expect(files.data.first.object).to eql('file')
+      expect(files.data.first.bytes).to eql(175)
+      expect(files.data.first.created_at).to eql(1_613_677_385)
+      expect(files.data.first.filename).to eql('train.jsonl')
+      expect(files.data.first.purpose).to eql('search')
+      expect(files.object).to eql('list')
+    end
+  end
+
+  context 'when deleting a file' do
+    let(:response_body) do
+      {
+        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
+        "object": 'file',
+        "deleted": true
+      }
+    end
+
+    it 'can delete a file' do
+      file = resource.delete('file-XjGxS3KTG0uNmNOK362iJua3')
+
+      expect(http)
+        .to have_received(:delete)
+        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3')
+
+      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
+      expect(file.object).to eql('file')
+      expect(file.deleted?).to be_truthy
+    end
+  end
+
+  context 'when fetching a file' do
+    let(:response_body) do
+      {
+        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
+        "object": 'file',
+        "bytes": 140,
+        "created_at": 1_613_779_657,
+        "filename": 'mydata.jsonl',
+        "purpose": 'fine-tune'
+      }
+    end
+
+    it 'can get a file' do
+      file = resource.fetch('file-XjGxS3KTG0uNmNOK362iJua3')
+
+      expect(http)
+        .to have_received(:get)
+        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3')
+
+      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
+      expect(file.object).to eql('file')
+      expect(file.bytes).to eql(140)
+      expect(file.created_at).to eql(1_613_779_657)
+      expect(file.filename).to eql('mydata.jsonl')
+      expect(file.purpose).to eql('fine-tune')
+    end
+  end
+
+  context 'when fetching a file contents' do
+    let(:response) do
+      instance_double(
+        HTTP::Response,
+        status: HTTP::Response::Status.new(200),
+        body: '(raw)'
+      )
+    end
+
+    it 'can get a file contents' do
+      response = resource.get_content('file-XjGxS3KTG0uNmNOK362iJua3')
+
+      expect(http)
+        .to have_received(:get)
+        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content')
+
+      expect(response).to eql('(raw)')
+    end
+  end
+end
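The files spec covers the full lifecycle: upload, list, fetch metadata, fetch raw content, delete. A consumer-side sketch of those calls, again assuming an api object from an authenticated client; the path and purpose values are illustrative, while the resource methods and endpoints come from the spec above.

require 'pathname'

# Sketch only: assume `api` is an authenticated OpenAI::API instance.
# The calls mirror the endpoints asserted in the spec above.
file = api.files.create(file: Pathname.new('sample.jsonl'), purpose: 'fine-tune')

api.files.list.data.each do |f|
  puts "#{f.id} #{f.filename} (#{f.bytes} bytes)"
end

metadata = api.files.fetch(file.id)        # GET /v1/files/:id
content  = api.files.get_content(file.id)  # GET /v1/files/:id/content (raw body)
deleted  = api.files.delete(file.id)       # DELETE /v1/files/:id
puts "#{metadata.filename}: deleted=#{deleted.deleted?}, #{content.bytesize} bytes"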