openai.rb 0.0.0 → 0.0.3

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/main.yml +27 -0
  3. data/.rubocop.yml +18 -0
  4. data/.ruby-version +1 -1
  5. data/Gemfile +9 -5
  6. data/Gemfile.lock +29 -24
  7. data/README.md +401 -0
  8. data/bin/console +9 -4
  9. data/lib/openai/api/cache.rb +137 -0
  10. data/lib/openai/api/client.rb +86 -0
  11. data/lib/openai/api/resource.rb +232 -0
  12. data/lib/openai/api/response.rb +384 -0
  13. data/lib/openai/api.rb +75 -0
  14. data/lib/openai/chat.rb +125 -0
  15. data/lib/openai/tokenizer.rb +50 -0
  16. data/lib/openai/util.rb +47 -0
  17. data/lib/openai/version.rb +1 -1
  18. data/lib/openai.rb +38 -357
  19. data/openai.gemspec +9 -3
  20. data/spec/data/sample_french.mp3 +0 -0
  21. data/spec/data/sample_image.png +0 -0
  22. data/spec/data/sample_image_mask.png +0 -0
  23. data/spec/shared/api_resource_context.rb +22 -0
  24. data/spec/spec_helper.rb +4 -0
  25. data/spec/unit/openai/api/audio_spec.rb +78 -0
  26. data/spec/unit/openai/api/cache_spec.rb +115 -0
  27. data/spec/unit/openai/api/chat_completions_spec.rb +130 -0
  28. data/spec/unit/openai/api/completions_spec.rb +125 -0
  29. data/spec/unit/openai/api/edits_spec.rb +40 -0
  30. data/spec/unit/openai/api/embeddings_spec.rb +45 -0
  31. data/spec/unit/openai/api/files_spec.rb +163 -0
  32. data/spec/unit/openai/api/fine_tunes_spec.rb +322 -0
  33. data/spec/unit/openai/api/images_spec.rb +137 -0
  34. data/spec/unit/openai/api/models_spec.rb +98 -0
  35. data/spec/unit/openai/api/moderations_spec.rb +63 -0
  36. data/spec/unit/openai/api/response_spec.rb +203 -0
  37. data/spec/unit/openai/chat_spec.rb +32 -0
  38. data/spec/unit/openai/tokenizer_spec.rb +45 -0
  39. data/spec/unit/openai_spec.rb +47 -736
  40. metadata +97 -7
  41. data/bin/codegen +0 -371
@@ -1,760 +1,71 @@
 # frozen_string_literal: true
 
 RSpec.describe OpenAI do
-  let(:client) { described_class.new('sk-123', http: http) }
-  let(:http) { class_spy(HTTP) }
-
-  before do
-    allow(http).to receive(:post).and_return(response)
-    allow(http).to receive(:get).and_return(response)
-    allow(http).to receive(:delete).and_return(response)
-  end
-
-  let(:response) do
-    instance_double(
-      HTTP::Response,
-      status: HTTP::Response::Status.new(200),
-      body: JSON.dump(response_body)
-    )
-  end
-
-  describe '#create_completion' do
-    let(:response_body) do
-      {
-        "id": 'cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7',
-        "object": 'text_completion',
-        "created": 1_589_478_378,
-        "model": 'text-davinci-003',
-        "choices": [
-          {
-            "text": "\n\nThis is indeed a test",
-            "index": 0,
-            "logprobs": nil,
-            "finish_reason": 'length'
-          }
-        ],
-        "usage": {
-          "prompt_tokens": 5,
-          "completion_tokens": 7,
-          "total_tokens": 12
-        }
-      }
-    end
-
-    it 'authenticates requests' do
-      client.create_completion(model: 'text-davinci-002', prompt: 'Hello, world!')
-
-      expect(http).to have_received(:headers).with(
-        hash_including(
-          'Authorization' => 'Bearer sk-123'
-        )
-      )
-    end
-
-    it 'can create a completion' do
-      completion = client.create_completion(model: 'text-davinci-002', prompt: 'Hello, world!')
-
-      expect(http)
-        .to have_received(:post)
-        .with('https://api.openai.com/v1/completions', hash_including(:json))
-
-      expect(completion.id).to eql('cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7')
-      expect(completion.model).to eql('text-davinci-003')
-      expect(completion.choices.first.text).to eql("\n\nThis is indeed a test")
-      expect(completion.choices.first.index).to eql(0)
-      expect(completion.choices.first.logprobs).to be_nil
-      expect(completion.choices.first.finish_reason).to eql('length')
-      expect(completion.usage.prompt_tokens).to eql(5)
-      expect(completion.usage.completion_tokens).to eql(7)
-      expect(completion.usage.total_tokens).to eql(12)
-    end
-  end
-
-  describe '#create_chat_completion' do
-    let(:response_body) do
-      {
-        "id": 'chatcmpl-123',
-        "object": 'chat.completion',
-        "created": 1_677_652_288,
-        "choices": [
-          {
-            "index": 0,
-            "message": {
-              "role": 'assistant',
-              "content": "\n\nHello there, how may I assist you today?"
-            },
-            "finish_reason": 'stop'
-          }
-        ],
-        "usage": {
-          "prompt_tokens": 9,
-          "completion_tokens": 12,
-          "total_tokens": 21
-        }
-      }
-    end
-
-    it 'can create a chat completion' do
-      messages = [
-        { "text": 'Hello there!', "user": 'customer' },
-        { "text": 'Can you help me with my order?', "user": 'customer' },
-        { "text": 'Sure, what would you like to do?', "user": 'assistant' }
-      ]
-      completion = client.create_chat_completion(model: 'text-davinci-002', messages: messages)
-
-      expect(completion.id).to eql('chatcmpl-123')
-      expect(completion.choices.first.index).to eql(0)
-      expect(completion.choices.first.message.role).to eql('assistant')
-      expect(completion.choices.first.message.content).to eql("\n\nHello there, how may I assist you today?")
-      expect(completion.choices.first.finish_reason).to eql('stop')
-      expect(completion.usage.prompt_tokens).to eql(9)
-      expect(completion.usage.completion_tokens).to eql(12)
-      expect(completion.usage.total_tokens).to eql(21)
-    end
-  end
-
-  describe '#create_embedding' do
-    let(:response_body) do
-      {
-        "object": 'list',
-        "data": [
-          {
-            "object": 'embedding',
-            "embedding": [
-              0.0023064255,
-              -0.009327292,
-              -0.0028842222
-            ],
-            "index": 0
-          }
-        ],
-        "model": 'text-embedding-ada-002',
-        "usage": {
-          "prompt_tokens": 8,
-          "total_tokens": 8
-        }
-      }
-    end
-
-    it 'can create an embedding' do
-      embedding = client.create_embedding(model: 'text-embedding-ada-002', input: 'Hello, world!')
-
-      expect(http)
-        .to have_received(:post)
-        .with('https://api.openai.com/v1/embeddings', hash_including(:json))
-
-      expect(embedding.object).to eql('list')
-      expect(embedding.data.first.object).to eql('embedding')
-      expect(embedding.data.first.embedding.length).to eql(3)
-      expect(embedding.data.first.embedding.first).to eql(0.0023064255)
-      expect(embedding.data.first.index).to eql(0)
-      expect(embedding.model).to eql('text-embedding-ada-002')
-      expect(embedding.usage.prompt_tokens).to eql(8)
-      expect(embedding.usage.total_tokens).to eql(8)
-    end
-  end
-
-  describe '#list_models' do
-    let(:response_body) do
-      {
-        data: [
-          {
-            id: 'model-id-0',
-            object: 'model',
-            owned_by: 'organization-owner',
-            permission: [1, 2, 3]
-          },
-          {
-            id: 'model-id-1',
-            object: 'model',
-            owned_by: 'organization-owner',
-            permission: [4, 5, 6]
-          },
-          {
-            id: 'model-id-2',
-            object: 'model',
-            owned_by: 'openai',
-            permission: [7, 8, 9]
-          }
-        ],
-        object: 'list'
-      }
-    end
-
-    it 'can list all models' do
-      models = client.list_models
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/models')
-
-      expect(models.data.length).to eql(3)
-
-      expect(models.data[0].id).to eql('model-id-0')
-      expect(models.data[0].object).to eql('model')
-      expect(models.data[0].owned_by).to eql('organization-owner')
-      expect(models.data[0].permission).to eql([1, 2, 3])
-
-      expect(models.data[1].id).to eql('model-id-1')
-      expect(models.data[1].object).to eql('model')
-      expect(models.data[1].owned_by).to eql('organization-owner')
-      expect(models.data[1].permission).to eql([4, 5, 6])
-
-      expect(models.data[2].id).to eql('model-id-2')
-      expect(models.data[2].object).to eql('model')
-      expect(models.data[2].owned_by).to eql('openai')
-      expect(models.data[2].permission).to eql([7, 8, 9])
-    end
-  end
-
-  describe '#get_model' do
-    let(:response_body) do
-      {
-        "id": 'text-davinci-002',
-        "object": 'model',
-        "owned_by": 'openai',
-        "permission": %w[
-          query
-          completions
-          models:read
-          models:write
-          engine:read
-          engine:write
-        ]
-      }
-    end
-
-    it 'can retrieve a model' do
-      model = client.get_model('text-davinci-002')
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/models/text-davinci-002')
-
-      expect(model.id).to eql('text-davinci-002')
-      expect(model.object).to eql('model')
-      expect(model.owned_by).to eql('openai')
-      expect(model.permission).to eql(%w[
-        query
-        completions
-        models:read
-        models:write
-        engine:read
-        engine:write
-      ])
-    end
-  end
-
-  describe '#create_edit' do
-    let(:response_body) do
-      {
-        "object": 'edit',
-        "created": 1_589_478_378,
-        "choices": [
-          {
-            "text": 'What day of the week is it?',
-            "index": 0
-          }
-        ],
-        "usage": {
-          "prompt_tokens": 25,
-          "completion_tokens": 32,
-          "total_tokens": 57
+  include_context 'an API Resource'
+
+  let(:resource) { api.completions }
+
+  let(:response_body) do
+    {
+      "id": 'cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7',
+      "object": 'text_completion',
+      "created": 1_589_478_378,
+      "model": 'text-davinci-003',
+      "choices": [
+        {
+          "text": "\n\nThis is indeed a test",
+          "index": 0,
+          "logprobs": nil,
+          "finish_reason": 'length'
         }
+      ],
+      "usage": {
+        "prompt_tokens": 5,
+        "completion_tokens": 7,
+        "total_tokens": 12
       }
-    end
-
-    it 'can create an edit' do
-      edit = client.create_edit(model: 'text-davinci-002',
-                                instruction: 'Change "world" to "solar system" in the following text: "Hello, world!"')
-
-      expect(http)
-        .to have_received(:post)
-        .with('https://api.openai.com/v1/edits', hash_including(:json))
-
-      expect(edit.object).to eql('edit')
-      expect(edit.choices.first.text).to eql('What day of the week is it?')
-      expect(edit.choices.first.index).to eql(0)
-      expect(edit.usage.prompt_tokens).to eql(25)
-      expect(edit.usage.completion_tokens).to eql(32)
-      expect(edit.usage.total_tokens).to eql(57)
-    end
+    }
   end
 
-  describe '#create_image_generation' do
-    let(:response_body) do
-      {
-        created: Time.now.to_i,
-        data: [
-          { url: 'https://example.com/image1.png' },
-          { url: 'https://example.com/image2.png' }
-        ]
-      }
-    end
+  it 'authenticates requests' do
+    resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
 
-    it 'can create an image generation' do
-      image_generation = client.create_image_generation(prompt: 'a bird in the forest', size: 512)
-
-      expect(http)
-        .to have_received(:post)
-        .with(
-          'https://api.openai.com/v1/images/generations',
-          hash_including(
-            json: hash_including(prompt: 'a bird in the forest', size: 512)
-          )
-        )
-
-      expect(image_generation.created).to be_within(1).of(Time.now.to_i)
-      expect(image_generation.data.map(&:url)).to contain_exactly('https://example.com/image1.png', 'https://example.com/image2.png')
-    end
-  end
-
-  describe '#create_file' do
-    let(:sample_file) { OpenAISpec::SPEC_ROOT.join('data/sample.jsonl') }
-
-    let(:response_body) do
-      {
-        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
-        "object": 'file',
-        "bytes": 140,
-        "created_at": 1_613_779_121,
-        "filename": 'sample.jsonl',
-        "purpose": 'fine-tune'
-      }
-    end
-
-    it 'can create a file' do
-      file = client.create_file(
-        file: sample_file,
-        purpose: 'fine-tune'
+    expect(http).to have_received(:headers).with(
+      hash_including(
+        'Authorization' => 'Bearer sk-123'
       )
-
-      expect(http)
-        .to have_received(:post)
-        .with(
-          'https://api.openai.com/v1/files',
-          hash_including(
-            form: hash_including(
-              {
-                file: instance_of(HTTP::FormData::File),
-                purpose: 'fine-tune'
-              }
-            )
-          )
-        )
-
-      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
-      expect(file.object).to eql('file')
-      expect(file.bytes).to eql(140)
-      expect(file.created_at).to eql(1_613_779_121)
-      expect(file.filename).to eql('sample.jsonl')
-      expect(file.purpose).to eql('fine-tune')
-      expect(file.deleted?).to be(nil)
-    end
-  end
-
-  describe '#list_files' do
-    let(:response_body) do
-      {
-        "data": [
-          {
-            "id": 'file-ccdDZrC3iZVNiQVeEA6Z66wf',
-            "object": 'file',
-            "bytes": 175,
-            "created_at": 1_613_677_385,
-            "filename": 'train.jsonl',
-            "purpose": 'search'
-          },
-          {
-            "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
-            "object": 'file',
-            "bytes": 140,
-            "created_at": 1_613_779_121,
-            "filename": 'puppy.jsonl',
-            "purpose": 'search'
-          }
-        ],
-        "object": 'list'
-      }
-    end
-
-    it 'can get a list of files' do
-      files = client.list_files
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/files')
-
-      expect(files.data.size).to eql(2)
-      expect(files.data.first.id).to eql('file-ccdDZrC3iZVNiQVeEA6Z66wf')
-      expect(files.data.first.object).to eql('file')
-      expect(files.data.first.bytes).to eql(175)
-      expect(files.data.first.created_at).to eql(1_613_677_385)
-      expect(files.data.first.filename).to eql('train.jsonl')
-      expect(files.data.first.purpose).to eql('search')
-      expect(files.object).to eql('list')
-    end
-  end
-
-  describe '#delete_file' do
-    let(:response_body) do
-      {
-        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
-        "object": 'file',
-        "deleted": true
-      }
-    end
-
-    it 'can delete a file' do
-      file = client.delete_file('file-XjGxS3KTG0uNmNOK362iJua3')
-
-      expect(http)
-        .to have_received(:delete)
-        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3')
-
-      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
-      expect(file.object).to eql('file')
-      expect(file.deleted?).to be_truthy
-    end
-  end
-
-  describe '#get_file' do
-    let(:response_body) do
-      {
-        "id": 'file-XjGxS3KTG0uNmNOK362iJua3',
-        "object": 'file',
-        "bytes": 140,
-        "created_at": 1_613_779_657,
-        "filename": 'mydata.jsonl',
-        "purpose": 'fine-tune'
-      }
-    end
-
-    it 'can get a file' do
-      file = client.get_file('file-XjGxS3KTG0uNmNOK362iJua3')
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3')
-
-      expect(file.id).to eql('file-XjGxS3KTG0uNmNOK362iJua3')
-      expect(file.object).to eql('file')
-      expect(file.bytes).to eql(140)
-      expect(file.created_at).to eql(1_613_779_657)
-      expect(file.filename).to eql('mydata.jsonl')
-      expect(file.purpose).to eql('fine-tune')
-    end
-  end
-
-  describe '#get_file_content' do
-    let(:response) do
-      instance_double(
-        HTTP::Response,
-        status: HTTP::Response::Status.new(200),
-        body: '(raw)'
-      )
-    end
-
-    it 'can get a file contents' do
-      response = client.get_file_content('file-XjGxS3KTG0uNmNOK362iJua3')
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/files/file-XjGxS3KTG0uNmNOK362iJua3/content')
-
-      expect(response).to eql('(raw)')
-    end
-  end
-
-  describe '#list_fine_tunes' do
-    let(:response_body) do
-      {
-        "object": 'list',
-        "data": [
-          {
-            "id": 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
-            "object": 'fine-tune',
-            "model": 'curie',
-            "created_at": 1_614_807_352,
-            "fine_tuned_model": nil,
-            "hyperparams": {},
-            "organization_id": 'org-...',
-            "result_files": [],
-            "status": 'pending',
-            "validation_files": [],
-            "training_files": [{}],
-            "updated_at": 1_614_807_352
-          },
-          {},
-          {}
-        ]
-      }
-    end
-
-    it 'can get a list of fine-tunes' do
-      fine_tunes = client.list_fine_tunes
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/fine-tunes')
-
-      expect(fine_tunes.object).to eql('list')
-      expect(fine_tunes.data.size).to eql(3)
-      expect(fine_tunes.data.first.id).to eql('ft-AF1WoRqd3aJAHsqc9NY7iL8F')
-      expect(fine_tunes.data.first.object).to eql('fine-tune')
-      expect(fine_tunes.data.first.model).to eql('curie')
-      expect(fine_tunes.data.first.created_at).to eql(1_614_807_352)
-      expect(fine_tunes.data.first.fine_tuned_model).to be_nil
-      expect(fine_tunes.data.first.hyperparams).to eql(
-        OpenAI::Response::FineTune::Hyperparams.new({})
-      )
-      expect(fine_tunes.data.first.organization_id).to eql('org-...')
-      expect(fine_tunes.data.first.result_files).to eql([])
-      expect(fine_tunes.data.first.status).to eql('pending')
-      expect(fine_tunes.data.first.validation_files).to eql([])
-      expect(fine_tunes.data.first.training_files).to eql(
-        [
-          OpenAI::Response::FineTune::File.new({})
-        ]
-      )
-      expect(fine_tunes.data.first.updated_at).to eql(1_614_807_352)
-    end
-  end
-
-  describe '#create_fine_tune' do
-    let(:response_body) do
-      {
-        "id": 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
-        "object": 'fine-tune',
-        "model": 'curie',
-        "created_at": 1_614_807_352,
-        "events": [
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_352,
-            "level": 'info',
-            "message": 'Job enqueued. Waiting for jobs ahead to complete. Queue number: 0.'
-          }
-        ],
-        "fine_tuned_model": nil,
-        "hyperparams": {
-          "batch_size": 4,
-          "learning_rate_multiplier": 0.1,
-          "n_epochs": 4,
-          "prompt_loss_weight": 0.1
-        },
-        "organization_id": 'org-...',
-        "result_files": [],
-        "status": 'pending',
-        "validation_files": [],
-        "training_files": [
-          {
-            "id": 'file-XGinujblHPwGLSztz8cPS8XY',
-            "object": 'file',
-            "bytes": 1_547_276,
-            "created_at": 1_610_062_281,
-            "filename": 'my-data-train.jsonl',
-            "purpose": 'fine-tune-train'
-          }
-        ],
-        "updated_at": 1_614_807_352
-      }
-    end
-
-    it 'can create a fine-tune' do
-      fine_tune = client.create_fine_tune(training_file: 'my-data-train.jsonl', model: 'curie')
-
-      expect(http)
-        .to have_received(:post)
-        .with('https://api.openai.com/v1/fine-tunes', hash_including(:json))
-
-      expect(fine_tune.id).to eql('ft-AF1WoRqd3aJAHsqc9NY7iL8F')
-      expect(fine_tune.model).to eql('curie')
-      expect(fine_tune.created_at).to eql(1_614_807_352)
-      expect(fine_tune.events.first.object).to eql('fine-tune-event')
-      expect(fine_tune.events.first.created_at).to eql(1_614_807_352)
-      expect(fine_tune.events.first.level).to eql('info')
-      expect(fine_tune.events.first.message).to eql('Job enqueued. Waiting for jobs ahead to complete. Queue number: 0.')
-      expect(fine_tune.fine_tuned_model).to be_nil
-      expect(fine_tune.hyperparams.batch_size).to eql(4)
-      expect(fine_tune.hyperparams.learning_rate_multiplier).to eql(0.1)
-      expect(fine_tune.hyperparams.n_epochs).to eql(4)
-      expect(fine_tune.hyperparams.prompt_loss_weight).to eql(0.1)
-      expect(fine_tune.organization_id).to eql('org-...')
-      expect(fine_tune.result_files).to be_empty
-      expect(fine_tune.status).to eql('pending')
-      expect(fine_tune.validation_files).to be_empty
-      expect(fine_tune.training_files.first.id).to eql('file-XGinujblHPwGLSztz8cPS8XY')
-      expect(fine_tune.training_files.first.object).to eql('file')
-      expect(fine_tune.training_files.first.bytes).to eql(1_547_276)
-      expect(fine_tune.training_files.first.created_at).to eql(1_610_062_281)
-      expect(fine_tune.training_files.first.filename).to eql('my-data-train.jsonl')
-      expect(fine_tune.training_files.first.purpose).to eql('fine-tune-train')
-      expect(fine_tune.updated_at).to eql(1_614_807_352)
-    end
-  end
-
-  describe '#get_fine_tune' do
-    let(:response_body) do
-      {
-        "id": 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
-        "object": 'fine-tune',
-        "model": 'curie',
-        "created_at": 1_614_807_352,
-        "events": [
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_352,
-            "level": 'info',
-            "message": 'Job enqueued. Waiting for jobs ahead to complete. Queue number: 0.'
-          },
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_356,
-            "level": 'info',
-            "message": 'Job started.'
-          },
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_861,
-            "level": 'info',
-            "message": 'Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20.'
-          },
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_864,
-            "level": 'info',
-            "message": 'Uploaded result files: file-QQm6ZpqdNwAaVC3aSz5sWwLT.'
-          },
-          {
-            "object": 'fine-tune-event',
-            "created_at": 1_614_807_864,
-            "level": 'info',
-            "message": 'Job succeeded.'
-          }
-        ],
-        "fine_tuned_model": 'curie:ft-acmeco-2021-03-03-21-44-20',
-        "hyperparams": {
-          "batch_size": 4,
-          "learning_rate_multiplier": 0.1,
-          "n_epochs": 4,
-          "prompt_loss_weight": 0.1
-        },
-        "organization_id": 'org-...',
-        "result_files": [
-          {
-            "id": 'file-QQm6ZpqdNwAaVC3aSz5sWwLT',
-            "object": 'file',
-            "bytes": 81_509,
-            "created_at": 1_614_807_863,
-            "filename": 'compiled_results.csv',
-            "purpose": 'fine-tune-results'
-          }
-        ],
-        "status": 'succeeded',
-        "validation_files": [],
-        "training_files": [
-          {
-            "id": 'file-XGinujblHPwGLSztz8cPS8XY',
-            "object": 'file',
-            "bytes": 1_547_276,
-            "created_at": 1_610_062_281,
-            "filename": 'my-data-train.jsonl',
-            "purpose": 'fine-tune-train'
-          }
-        ],
-        "updated_at": 1_614_807_865
-      }
-    end
-
-    it 'can get a fine-tune' do
-      fine_tune = client.get_fine_tune('ft-AF1WoRqd3aJAHsqc9NY7iL8F')
-
-      expect(http)
-        .to have_received(:get)
-        .with('https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F')
-
-      expect(fine_tune.id).to eql('ft-AF1WoRqd3aJAHsqc9NY7iL8F')
-      expect(fine_tune.model).to eql('curie')
-      expect(fine_tune.created_at).to eql(1_614_807_352)
-      expect(fine_tune.events.first.object).to eql('fine-tune-event')
-      expect(fine_tune.events.first.created_at).to eql(1_614_807_352)
-      expect(fine_tune.events.first.level).to eql('info')
-      expect(fine_tune.events.first.message).to eql('Job enqueued. Waiting for jobs ahead to complete. Queue number: 0.')
-      expect(fine_tune.fine_tuned_model).to eql('curie:ft-acmeco-2021-03-03-21-44-20')
-      expect(fine_tune.hyperparams.batch_size).to eql(4)
-      expect(fine_tune.hyperparams.learning_rate_multiplier).to eql(0.1)
-      expect(fine_tune.hyperparams.n_epochs).to eql(4)
-      expect(fine_tune.hyperparams.prompt_loss_weight).to eql(0.1)
-      expect(fine_tune.organization_id).to eql('org-...')
-      expect(fine_tune.result_files.first.id).to eql('file-QQm6ZpqdNwAaVC3aSz5sWwLT')
-      expect(fine_tune.result_files.first.object).to eql('file')
-      expect(fine_tune.result_files.first.bytes).to eql(81_509)
-      expect(fine_tune.result_files.first.created_at).to eql(1_614_807_863)
-      expect(fine_tune.result_files.first.filename).to eql('compiled_results.csv')
-      expect(fine_tune.result_files.first.purpose).to eql('fine-tune-results')
-      expect(fine_tune.status).to eql('succeeded')
-      expect(fine_tune.validation_files).to be_empty
-      expect(fine_tune.training_files.first.id).to eql('file-XGinujblHPwGLSztz8cPS8XY')
-      expect(fine_tune.training_files.first.object).to eql('file')
-      expect(fine_tune.training_files.first.bytes).to eql(1_547_276)
-      expect(fine_tune.training_files.first.created_at).to eql(1_610_062_281)
-      expect(fine_tune.training_files.first.filename).to eql('my-data-train.jsonl')
-      expect(fine_tune.training_files.first.purpose).to eql('fine-tune-train')
-      expect(fine_tune.updated_at).to eql(1_614_807_865)
-    end
+    )
   end
 
-  describe '#cancel_fine_tune' do
+  context 'when the request is not 2xx' do
     let(:response_body) do
       {
-        "id": 'ft-xhrpBbvVUzYGo8oUO1FY4nI7',
-        "status": 'cancelled'
+        "error": {
+          "message": "You didn't provide an API key.",
+          "type": 'invalid_request_error',
+          "param": nil,
+          "code": nil
+        }
       }
     end
 
-    it 'can cancel a fine-tune' do
-      fine_tune = client.cancel_fine_tune('ft-xhrpBbvVUzYGo8oUO1FY4nI7')
+    let(:response_status_code) { 401 }
 
-      expect(http)
-        .to have_received(:post)
-        .with('https://api.openai.com/v1/fine-tunes/ft-xhrpBbvVUzYGo8oUO1FY4nI7/cancel', json: {})
+    it 'raises an error' do
+      expect { resource.create(model: 'text-davinci-002', prompt: 'Hello, world!') }
+        .to raise_error(OpenAI::API::Error, <<~ERROR)
+          Unexpected response status! Expected 2xx but got: 401 Unauthorized
 
-      expect(fine_tune.id).to eql('ft-xhrpBbvVUzYGo8oUO1FY4nI7')
-      expect(fine_tune.status).to eql('cancelled')
-    end
-  end
+          Body:
 
-  describe '#transcribe_audio' do
-    let(:sample_audio) { OpenAISpec::SPEC_ROOT.join('data/sample.mp3') }
-
-    let(:response_body) do
-      {
-        "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that."
-      }
+          #{response.body}
+        ERROR
     end
 
-    it 'can transcribe audio' do
-      transcription = client.transcribe_audio(
-        file: sample_audio,
-        model: 'model-1234'
-      )
-
-      expect(http)
-        .to have_received(:post)
-        .with(
-          'https://api.openai.com/v1/audio/transcriptions',
-          hash_including(
-            form: hash_including(
-              {
-                file: instance_of(HTTP::FormData::File),
-                model: 'model-1234'
-              }
-            )
-          )
-        )
-
-      expect(transcription.text).to eql("Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.")
+    it 'includes the original HTTP response as an attribute on the error instance' do
+      resource.create(model: 'text-davinci-002', prompt: 'Hello, world!')
+    rescue OpenAI::API::Error => e
+      expect(e.http_response).to be(response)
     end
   end
 end