openai 4.22.0 → 4.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +26 -0
  2. package/README.md +27 -19
  3. package/index.d.mts +1 -0
  4. package/index.d.ts +1 -0
  5. package/index.d.ts.map +1 -1
  6. package/index.js.map +1 -1
  7. package/index.mjs.map +1 -1
  8. package/lib/ChatCompletionRunFunctions.test.js +15 -0
  9. package/lib/ChatCompletionRunFunctions.test.js.map +1 -1
  10. package/lib/ChatCompletionRunFunctions.test.mjs +15 -0
  11. package/lib/ChatCompletionRunFunctions.test.mjs.map +1 -1
  12. package/lib/ChatCompletionStream.d.ts +4 -0
  13. package/lib/ChatCompletionStream.d.ts.map +1 -1
  14. package/lib/ChatCompletionStream.js +23 -8
  15. package/lib/ChatCompletionStream.js.map +1 -1
  16. package/lib/ChatCompletionStream.mjs +23 -8
  17. package/lib/ChatCompletionStream.mjs.map +1 -1
  18. package/package.json +1 -1
  19. package/resources/beta/threads/runs/steps.d.ts +2 -2
  20. package/resources/chat/chat.d.ts +1 -0
  21. package/resources/chat/chat.d.ts.map +1 -1
  22. package/resources/chat/chat.js.map +1 -1
  23. package/resources/chat/chat.mjs.map +1 -1
  24. package/resources/chat/completions.d.ts +92 -5
  25. package/resources/chat/completions.d.ts.map +1 -1
  26. package/resources/chat/completions.js.map +1 -1
  27. package/resources/chat/completions.mjs.map +1 -1
  28. package/resources/chat/index.d.ts +1 -1
  29. package/resources/chat/index.d.ts.map +1 -1
  30. package/resources/chat/index.js.map +1 -1
  31. package/resources/chat/index.mjs.map +1 -1
  32. package/resources/completions.d.ts +6 -5
  33. package/resources/completions.d.ts.map +1 -1
  34. package/resources/completions.js.map +1 -1
  35. package/resources/completions.mjs.map +1 -1
  36. package/resources/files.d.ts +2 -1
  37. package/resources/files.d.ts.map +1 -1
  38. package/resources/files.js +2 -1
  39. package/resources/files.js.map +1 -1
  40. package/resources/files.mjs +2 -1
  41. package/resources/files.mjs.map +1 -1
  42. package/src/index.ts +1 -0
  43. package/src/lib/ChatCompletionRunFunctions.test.ts +15 -0
  44. package/src/lib/ChatCompletionStream.ts +25 -5
  45. package/src/resources/beta/threads/runs/steps.ts +2 -2
  46. package/src/resources/chat/chat.ts +1 -0
  47. package/src/resources/chat/completions.ts +105 -5
  48. package/src/resources/chat/index.ts +1 -0
  49. package/src/resources/completions.ts +6 -5
  50. package/src/resources/files.ts +2 -1
  51. package/src/version.ts +1 -1
  52. package/version.d.ts +1 -1
  53. package/version.js +1 -1
  54. package/version.mjs +1 -1
@@ -146,6 +146,7 @@ function* contentChoiceDeltas(
146
146
  yield {
147
147
  index,
148
148
  finish_reason: i === deltas.length - 1 ? 'stop' : null,
149
+ logprobs: null,
149
150
  delta: {
150
151
  role,
151
152
  content: deltas[i] ? `${deltas[i]}${i === deltas.length - 1 ? '' : ' '}` : null,
@@ -593,6 +594,7 @@ describe('resource completions', () => {
593
594
  {
594
595
  index: 0,
595
596
  finish_reason: 'function_call',
597
+ logprobs: null,
596
598
  message: {
597
599
  role: 'assistant',
598
600
  content: null,
@@ -645,6 +647,7 @@ describe('resource completions', () => {
645
647
  {
646
648
  index: 0,
647
649
  finish_reason: 'stop',
650
+ logprobs: null,
648
651
  message: {
649
652
  role: 'assistant',
650
653
  content: `it's raining`,
@@ -716,6 +719,7 @@ describe('resource completions', () => {
716
719
  {
717
720
  index: 0,
718
721
  finish_reason: 'function_call',
722
+ logprobs: null,
719
723
  message: {
720
724
  role: 'assistant',
721
725
  content: null,
@@ -808,6 +812,7 @@ describe('resource completions', () => {
808
812
  {
809
813
  index: 0,
810
814
  finish_reason: 'function_call',
815
+ logprobs: null,
811
816
  message: {
812
817
  role: 'assistant',
813
818
  content: null,
@@ -867,6 +872,7 @@ describe('resource completions', () => {
867
872
  {
868
873
  index: 0,
869
874
  finish_reason: 'stop',
875
+ logprobs: null,
870
876
  message: {
871
877
  role: 'assistant',
872
878
  content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`,
@@ -953,6 +959,7 @@ describe('resource completions', () => {
953
959
  {
954
960
  index: 0,
955
961
  finish_reason: 'function_call',
962
+ logprobs: null,
956
963
  message: {
957
964
  role: 'assistant',
958
965
  content: null,
@@ -1006,6 +1013,7 @@ describe('resource completions', () => {
1006
1013
  {
1007
1014
  index: 0,
1008
1015
  finish_reason: 'function_call',
1016
+ logprobs: null,
1009
1017
  message: {
1010
1018
  role: 'assistant',
1011
1019
  content: null,
@@ -1078,6 +1086,7 @@ describe('resource completions', () => {
1078
1086
  {
1079
1087
  index: 0,
1080
1088
  finish_reason: 'stop',
1089
+ logprobs: null,
1081
1090
  message: {
1082
1091
  role: 'assistant',
1083
1092
  content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`,
@@ -1164,6 +1173,7 @@ describe('resource completions', () => {
1164
1173
  {
1165
1174
  index: 0,
1166
1175
  finish_reason: 'function_call',
1176
+ logprobs: null,
1167
1177
  message: {
1168
1178
  role: 'assistant',
1169
1179
  content: null,
@@ -1241,6 +1251,7 @@ describe('resource completions', () => {
1241
1251
  {
1242
1252
  index: 0,
1243
1253
  finish_reason: 'function_call',
1254
+ logprobs: null,
1244
1255
  message: {
1245
1256
  role: 'assistant',
1246
1257
  content: null,
@@ -1291,6 +1302,7 @@ describe('resource completions', () => {
1291
1302
  {
1292
1303
  index: 0,
1293
1304
  finish_reason: 'function_call',
1305
+ logprobs: null,
1294
1306
  message: {
1295
1307
  role: 'assistant',
1296
1308
  content: null,
@@ -1360,6 +1372,7 @@ describe('resource completions', () => {
1360
1372
  {
1361
1373
  index: 0,
1362
1374
  finish_reason: 'stop',
1375
+ logprobs: null,
1363
1376
  message: {
1364
1377
  role: 'assistant',
1365
1378
  content: `it's raining`,
@@ -1436,6 +1449,7 @@ describe('resource completions', () => {
1436
1449
  {
1437
1450
  index: 0,
1438
1451
  finish_reason: 'function_call',
1452
+ logprobs: null,
1439
1453
  delta: {
1440
1454
  role: 'assistant',
1441
1455
  content: null,
@@ -2071,6 +2085,7 @@ describe('resource completions', () => {
2071
2085
  {
2072
2086
  index: 0,
2073
2087
  finish_reason: 'function_call',
2088
+ logprobs: null,
2074
2089
  delta: {
2075
2090
  role: 'assistant',
2076
2091
  content: null,
@@ -153,13 +153,22 @@ export class ChatCompletionStream
153
153
  Object.assign(snapshot, rest);
154
154
  }
155
155
 
156
- for (const { delta, finish_reason, index, ...other } of chunk.choices) {
156
+ for (const { delta, finish_reason, index, logprobs = null, ...other } of chunk.choices) {
157
157
  let choice = snapshot.choices[index];
158
158
  if (!choice) {
159
- snapshot.choices[index] = { finish_reason, index, message: delta, ...other };
159
+ snapshot.choices[index] = { finish_reason, index, message: delta, logprobs, ...other };
160
160
  continue;
161
161
  }
162
162
 
163
+ if (logprobs) {
164
+ if (!choice.logprobs) {
165
+ choice.logprobs = logprobs;
166
+ } else if (logprobs.content) {
167
+ choice.logprobs.content ??= [];
168
+ choice.logprobs.content.push(...logprobs.content);
169
+ }
170
+ }
171
+
163
172
  if (finish_reason) choice.finish_reason = finish_reason;
164
173
  Object.assign(choice, other);
165
174
 
@@ -242,7 +251,7 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
242
251
  const { id, choices, created, model } = snapshot;
243
252
  return {
244
253
  id,
245
- choices: choices.map(({ message, finish_reason, index }): ChatCompletion.Choice => {
254
+ choices: choices.map(({ message, finish_reason, index, logprobs }): ChatCompletion.Choice => {
246
255
  if (!finish_reason) throw new OpenAIError(`missing finish_reason for choice ${index}`);
247
256
  const { content = null, function_call, tool_calls } = message;
248
257
  const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine.
@@ -251,12 +260,18 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
251
260
  const { arguments: args, name } = function_call;
252
261
  if (args == null) throw new OpenAIError(`missing function_call.arguments for choice ${index}`);
253
262
  if (!name) throw new OpenAIError(`missing function_call.name for choice ${index}`);
254
- return { message: { content, function_call: { arguments: args, name }, role }, finish_reason, index };
263
+ return {
264
+ message: { content, function_call: { arguments: args, name }, role },
265
+ finish_reason,
266
+ index,
267
+ logprobs,
268
+ };
255
269
  }
256
270
  if (tool_calls) {
257
271
  return {
258
272
  index,
259
273
  finish_reason,
274
+ logprobs,
260
275
  message: {
261
276
  role,
262
277
  content,
@@ -281,7 +296,7 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio
281
296
  },
282
297
  };
283
298
  }
284
- return { message: { content: content, role }, finish_reason, index };
299
+ return { message: { content: content, role }, finish_reason, index, logprobs };
285
300
  }),
286
301
  created,
287
302
  model,
@@ -336,6 +351,11 @@ export namespace ChatCompletionSnapshot {
336
351
  */
337
352
  finish_reason: ChatCompletion.Choice['finish_reason'] | null;
338
353
 
354
+ /**
355
+ * Log probability information for the choice.
356
+ */
357
+ logprobs: ChatCompletion.Choice.Logprobs | null;
358
+
339
359
  /**
340
360
  * The index of the choice in the list of choices.
341
361
  */
@@ -180,7 +180,7 @@ export interface MessageCreationStepDetails {
180
180
  message_creation: MessageCreationStepDetails.MessageCreation;
181
181
 
182
182
  /**
183
- * Always `message_creation``.
183
+ * Always `message_creation`.
184
184
  */
185
185
  type: 'message_creation';
186
186
  }
@@ -269,7 +269,7 @@ export interface RunStep {
269
269
  metadata: unknown | null;
270
270
 
271
271
  /**
272
- * The object type, which is always `thread.run.step``.
272
+ * The object type, which is always `thread.run.step`.
273
273
  */
274
274
  object: 'thread.run.step';
275
275
 
@@ -23,6 +23,7 @@ export namespace Chat {
23
23
  export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice;
24
24
  export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole;
25
25
  export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam;
26
+ export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob;
26
27
  export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool;
27
28
  export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption;
28
29
  export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam;
@@ -96,11 +96,28 @@ export namespace ChatCompletion {
96
96
  */
97
97
  index: number;
98
98
 
99
+ /**
100
+ * Log probability information for the choice.
101
+ */
102
+ logprobs: Choice.Logprobs | null;
103
+
99
104
  /**
100
105
  * A chat completion message generated by the model.
101
106
  */
102
107
  message: ChatCompletionsAPI.ChatCompletionMessage;
103
108
  }
109
+
110
+ export namespace Choice {
111
+ /**
112
+ * Log probability information for the choice.
113
+ */
114
+ export interface Logprobs {
115
+ /**
116
+ * A list of message content tokens with log probability information.
117
+ */
118
+ content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
119
+ }
120
+ }
104
121
  }
105
122
 
106
123
  export interface ChatCompletionAssistantMessageParam {
@@ -215,6 +232,11 @@ export namespace ChatCompletionChunk {
215
232
  * The index of the choice in the list of choices.
216
233
  */
217
234
  index: number;
235
+
236
+ /**
237
+ * Log probability information for the choice.
238
+ */
239
+ logprobs?: Choice.Logprobs | null;
218
240
  }
219
241
 
220
242
  export namespace Choice {
@@ -294,6 +316,16 @@ export namespace ChatCompletionChunk {
294
316
  }
295
317
  }
296
318
  }
319
+
320
+ /**
321
+ * Log probability information for the choice.
322
+ */
323
+ export interface Logprobs {
324
+ /**
325
+ * A list of message content tokens with log probability information.
326
+ */
327
+ content: Array<ChatCompletionsAPI.ChatCompletionTokenLogprob> | null;
328
+ }
297
329
  }
298
330
  }
299
331
 
@@ -350,7 +382,7 @@ export interface ChatCompletionFunctionMessageParam {
350
382
  /**
351
383
  * The contents of the function message.
352
384
  */
353
- content: string;
385
+ content: string | null;
354
386
 
355
387
  /**
356
388
  * The name of the function to call.
@@ -499,6 +531,55 @@ export interface ChatCompletionSystemMessageParam {
499
531
  name?: string;
500
532
  }
501
533
 
534
+ export interface ChatCompletionTokenLogprob {
535
+ /**
536
+ * The token.
537
+ */
538
+ token: string;
539
+
540
+ /**
541
+ * A list of integers representing the UTF-8 bytes representation of the token.
542
+ * Useful in instances where characters are represented by multiple tokens and
543
+ * their byte representations must be combined to generate the correct text
544
+ * representation. Can be `null` if there is no bytes representation for the token.
545
+ */
546
+ bytes: Array<number> | null;
547
+
548
+ /**
549
+ * The log probability of this token.
550
+ */
551
+ logprob: number;
552
+
553
+ /**
554
+ * List of the most likely tokens and their log probability, at this token
555
+ * position. In rare cases, there may be fewer than the number of requested
556
+ * `top_logprobs` returned.
557
+ */
558
+ top_logprobs: Array<ChatCompletionTokenLogprob.TopLogprob>;
559
+ }
560
+
561
+ export namespace ChatCompletionTokenLogprob {
562
+ export interface TopLogprob {
563
+ /**
564
+ * The token.
565
+ */
566
+ token: string;
567
+
568
+ /**
569
+ * A list of integers representing the UTF-8 bytes representation of the token.
570
+ * Useful in instances where characters are represented by multiple tokens and
571
+ * their byte representations must be combined to generate the correct text
572
+ * representation. Can be `null` if there is no bytes representation for the token.
573
+ */
574
+ bytes: Array<number> | null;
575
+
576
+ /**
577
+ * The log probability of this token.
578
+ */
579
+ logprob: number;
580
+ }
581
+ }
582
+
502
583
  export interface ChatCompletionTool {
503
584
  function: Shared.FunctionDefinition;
504
585
 
@@ -612,7 +693,7 @@ export interface ChatCompletionCreateParamsBase {
612
693
  * particular function via `{"name": "my_function"}` forces the model to call that
613
694
  * function.
614
695
  *
615
- * `none` is the default when no functions are present. `auto`` is the default if
696
+ * `none` is the default when no functions are present. `auto` is the default if
616
697
  * functions are present.
617
698
  */
618
699
  function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption;
@@ -637,7 +718,16 @@ export interface ChatCompletionCreateParamsBase {
637
718
  logit_bias?: Record<string, number> | null;
638
719
 
639
720
  /**
640
- * The maximum number of [tokens](/tokenizer) to generate in the chat completion.
721
+ * Whether to return log probabilities of the output tokens or not. If true,
722
+ * returns the log probabilities of each output token returned in the `content` of
723
+ * `message`. This option is currently not available on the `gpt-4-vision-preview`
724
+ * model.
725
+ */
726
+ logprobs?: boolean | null;
727
+
728
+ /**
729
+ * The maximum number of [tokens](/tokenizer) that can be generated in the chat
730
+ * completion.
641
731
  *
642
732
  * The total length of input tokens and generated tokens is limited by the model's
643
733
  * context length.
@@ -663,7 +753,8 @@ export interface ChatCompletionCreateParamsBase {
663
753
  presence_penalty?: number | null;
664
754
 
665
755
  /**
666
- * An object specifying the format that the model must output.
756
+ * An object specifying the format that the model must output. Compatible with
757
+ * `gpt-4-1106-preview` and `gpt-3.5-turbo-1106`.
667
758
  *
668
759
  * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
669
760
  * message the model generates is valid JSON.
@@ -731,6 +822,13 @@ export interface ChatCompletionCreateParamsBase {
731
822
  */
732
823
  tools?: Array<ChatCompletionTool>;
733
824
 
825
+ /**
826
+ * An integer between 0 and 5 specifying the number of most likely tokens to return
827
+ * at each token position, each with an associated log probability. `logprobs` must
828
+ * be set to `true` if this parameter is used.
829
+ */
830
+ top_logprobs?: number | null;
831
+
734
832
  /**
735
833
  * An alternative to sampling with temperature, called nucleus sampling, where the
736
834
  * model considers the results of the tokens with top_p probability mass. So 0.1
@@ -775,7 +873,8 @@ export namespace ChatCompletionCreateParams {
775
873
  }
776
874
 
777
875
  /**
778
- * An object specifying the format that the model must output.
876
+ * An object specifying the format that the model must output. Compatible with
877
+ * `gpt-4-1106-preview` and `gpt-3.5-turbo-1106`.
779
878
  *
780
879
  * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
781
880
  * message the model generates is valid JSON.
@@ -854,6 +953,7 @@ export namespace Completions {
854
953
  export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice;
855
954
  export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole;
856
955
  export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam;
956
+ export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob;
857
957
  export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool;
858
958
  export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption;
859
959
  export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam;
@@ -16,6 +16,7 @@ export {
16
16
  ChatCompletionNamedToolChoice,
17
17
  ChatCompletionRole,
18
18
  ChatCompletionSystemMessageParam,
19
+ ChatCompletionTokenLogprob,
19
20
  ChatCompletionTool,
20
21
  ChatCompletionToolChoiceOption,
21
22
  ChatCompletionToolMessageParam,
@@ -199,17 +199,18 @@ export interface CompletionCreateParamsBase {
199
199
  logit_bias?: Record<string, number> | null;
200
200
 
201
201
  /**
202
- * Include the log probabilities on the `logprobs` most likely tokens, as well the
203
- * chosen tokens. For example, if `logprobs` is 5, the API will return a list of
204
- * the 5 most likely tokens. The API will always return the `logprob` of the
205
- * sampled token, so there may be up to `logprobs+1` elements in the response.
202
+ * Include the log probabilities on the `logprobs` most likely output tokens, as
203
+ * well the chosen tokens. For example, if `logprobs` is 5, the API will return a
204
+ * list of the 5 most likely tokens. The API will always return the `logprob` of
205
+ * the sampled token, so there may be up to `logprobs+1` elements in the response.
206
206
  *
207
207
  * The maximum value for `logprobs` is 5.
208
208
  */
209
209
  logprobs?: number | null;
210
210
 
211
211
  /**
212
- * The maximum number of [tokens](/tokenizer) to generate in the completion.
212
+ * The maximum number of [tokens](/tokenizer) that can be generated in the
213
+ * completion.
213
214
  *
214
215
  * The token count of your prompt plus `max_tokens` cannot exceed the model's
215
216
  * context length.
@@ -15,7 +15,8 @@ export class Files extends APIResource {
15
15
  * Upload a file that can be used across various endpoints. The size of all the
16
16
  * files uploaded by one organization can be up to 100 GB.
17
17
  *
18
- * The size of individual files can be a maximum of 512 MB. See the
18
+ * The size of individual files can be a maximum of 512 MB or 2 million tokens for
19
+ * Assistants. See the
19
20
  * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
20
21
  * learn more about the types of files supported. The Fine-tuning API only supports
21
22
  * `.jsonl` files.
package/src/version.ts CHANGED
@@ -1 +1 @@
1
- export const VERSION = '4.22.0'; // x-release-please-version
1
+ export const VERSION = '4.23.0'; // x-release-please-version
package/version.d.ts CHANGED
@@ -1,2 +1,2 @@
1
- export declare const VERSION = "4.22.0";
1
+ export declare const VERSION = "4.23.0";
2
2
  //# sourceMappingURL=version.d.ts.map
package/version.js CHANGED
@@ -1,5 +1,5 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.VERSION = void 0;
4
- exports.VERSION = '4.22.0'; // x-release-please-version
4
+ exports.VERSION = '4.23.0'; // x-release-please-version
5
5
  //# sourceMappingURL=version.js.map
package/version.mjs CHANGED
@@ -1,2 +1,2 @@
1
- export const VERSION = '4.22.0'; // x-release-please-version
1
+ export const VERSION = '4.23.0'; // x-release-please-version
2
2
  //# sourceMappingURL=version.mjs.map