ai 2.1.33 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,19 +23,18 @@ With the Vercel AI SDK, you can build a ChatGPT-like app in just a few lines of
23
23
 
24
24
  ```tsx
25
25
  // ./app/api/chat/route.js
26
- import { Configuration, OpenAIApi } from 'openai-edge'
26
+ import OpenAI from 'openai'
27
27
  import { OpenAIStream, StreamingTextResponse } from 'ai'
28
28
 
29
- const config = new Configuration({
29
+ const openai = new OpenAI({
30
30
  apiKey: process.env.OPENAI_API_KEY
31
31
  })
32
- const openai = new OpenAIApi(config)
33
32
 
34
33
  export const runtime = 'edge'
35
34
 
36
35
  export async function POST(req) {
37
36
  const { messages } = await req.json()
38
- const response = await openai.createChatCompletion({
37
+ const response = await openai.chat.completions.create({
39
38
  model: 'gpt-4',
40
39
  stream: true,
41
40
  messages
@@ -76,7 +75,7 @@ export default function Chat() {
76
75
 
77
76
  ---
78
77
 
79
- View the full documentation and examples on [sdk.vercel.ai/docs](https://sdk.vercel.ai/docs)
78
+ View the full documentation and examples on [sdk.vercel.ai/docs](https://sdk.vercel.ai/docs).
80
79
 
81
80
  ## Authors
82
81
 
@@ -88,4 +87,4 @@ This library is created by [Vercel](https://vercel.com) and [Next.js](https://ne
88
87
  - Malte Ubl ([@cramforce](https://twitter.com/cramforce)) - [Vercel](https://vercel.com)
89
88
  - Justin Ridgewell ([@jridgewell](https://github.com/jridgewell)) - [Vercel](https://vercel.com)
90
89
 
91
- [Contributors](https://github.com/vercel-labs/ai/graphs/contributors)
90
+ [Contributors](https://github.com/vercel/ai/graphs/contributors)
package/dist/index.d.ts CHANGED
@@ -1,5 +1,4 @@
1
- import { ChatCompletionRequestMessageFunctionCall, CreateChatCompletionRequestFunctionCall } from 'openai-edge';
2
- import { ChatCompletionFunctions } from 'openai-edge/types/api';
1
+ import { ChatCompletionMessage, CompletionCreateParams, CreateChatCompletionRequestMessage } from 'openai/resources/chat';
3
2
  import { ServerResponse } from 'node:http';
4
3
  import { Prediction } from 'replicate';
5
4
 
@@ -109,7 +108,7 @@ type Message = {
109
108
  * contains the function call name and arguments. Otherwise, the field should
110
109
  * not be set.
111
110
  */
112
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
111
+ function_call?: string | ChatCompletionMessage.FunctionCall;
113
112
  };
114
113
  type CreateMessage = Omit<Message, 'id'> & {
115
114
  id?: Message['id'];
@@ -117,18 +116,18 @@ type CreateMessage = Omit<Message, 'id'> & {
117
116
  type ChatRequest = {
118
117
  messages: Message[];
119
118
  options?: RequestOptions;
120
- functions?: Array<ChatCompletionFunctions>;
121
- function_call?: CreateChatCompletionRequestFunctionCall;
119
+ functions?: Array<CompletionCreateParams.Function>;
120
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
122
121
  };
123
- type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionRequestMessageFunctionCall) => Promise<ChatRequest | void>;
122
+ type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionMessage.FunctionCall) => Promise<ChatRequest | void>;
124
123
  type RequestOptions = {
125
124
  headers?: Record<string, string> | Headers;
126
125
  body?: object;
127
126
  };
128
127
  type ChatRequestOptions = {
129
128
  options?: RequestOptions;
130
- functions?: Array<ChatCompletionFunctions>;
131
- function_call?: CreateChatCompletionRequestFunctionCall;
129
+ functions?: Array<CompletionCreateParams.Function>;
130
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
132
131
  };
133
132
  type UseChatOptions = {
134
133
  /**
@@ -262,7 +261,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacks & {
262
261
  /**
263
262
  * @example
264
263
  * ```js
265
- * const response = await openai.createChatCompletion({
264
+ * const response = await openai.chat.completions.create({
266
265
  * model: 'gpt-3.5-turbo-0613',
267
266
  * stream: true,
268
267
  * messages,
@@ -275,7 +274,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacks & {
275
274
  * const result = await myFunction(functionCallPayload)
276
275
  *
277
276
  * // Ask for another completion, or return a string to send to the client as an assistant message.
278
- * return await openai.createChatCompletion({
277
+ * return await openai.chat.completions.create({
279
278
  * model: 'gpt-3.5-turbo-0613',
280
279
  * stream: true,
281
280
  * // Append the relevant "assistant" and "function" call messages
@@ -348,7 +347,32 @@ declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStrea
348
347
 
349
348
  declare function CohereStream(reader: Response, callbacks?: AIStreamCallbacks): ReadableStream;
350
349
 
351
- declare function AnthropicStream(res: Response, cb?: AIStreamCallbacks): ReadableStream;
350
+ interface CompletionChunk {
351
+ /**
352
+ * The resulting completion up to and excluding the stop sequences.
353
+ */
354
+ completion: string;
355
+ /**
356
+ * The model that performed the completion.
357
+ */
358
+ model: string;
359
+ /**
360
+ * The reason that we stopped sampling.
361
+ *
362
+ * This may be one of the following values:
363
+ *
364
+ * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
365
+ * `stop_sequences` parameter, or a stop sequence built into the model
366
+ * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
367
+ */
368
+ stop_reason: string;
369
+ }
370
+ /**
371
+ * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
372
+ * or the return value of `await client.completions.create({ stream: true })`
373
+ * from the `@anthropic-ai/sdk` package.
374
+ */
375
+ declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk>, cb?: AIStreamCallbacks): ReadableStream;
352
376
 
353
377
  declare function LangChainStream(callbacks?: AIStreamCallbacks): {
354
378
  stream: ReadableStream<Uint8Array>;
package/dist/index.js CHANGED
@@ -397,14 +397,36 @@ function parseAnthropicStream() {
397
397
  let previous = "";
398
398
  return (data) => {
399
399
  const json = JSON.parse(data);
400
+ if ("error" in json) {
401
+ throw new Error(`${json.error.type}: ${json.error.message}`);
402
+ }
403
+ if (!("completion" in json)) {
404
+ return;
405
+ }
400
406
  const text = json.completion;
401
- const delta = text.slice(previous.length);
402
- previous = text;
403
- return delta;
407
+ if (!previous || text.length > previous.length && text.startsWith(previous)) {
408
+ const delta = text.slice(previous.length);
409
+ previous = text;
410
+ return delta;
411
+ }
412
+ return text;
404
413
  };
405
414
  }
415
+ async function* streamable2(stream) {
416
+ for await (const chunk of stream) {
417
+ const text = chunk.completion;
418
+ if (text)
419
+ yield text;
420
+ }
421
+ }
406
422
  function AnthropicStream(res, cb) {
407
- return AIStream(res, parseAnthropicStream(), cb);
423
+ if (Symbol.asyncIterator in res) {
424
+ return readableFromAsyncIterable(streamable2(res)).pipeThrough(
425
+ createCallbacksTransformer(cb)
426
+ );
427
+ } else {
428
+ return AIStream(res, parseAnthropicStream(), cb);
429
+ }
408
430
  }
409
431
 
410
432
  // streams/langchain-stream.ts
package/dist/index.mjs CHANGED
@@ -359,14 +359,36 @@ function parseAnthropicStream() {
359
359
  let previous = "";
360
360
  return (data) => {
361
361
  const json = JSON.parse(data);
362
+ if ("error" in json) {
363
+ throw new Error(`${json.error.type}: ${json.error.message}`);
364
+ }
365
+ if (!("completion" in json)) {
366
+ return;
367
+ }
362
368
  const text = json.completion;
363
- const delta = text.slice(previous.length);
364
- previous = text;
365
- return delta;
369
+ if (!previous || text.length > previous.length && text.startsWith(previous)) {
370
+ const delta = text.slice(previous.length);
371
+ previous = text;
372
+ return delta;
373
+ }
374
+ return text;
366
375
  };
367
376
  }
377
+ async function* streamable2(stream) {
378
+ for await (const chunk of stream) {
379
+ const text = chunk.completion;
380
+ if (text)
381
+ yield text;
382
+ }
383
+ }
368
384
  function AnthropicStream(res, cb) {
369
- return AIStream(res, parseAnthropicStream(), cb);
385
+ if (Symbol.asyncIterator in res) {
386
+ return readableFromAsyncIterable(streamable2(res)).pipeThrough(
387
+ createCallbacksTransformer(cb)
388
+ );
389
+ } else {
390
+ return AIStream(res, parseAnthropicStream(), cb);
391
+ }
370
392
  }
371
393
 
372
394
  // streams/langchain-stream.ts
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ai",
3
- "version": "2.1.33",
3
+ "version": "2.2.0",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -61,6 +61,7 @@
61
61
  "dependencies": {
62
62
  "eventsource-parser": "1.0.0",
63
63
  "nanoid": "3.3.6",
64
+ "openai": "4.0.0",
64
65
  "solid-swr-store": "0.10.7",
65
66
  "sswr": "2.0.0",
66
67
  "swr": "2.2.0",
@@ -75,7 +76,6 @@
75
76
  "@types/react-dom": "^18.2.0",
76
77
  "eslint": "^7.32.0",
77
78
  "jest": "29.2.1",
78
- "openai-edge": "^1.1.0",
79
79
  "replicate": "^0.16.0",
80
80
  "ts-jest": "29.0.3",
81
81
  "tsup": "^6.7.0",
@@ -109,13 +109,13 @@
109
109
  "publishConfig": {
110
110
  "access": "public"
111
111
  },
112
- "homepage": "https://github.com/vercel-labs/ai#readme",
112
+ "homepage": "https://sdk.vercel.ai/docs",
113
113
  "repository": {
114
114
  "type": "git",
115
- "url": "git+https://github.com/vercel-labs/ai.git"
115
+ "url": "git+https://github.com/vercel/ai.git"
116
116
  },
117
117
  "bugs": {
118
- "url": "https://github.com/vercel-labs/ai/issues"
118
+ "url": "https://github.com/vercel/ai/issues"
119
119
  },
120
120
  "keywords": [
121
121
  "ai",
@@ -1,4 +1,4 @@
1
- import { ChatCompletionRequestMessageFunctionCall } from 'openai-edge';
1
+ import { ChatCompletionMessage } from 'openai/resources/chat';
2
2
 
3
3
  /**
4
4
  * Shared types between the API and UI packages.
@@ -18,7 +18,7 @@ type Message = {
18
18
  * contains the function call name and arguments. Otherwise, the field should
19
19
  * not be set.
20
20
  */
21
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
21
+ function_call?: string | ChatCompletionMessage.FunctionCall;
22
22
  };
23
23
 
24
24
  /**
@@ -1,5 +1,4 @@
1
- import { ChatCompletionRequestMessageFunctionCall, CreateChatCompletionRequestFunctionCall } from 'openai-edge';
2
- import { ChatCompletionFunctions } from 'openai-edge/types/api';
1
+ import { ChatCompletionMessage, CompletionCreateParams, CreateChatCompletionRequestMessage } from 'openai/resources/chat';
3
2
 
4
3
  /**
5
4
  * Shared types between the API and UI packages.
@@ -19,7 +18,7 @@ type Message = {
19
18
  * contains the function call name and arguments. Otherwise, the field should
20
19
  * not be set.
21
20
  */
22
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
21
+ function_call?: string | ChatCompletionMessage.FunctionCall;
23
22
  };
24
23
  type CreateMessage = Omit<Message, 'id'> & {
25
24
  id?: Message['id'];
@@ -27,18 +26,18 @@ type CreateMessage = Omit<Message, 'id'> & {
27
26
  type ChatRequest = {
28
27
  messages: Message[];
29
28
  options?: RequestOptions;
30
- functions?: Array<ChatCompletionFunctions>;
31
- function_call?: CreateChatCompletionRequestFunctionCall;
29
+ functions?: Array<CompletionCreateParams.Function>;
30
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
32
31
  };
33
- type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionRequestMessageFunctionCall) => Promise<ChatRequest | void>;
32
+ type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionMessage.FunctionCall) => Promise<ChatRequest | void>;
34
33
  type RequestOptions = {
35
34
  headers?: Record<string, string> | Headers;
36
35
  body?: object;
37
36
  };
38
37
  type ChatRequestOptions = {
39
38
  options?: RequestOptions;
40
- functions?: Array<ChatCompletionFunctions>;
41
- function_call?: CreateChatCompletionRequestFunctionCall;
39
+ functions?: Array<CompletionCreateParams.Function>;
40
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
42
41
  };
43
42
  type UseChatOptions = {
44
43
  /**
@@ -1,6 +1,5 @@
1
1
  import { Resource, Accessor, Setter } from 'solid-js';
2
- import { ChatCompletionRequestMessageFunctionCall, CreateChatCompletionRequestFunctionCall } from 'openai-edge';
3
- import { ChatCompletionFunctions } from 'openai-edge/types/api';
2
+ import { ChatCompletionMessage, CompletionCreateParams, CreateChatCompletionRequestMessage } from 'openai/resources/chat';
4
3
 
5
4
  /**
6
5
  * Shared types between the API and UI packages.
@@ -20,7 +19,7 @@ type Message = {
20
19
  * contains the function call name and arguments. Otherwise, the field should
21
20
  * not be set.
22
21
  */
23
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
22
+ function_call?: string | ChatCompletionMessage.FunctionCall;
24
23
  };
25
24
  type CreateMessage = Omit<Message, 'id'> & {
26
25
  id?: Message['id'];
@@ -28,10 +27,10 @@ type CreateMessage = Omit<Message, 'id'> & {
28
27
  type ChatRequest = {
29
28
  messages: Message[];
30
29
  options?: RequestOptions;
31
- functions?: Array<ChatCompletionFunctions>;
32
- function_call?: CreateChatCompletionRequestFunctionCall;
30
+ functions?: Array<CompletionCreateParams.Function>;
31
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
33
32
  };
34
- type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionRequestMessageFunctionCall) => Promise<ChatRequest | void>;
33
+ type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionMessage.FunctionCall) => Promise<ChatRequest | void>;
35
34
  type RequestOptions = {
36
35
  headers?: Record<string, string> | Headers;
37
36
  body?: object;
@@ -1,6 +1,5 @@
1
1
  import { Readable, Writable } from 'svelte/store';
2
- import { ChatCompletionRequestMessageFunctionCall, CreateChatCompletionRequestFunctionCall } from 'openai-edge';
3
- import { ChatCompletionFunctions } from 'openai-edge/types/api';
2
+ import { ChatCompletionMessage, CompletionCreateParams, CreateChatCompletionRequestMessage } from 'openai/resources/chat';
4
3
 
5
4
  /**
6
5
  * Shared types between the API and UI packages.
@@ -20,7 +19,7 @@ type Message = {
20
19
  * contains the function call name and arguments. Otherwise, the field should
21
20
  * not be set.
22
21
  */
23
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
22
+ function_call?: string | ChatCompletionMessage.FunctionCall;
24
23
  };
25
24
  type CreateMessage = Omit<Message, 'id'> & {
26
25
  id?: Message['id'];
@@ -28,18 +27,18 @@ type CreateMessage = Omit<Message, 'id'> & {
28
27
  type ChatRequest = {
29
28
  messages: Message[];
30
29
  options?: RequestOptions;
31
- functions?: Array<ChatCompletionFunctions>;
32
- function_call?: CreateChatCompletionRequestFunctionCall;
30
+ functions?: Array<CompletionCreateParams.Function>;
31
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
33
32
  };
34
- type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionRequestMessageFunctionCall) => Promise<ChatRequest | void>;
33
+ type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionMessage.FunctionCall) => Promise<ChatRequest | void>;
35
34
  type RequestOptions = {
36
35
  headers?: Record<string, string> | Headers;
37
36
  body?: object;
38
37
  };
39
38
  type ChatRequestOptions = {
40
39
  options?: RequestOptions;
41
- functions?: Array<ChatCompletionFunctions>;
42
- function_call?: CreateChatCompletionRequestFunctionCall;
40
+ functions?: Array<CompletionCreateParams.Function>;
41
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
43
42
  };
44
43
  type UseChatOptions = {
45
44
  /**
@@ -626,7 +626,9 @@ var getStreamedResponse = async (api, chatRequest, mutate, extraMetadata, previo
626
626
  }
627
627
  }
628
628
  if (typeof responseMessage.function_call === "string") {
629
- const parsedFunctionCall = JSON.parse(responseMessage.function_call).function_call;
629
+ const parsedFunctionCall = JSON.parse(
630
+ responseMessage.function_call
631
+ ).function_call;
630
632
  responseMessage.function_call = parsedFunctionCall;
631
633
  mutate([...chatRequest.messages, { ...responseMessage }]);
632
634
  }
@@ -599,7 +599,9 @@ var getStreamedResponse = async (api, chatRequest, mutate, extraMetadata, previo
599
599
  }
600
600
  }
601
601
  if (typeof responseMessage.function_call === "string") {
602
- const parsedFunctionCall = JSON.parse(responseMessage.function_call).function_call;
602
+ const parsedFunctionCall = JSON.parse(
603
+ responseMessage.function_call
604
+ ).function_call;
603
605
  responseMessage.function_call = parsedFunctionCall;
604
606
  mutate([...chatRequest.messages, { ...responseMessage }]);
605
607
  }
@@ -1,6 +1,5 @@
1
1
  import { Ref } from 'vue';
2
- import { ChatCompletionRequestMessageFunctionCall, CreateChatCompletionRequestFunctionCall } from 'openai-edge';
3
- import { ChatCompletionFunctions } from 'openai-edge/types/api';
2
+ import { ChatCompletionMessage, CompletionCreateParams, CreateChatCompletionRequestMessage } from 'openai/resources/chat';
4
3
 
5
4
  /**
6
5
  * Shared types between the API and UI packages.
@@ -20,7 +19,7 @@ type Message = {
20
19
  * contains the function call name and arguments. Otherwise, the field should
21
20
  * not be set.
22
21
  */
23
- function_call?: string | ChatCompletionRequestMessageFunctionCall;
22
+ function_call?: string | ChatCompletionMessage.FunctionCall;
24
23
  };
25
24
  type CreateMessage = Omit<Message, 'id'> & {
26
25
  id?: Message['id'];
@@ -28,10 +27,10 @@ type CreateMessage = Omit<Message, 'id'> & {
28
27
  type ChatRequest = {
29
28
  messages: Message[];
30
29
  options?: RequestOptions;
31
- functions?: Array<ChatCompletionFunctions>;
32
- function_call?: CreateChatCompletionRequestFunctionCall;
30
+ functions?: Array<CompletionCreateParams.Function>;
31
+ function_call?: CreateChatCompletionRequestMessage.FunctionCall;
33
32
  };
34
- type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionRequestMessageFunctionCall) => Promise<ChatRequest | void>;
33
+ type FunctionCallHandler = (chatMessages: Message[], functionCall: ChatCompletionMessage.FunctionCall) => Promise<ChatRequest | void>;
35
34
  type RequestOptions = {
36
35
  headers?: Record<string, string> | Headers;
37
36
  body?: object;