ai 2.1.33 → 2.1.34

This diff shows the changes between two publicly released versions of the package, as published to its public registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -76,7 +76,7 @@ export default function Chat() {
 
 ---
 
-View the full documentation and examples on [sdk.vercel.ai/docs](https://sdk.vercel.ai/docs)
+View the full documentation and examples on [sdk.vercel.ai/docs](https://sdk.vercel.ai/docs).
 
 ## Authors
 
@@ -88,4 +88,4 @@ This library is created by [Vercel](https://vercel.com) and [Next.js](https://ne
 - Malte Ubl ([@cramforce](https://twitter.com/cramforce)) - [Vercel](https://vercel.com)
 - Justin Ridgewell ([@jridgewell](https://github.com/jridgewell)) - [Vercel](https://vercel.com)
 
-[Contributors](https://github.com/vercel-labs/ai/graphs/contributors)
+[Contributors](https://github.com/vercel/ai/graphs/contributors)
package/dist/index.d.ts CHANGED
@@ -348,7 +348,32 @@ declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStream
 
 declare function CohereStream(reader: Response, callbacks?: AIStreamCallbacks): ReadableStream;
 
-declare function AnthropicStream(res: Response, cb?: AIStreamCallbacks): ReadableStream;
+interface CompletionChunk {
+    /**
+     * The resulting completion up to and excluding the stop sequences.
+     */
+    completion: string;
+    /**
+     * The model that performed the completion.
+     */
+    model: string;
+    /**
+     * The reason that we stopped sampling.
+     *
+     * This may be one of the following values:
+     *
+     * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
+     *   `stop_sequences` parameter, or a stop sequence built into the model
+     * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
+     */
+    stop_reason: string;
+}
+/**
+ * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
+ * or the return value of `await client.completions.create({ stream: true })`
+ * from the `@anthropic-ai/sdk` package.
+ */
+declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk>, cb?: AIStreamCallbacks): ReadableStream;
 
 declare function LangChainStream(callbacks?: AIStreamCallbacks): {
     stream: ReadableStream<Uint8Array>;
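
The widened signature above is the substance of this release: `AnthropicStream` now accepts either a raw fetch Response from `POST /v1/complete` or the async iterable returned by the official SDK client. The route handler below is a minimal usage sketch, not taken from this diff; the model name, prompt format, and the `StreamingTextResponse` import are assumptions based on the package's documented patterns.

```ts
// A minimal sketch, assuming `@anthropic-ai/sdk` with its legacy completions
// API and the `StreamingTextResponse` helper this package exports.
import Anthropic from "@anthropic-ai/sdk";
import { AnthropicStream, StreamingTextResponse } from "ai";

const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // New in 2.1.34: hand the SDK's async iterable straight to AnthropicStream.
  const completion = await client.completions.create({
    model: "claude-2",
    prompt: `\n\nHuman: ${prompt}\n\nAssistant:`,
    max_tokens_to_sample: 300,
    stream: true,
  });
  const stream = AnthropicStream(completion);

  // Passing a raw fetch Response from POST /v1/complete still works as before.
  return new StreamingTextResponse(stream);
}
```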
package/dist/index.js CHANGED
@@ -397,14 +397,36 @@ function parseAnthropicStream() {
   let previous = "";
   return (data) => {
     const json = JSON.parse(data);
+    if ("error" in json) {
+      throw new Error(`${json.error.type}: ${json.error.message}`);
+    }
+    if (!("completion" in json)) {
+      return;
+    }
     const text = json.completion;
-    const delta = text.slice(previous.length);
-    previous = text;
-    return delta;
+    if (!previous || text.length > previous.length && text.startsWith(previous)) {
+      const delta = text.slice(previous.length);
+      previous = text;
+      return delta;
+    }
+    return text;
   };
 }
+async function* streamable2(stream) {
+  for await (const chunk of stream) {
+    const text = chunk.completion;
+    if (text)
+      yield text;
+  }
+}
 function AnthropicStream(res, cb) {
-  return AIStream(res, parseAnthropicStream(), cb);
+  if (Symbol.asyncIterator in res) {
+    return readableFromAsyncIterable(streamable2(res)).pipeThrough(
+      createCallbacksTransformer(cb)
+    );
+  } else {
+    return AIStream(res, parseAnthropicStream(), cb);
+  }
 }
 
 // streams/langchain-stream.ts
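
The reworked parser does three things: it throws when the stream carries an `error` payload, it skips events with no `completion` field, and it only computes a suffix delta when the incoming text is a cumulative superset of what was already emitted; otherwise the chunk is forwarded as-is. The sketch below isolates that last branch to show its behavior on both payload shapes (the helper name is hypothetical, written for this note).

```ts
// Hedged illustration of the added delta branch: emit only the new suffix
// when the text extends what we have already seen (cumulative payloads),
// otherwise pass the chunk through unchanged (incremental payloads).
function makeDelta() {
  let previous = "";
  return (text: string): string => {
    if (!previous || (text.length > previous.length && text.startsWith(previous))) {
      const delta = text.slice(previous.length);
      previous = text;
      return delta;
    }
    return text;
  };
}

const next = makeDelta();
console.log(next("Hello"));       // "Hello"  (first chunk)
console.log(next("Hello world")); // " world" (cumulative payload -> suffix only)
console.log(next("!"));           // "!"      (incremental payload -> unchanged)
```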
package/dist/index.mjs CHANGED
@@ -359,14 +359,36 @@ function parseAnthropicStream() {
   let previous = "";
   return (data) => {
     const json = JSON.parse(data);
+    if ("error" in json) {
+      throw new Error(`${json.error.type}: ${json.error.message}`);
+    }
+    if (!("completion" in json)) {
+      return;
+    }
     const text = json.completion;
-    const delta = text.slice(previous.length);
-    previous = text;
-    return delta;
+    if (!previous || text.length > previous.length && text.startsWith(previous)) {
+      const delta = text.slice(previous.length);
+      previous = text;
+      return delta;
+    }
+    return text;
   };
 }
+async function* streamable2(stream) {
+  for await (const chunk of stream) {
+    const text = chunk.completion;
+    if (text)
+      yield text;
+  }
+}
 function AnthropicStream(res, cb) {
-  return AIStream(res, parseAnthropicStream(), cb);
+  if (Symbol.asyncIterator in res) {
+    return readableFromAsyncIterable(streamable2(res)).pipeThrough(
+      createCallbacksTransformer(cb)
+    );
+  } else {
+    return AIStream(res, parseAnthropicStream(), cb);
+  }
 }
 
 // streams/langchain-stream.ts
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "ai",
-  "version": "2.1.33",
+  "version": "2.1.34",
   "license": "Apache-2.0",
   "sideEffects": false,
   "main": "./dist/index.js",
@@ -109,13 +109,13 @@
   "publishConfig": {
     "access": "public"
   },
-  "homepage": "https://github.com/vercel-labs/ai#readme",
+  "homepage": "https://sdk.vercel.ai/docs",
   "repository": {
     "type": "git",
-    "url": "git+https://github.com/vercel-labs/ai.git"
+    "url": "git+https://github.com/vercel/ai.git"
   },
   "bugs": {
-    "url": "https://github.com/vercel-labs/ai/issues"
+    "url": "https://github.com/vercel/ai/issues"
   },
   "keywords": [
     "ai",