ai 4.0.0-canary.4 → 4.0.0-canary.5

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2085,9 +2085,6 @@ async function generateObject({
2085
2085
  "ai.response.timestamp": responseData.timestamp.toISOString(),
2086
2086
  "ai.usage.promptTokens": result2.usage.promptTokens,
2087
2087
  "ai.usage.completionTokens": result2.usage.completionTokens,
2088
- // deprecated:
2089
- "ai.finishReason": result2.finishReason,
2090
- "ai.result.object": { output: () => result2.text },
2091
2088
  // standardized gen-ai llm span attributes:
2092
2089
  "gen_ai.response.finish_reasons": [result2.finishReason],
2093
2090
  "gen_ai.response.id": responseData.id,
@@ -2192,9 +2189,6 @@ async function generateObject({
2192
2189
  "ai.response.timestamp": responseData.timestamp.toISOString(),
2193
2190
  "ai.usage.promptTokens": result2.usage.promptTokens,
2194
2191
  "ai.usage.completionTokens": result2.usage.completionTokens,
2195
- // deprecated:
2196
- "ai.finishReason": result2.finishReason,
2197
- "ai.result.object": { output: () => objectText },
2198
2192
  // standardized gen-ai llm span attributes:
2199
2193
  "gen_ai.response.finish_reasons": [result2.finishReason],
2200
2194
  "gen_ai.response.id": responseData.id,
@@ -2248,12 +2242,7 @@ async function generateObject({
2248
2242
  output: () => JSON.stringify(validationResult.value)
2249
2243
  },
2250
2244
  "ai.usage.promptTokens": usage.promptTokens,
2251
- "ai.usage.completionTokens": usage.completionTokens,
2252
- // deprecated:
2253
- "ai.finishReason": finishReason,
2254
- "ai.result.object": {
2255
- output: () => JSON.stringify(validationResult.value)
2256
- }
2245
+ "ai.usage.completionTokens": usage.completionTokens
2257
2246
  }
2258
2247
  })
2259
2248
  );
@@ -2282,9 +2271,6 @@ var DefaultGenerateObjectResult = class {
2282
2271
  this.experimental_providerMetadata = options.providerMetadata;
2283
2272
  this.response = options.response;
2284
2273
  this.request = options.request;
2285
- this.rawResponse = {
2286
- headers: options.response.headers
2287
- };
2288
2274
  this.logprobs = options.logprobs;
2289
2275
  }
2290
2276
  toJsonResponse(init) {
@@ -2662,7 +2648,6 @@ var DefaultStreamObjectResult = class {
2662
2648
  generateId: generateId3
2663
2649
  }) {
2664
2650
  this.warnings = warnings;
2665
- this.rawResponse = rawResponse;
2666
2651
  this.outputStrategy = outputStrategy;
2667
2652
  this.request = Promise.resolve(request);
2668
2653
  this.objectPromise = new DelayedPromise();
@@ -2795,9 +2780,6 @@ var DefaultStreamObjectResult = class {
2795
2780
  "ai.response.timestamp": response.timestamp.toISOString(),
2796
2781
  "ai.usage.promptTokens": finalUsage.promptTokens,
2797
2782
  "ai.usage.completionTokens": finalUsage.completionTokens,
2798
- // deprecated
2799
- "ai.finishReason": finishReason,
2800
- "ai.result.object": { output: () => JSON.stringify(object) },
2801
2783
  // standardized gen-ai llm span attributes:
2802
2784
  "gen_ai.response.finish_reasons": [finishReason],
2803
2785
  "gen_ai.response.id": response.id,
@@ -2816,9 +2798,7 @@ var DefaultStreamObjectResult = class {
2816
2798
  "ai.usage.completionTokens": finalUsage.completionTokens,
2817
2799
  "ai.response.object": {
2818
2800
  output: () => JSON.stringify(object)
2819
- },
2820
- // deprecated
2821
- "ai.result.object": { output: () => JSON.stringify(object) }
2801
+ }
2822
2802
  }
2823
2803
  })
2824
2804
  );
@@ -2826,7 +2806,6 @@ var DefaultStreamObjectResult = class {
2826
2806
  usage: finalUsage,
2827
2807
  object,
2828
2808
  error,
2829
- rawResponse,
2830
2809
  response: {
2831
2810
  ...response,
2832
2811
  headers: rawResponse == null ? void 0 : rawResponse.headers
@@ -3290,14 +3269,6 @@ async function generateText({
3290
3269
  "ai.response.timestamp": responseData.timestamp.toISOString(),
3291
3270
  "ai.usage.promptTokens": result.usage.promptTokens,
3292
3271
  "ai.usage.completionTokens": result.usage.completionTokens,
3293
- // deprecated:
3294
- "ai.finishReason": result.finishReason,
3295
- "ai.result.text": {
3296
- output: () => result.text
3297
- },
3298
- "ai.result.toolCalls": {
3299
- output: () => JSON.stringify(result.toolCalls)
3300
- },
3301
3272
  // standardized gen-ai llm span attributes:
3302
3273
  "gen_ai.response.finish_reasons": [result.finishReason],
3303
3274
  "gen_ai.response.id": responseData.id,
@@ -3400,15 +3371,7 @@ async function generateText({
3400
3371
  output: () => JSON.stringify(currentModelResponse.toolCalls)
3401
3372
  },
3402
3373
  "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
3403
- "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
3404
- // deprecated:
3405
- "ai.finishReason": currentModelResponse.finishReason,
3406
- "ai.result.text": {
3407
- output: () => currentModelResponse.text
3408
- },
3409
- "ai.result.toolCalls": {
3410
- output: () => JSON.stringify(currentModelResponse.toolCalls)
3411
- }
3374
+ "ai.usage.completionTokens": currentModelResponse.usage.completionTokens
3412
3375
  }
3413
3376
  })
3414
3377
  );
@@ -3506,9 +3469,6 @@ var DefaultGenerateTextResult = class {
3506
3469
  this.responseMessages = options.responseMessages;
3507
3470
  this.steps = options.steps;
3508
3471
  this.experimental_providerMetadata = options.providerMetadata;
3509
- this.rawResponse = {
3510
- headers: options.response.headers
3511
- };
3512
3472
  this.logprobs = options.logprobs;
3513
3473
  }
3514
3474
  };
@@ -4165,14 +4125,10 @@ var DefaultStreamTextResult = class {
4165
4125
  const msToFirstChunk = now2() - startTimestamp;
4166
4126
  stepFirstChunk = false;
4167
4127
  doStreamSpan2.addEvent("ai.stream.firstChunk", {
4168
- "ai.response.msToFirstChunk": msToFirstChunk,
4169
- // deprecated:
4170
- "ai.stream.msToFirstChunk": msToFirstChunk
4128
+ "ai.response.msToFirstChunk": msToFirstChunk
4171
4129
  });
4172
4130
  doStreamSpan2.setAttributes({
4173
- "ai.response.msToFirstChunk": msToFirstChunk,
4174
- // deprecated:
4175
- "ai.stream.msToFirstChunk": msToFirstChunk
4131
+ "ai.response.msToFirstChunk": msToFirstChunk
4176
4132
  });
4177
4133
  }
4178
4134
  if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
@@ -4297,12 +4253,6 @@ var DefaultStreamTextResult = class {
4297
4253
  "ai.response.timestamp": stepResponse.timestamp.toISOString(),
4298
4254
  "ai.usage.promptTokens": stepUsage.promptTokens,
4299
4255
  "ai.usage.completionTokens": stepUsage.completionTokens,
4300
- // deprecated
4301
- "ai.finishReason": stepFinishReason,
4302
- "ai.result.text": { output: () => stepText },
4303
- "ai.result.toolCalls": {
4304
- output: () => stepToolCallsJson
4305
- },
4306
4256
  // standardized gen-ai llm span attributes:
4307
4257
  "gen_ai.response.finish_reasons": [stepFinishReason],
4308
4258
  "gen_ai.response.id": stepResponse.id,
@@ -4357,7 +4307,6 @@ var DefaultStreamTextResult = class {
4357
4307
  warnings: self.warnings,
4358
4308
  logprobs: stepLogProbs,
4359
4309
  request: stepRequest,
4360
- rawResponse: self.rawResponse,
4361
4310
  response: {
4362
4311
  ...stepResponse,
4363
4312
  headers: (_a11 = self.rawResponse) == null ? void 0 : _a11.headers,
@@ -4418,13 +4367,7 @@ var DefaultStreamTextResult = class {
4418
4367
  output: () => stepToolCallsJson
4419
4368
  },
4420
4369
  "ai.usage.promptTokens": combinedUsage.promptTokens,
4421
- "ai.usage.completionTokens": combinedUsage.completionTokens,
4422
- // deprecated
4423
- "ai.finishReason": stepFinishReason,
4424
- "ai.result.text": { output: () => fullStepText },
4425
- "ai.result.toolCalls": {
4426
- output: () => stepToolCallsJson
4427
- }
4370
+ "ai.usage.completionTokens": combinedUsage.completionTokens
4428
4371
  }
4429
4372
  })
4430
4373
  );
@@ -4454,7 +4397,6 @@ var DefaultStreamTextResult = class {
4454
4397
  // The type exposed to the users will be correctly inferred.
4455
4398
  toolResults: stepToolResults,
4456
4399
  request: stepRequest,
4457
- rawResponse,
4458
4400
  response: {
4459
4401
  ...stepResponse,
4460
4402
  headers: rawResponse == null ? void 0 : rawResponse.headers,
@@ -5182,48 +5124,6 @@ function trimStartOfStream() {
5182
5124
  return text;
5183
5125
  };
5184
5126
  }
5185
-
5186
- // streams/stream-to-response.ts
5187
- function streamToResponse(res, response, init, data) {
5188
- var _a11;
5189
- response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
5190
- "Content-Type": "text/plain; charset=utf-8",
5191
- ...init == null ? void 0 : init.headers
5192
- });
5193
- let processedStream = res;
5194
- if (data) {
5195
- processedStream = mergeStreams(data.stream, res);
5196
- }
5197
- const reader = processedStream.getReader();
5198
- function read() {
5199
- reader.read().then(({ done, value }) => {
5200
- if (done) {
5201
- response.end();
5202
- return;
5203
- }
5204
- response.write(value);
5205
- read();
5206
- });
5207
- }
5208
- read();
5209
- }
5210
-
5211
- // streams/streaming-text-response.ts
5212
- var StreamingTextResponse = class extends Response {
5213
- constructor(res, init, data) {
5214
- let processedStream = res;
5215
- if (data) {
5216
- processedStream = mergeStreams(data.stream, res);
5217
- }
5218
- super(processedStream, {
5219
- ...init,
5220
- status: 200,
5221
- headers: prepareResponseHeaders(init, {
5222
- contentType: "text/plain; charset=utf-8"
5223
- })
5224
- });
5225
- }
5226
- };
5227
5127
  export {
5228
5128
  AISDKError10 as AISDKError,
5229
5129
  APICallError2 as APICallError,
@@ -5248,7 +5148,6 @@ export {
5248
5148
  NoSuchToolError,
5249
5149
  RetryError,
5250
5150
  StreamData,
5251
- StreamingTextResponse,
5252
5151
  TypeValidationError2 as TypeValidationError,
5253
5152
  UnsupportedFunctionalityError2 as UnsupportedFunctionalityError,
5254
5153
  convertToCoreMessages,
@@ -5272,7 +5171,6 @@ export {
5272
5171
  readDataStream,
5273
5172
  streamObject,
5274
5173
  streamText,
5275
- streamToResponse,
5276
5174
  tool
5277
5175
  };
5278
5176
  //# sourceMappingURL=index.mjs.map