ai 3.2.33 → 3.2.34

This diff shows the changes between publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -88,14 +88,14 @@ async function embed({
  const modelResponse = await retry(
  () => model.doEmbed({ values: [value], abortSignal, headers })
  );
- return new EmbedResult({
+ return new DefaultEmbedResult({
  value,
  embedding: modelResponse.embeddings[0],
  usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN },
  rawResponse: modelResponse.rawResponse
  });
  }
- var EmbedResult = class {
+ var DefaultEmbedResult = class {
  constructor(options) {
  this.value = options.value;
  this.embedding = options.embedding;
@@ -131,7 +131,7 @@ async function embedMany({
  const modelResponse = await retry(
  () => model.doEmbed({ values, abortSignal, headers })
  );
- return new EmbedManyResult({
+ return new DefaultEmbedManyResult({
  values,
  embeddings: modelResponse.embeddings,
  usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
@@ -147,9 +147,9 @@ async function embedMany({
  embeddings.push(...modelResponse.embeddings);
  tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
  }
- return new EmbedManyResult({ values, embeddings, usage: { tokens } });
+ return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
  }
- var EmbedManyResult = class {
+ var DefaultEmbedManyResult = class {
  constructor(options) {
  this.values = options.values;
  this.embeddings = options.embeddings;
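
The `EmbedResult`/`EmbedManyResult` classes become internal `DefaultEmbedResult`/`DefaultEmbedManyResult` implementations (and are dropped from the export list at the end of this diff). Code that only consumes the returned values is unaffected, since the result shape (value, embedding, usage, rawResponse) is unchanged. A minimal sketch, assuming an embedding model from a provider package such as `@ai-sdk/openai`:

```js
import { openai } from "@ai-sdk/openai";
import { embed } from "ai";

// Same result shape as before; only the concrete class behind it
// is now a non-exported Default* implementation.
const { embedding, usage } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: "sunny day at the beach",
});
```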
@@ -427,6 +427,7 @@ function prepareCallSettings({
  topP,
  presencePenalty,
  frequencyPenalty,
+ stopSequences,
  seed,
  maxRetries
  }) {
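
`prepareCallSettings` now destructures `stopSequences` and (in the next hunk) forwards it to the provider, normalizing empty arrays to `undefined`. A hedged usage sketch — assuming `model` is a configured provider model and that the high-level calls such as `generateText`, which spread their settings into `prepareCallSettings`, pass the option through:

```js
import { generateText } from "ai";

// Generation should stop when the model emits one of these sequences;
// an empty array would be normalized to undefined (no stop sequences).
const { text } = await generateText({
  model,
  prompt: "List three colors:",
  stopSequences: ["\n\n"],
});
```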
@@ -513,11 +514,163 @@ function prepareCallSettings({
  topP,
  presencePenalty,
  frequencyPenalty,
+ stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
  seed,
  maxRetries: maxRetries != null ? maxRetries : 2
  };
  }

+ // core/telemetry/get-base-telemetry-attributes.ts
+ function getBaseTelemetryAttributes({
+ operationName,
+ model,
+ settings,
+ telemetry,
+ headers
+ }) {
+ var _a;
+ return {
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
+ // settings:
+ ...Object.entries(settings).reduce((attributes, [key, value]) => {
+ attributes[`ai.settings.${key}`] = value;
+ return attributes;
+ }, {}),
+ // special telemetry information
+ "operation.name": operationName,
+ "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+ "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+ // add metadata as attributes:
+ ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+ (attributes, [key, value]) => {
+ attributes[`ai.telemetry.metadata.${key}`] = value;
+ return attributes;
+ },
+ {}
+ ),
+ // request headers
+ ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+ if (value !== void 0) {
+ attributes[`ai.request.headers.${key}`] = value;
+ }
+ return attributes;
+ }, {})
+ };
+ }
+
+ // core/telemetry/get-tracer.ts
+ import { trace } from "@opentelemetry/api";
+
+ // core/telemetry/noop-tracer.ts
+ var noopTracer = {
+ startSpan() {
+ return noopSpan;
+ },
+ startActiveSpan(name, arg1, arg2, arg3) {
+ if (typeof arg1 === "function") {
+ return arg1(noopSpan);
+ }
+ if (typeof arg2 === "function") {
+ return arg2(noopSpan);
+ }
+ if (typeof arg3 === "function") {
+ return arg3(noopSpan);
+ }
+ }
+ };
+ var noopSpan = {
+ spanContext() {
+ return noopSpanContext;
+ },
+ setAttribute() {
+ return this;
+ },
+ setAttributes() {
+ return this;
+ },
+ addEvent() {
+ return this;
+ },
+ addLink() {
+ return this;
+ },
+ addLinks() {
+ return this;
+ },
+ setStatus() {
+ return this;
+ },
+ updateName() {
+ return this;
+ },
+ end() {
+ return this;
+ },
+ isRecording() {
+ return false;
+ },
+ recordException() {
+ return this;
+ }
+ };
+ var noopSpanContext = {
+ traceId: "",
+ spanId: "",
+ traceFlags: 0
+ };
+
+ // core/telemetry/get-tracer.ts
+ var testTracer = void 0;
+ function getTracer({ isEnabled }) {
+ if (!isEnabled) {
+ return noopTracer;
+ }
+ if (testTracer) {
+ return testTracer;
+ }
+ return trace.getTracer("ai");
+ }
+
+ // core/telemetry/record-span.ts
+ import { SpanStatusCode } from "@opentelemetry/api";
+ function recordSpan({
+ name,
+ tracer,
+ attributes,
+ fn,
+ endWhenDone = true
+ }) {
+ return tracer.startActiveSpan(name, { attributes }, async (span) => {
+ try {
+ const result = await fn(span);
+ if (endWhenDone) {
+ span.end();
+ }
+ return result;
+ } catch (error) {
+ try {
+ if (error instanceof Error) {
+ span.recordException({
+ name: error.name,
+ message: error.message,
+ stack: error.stack
+ });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: error.message
+ });
+ } else {
+ span.setStatus({ code: SpanStatusCode.ERROR });
+ }
+ } finally {
+ span.end();
+ }
+ throw error;
+ }
+ });
+ }
+
  // core/types/token-usage.ts
  function calculateCompletionTokenUsage(usage) {
  return {
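
The new telemetry helpers are self-contained: `getTracer` returns a no-op tracer unless telemetry is enabled, and `recordSpan` wraps an async function in an active span, recording exceptions and ending the span on both the success and error paths. A minimal sketch of how they compose, using only the names defined above:

```js
// Disabled telemetry yields the noopTracer, so this adds no overhead.
const tracer = getTracer({ isEnabled: false });

const value = await recordSpan({
  name: "example.operation",        // hypothetical span name
  tracer,
  attributes: { "example.attr": "demo" },
  fn: async (span) => {
    span.setAttributes({ "example.step": "done" }); // no-op on noopSpan
    return 42;                      // span is ended, then 42 is returned
  },
});
```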
@@ -573,133 +726,180 @@ async function generateObject({
  maxRetries,
  abortSignal,
  headers,
+ experimental_telemetry: telemetry,
  ...settings
  }) {
- var _a, _b;
- const retry = retryWithExponentialBackoff({ maxRetries });
+ var _a;
+ const baseTelemetryAttributes = getBaseTelemetryAttributes({
+ operationName: "ai.generateObject",
+ model,
+ telemetry,
+ headers,
+ settings: { ...settings, maxRetries }
+ });
  const jsonSchema = convertZodToJSONSchema(schema);
- if (mode === "auto" || mode == null) {
- mode = model.defaultObjectGenerationMode;
- }
- let result;
- let finishReason;
- let usage;
- let warnings;
- let rawResponse;
- let logprobs;
- switch (mode) {
- case "json": {
- const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
- prompt,
- messages
- });
- const generateResult = await retry(() => {
- return model.doGenerate({
- mode: { type: "object-json" },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- });
- });
- if (generateResult.text === void 0) {
- throw new NoObjectGeneratedError();
+ const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+ return recordSpan({
+ name: "ai.generateObject",
+ attributes: {
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": JSON.stringify({ system, prompt, messages }),
+ "ai.settings.jsonSchema": JSON.stringify(jsonSchema),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ fn: async (span) => {
+ var _a2, _b;
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ if (mode === "auto" || mode == null) {
+ mode = model.defaultObjectGenerationMode;
  }
- result = generateResult.text;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- rawResponse = generateResult.rawResponse;
- logprobs = generateResult.logprobs;
- break;
- }
- case "grammar": {
- const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
- prompt,
- messages
- });
- const generateResult = await retry(
- () => model.doGenerate({
- mode: { type: "object-grammar", schema: jsonSchema },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- })
- );
- if (generateResult.text === void 0) {
- throw new NoObjectGeneratedError();
+ let result;
+ let finishReason;
+ let usage;
+ let warnings;
+ let rawResponse;
+ let logprobs;
+ switch (mode) {
+ case "json": {
+ const validatedPrompt = getValidatedPrompt({
+ system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+ prompt,
+ messages
+ });
+ const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const inputFormat = validatedPrompt.type;
+ const generateResult = await retry(
+ () => recordSpan({
+ name: "ai.generateObject.doGenerate",
+ attributes: {
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": inputFormat,
+ "ai.prompt.messages": JSON.stringify(promptMessages),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ fn: async (span2) => {
+ const result2 = await model.doGenerate({
+ mode: { type: "object-json" },
+ ...prepareCallSettings(settings),
+ inputFormat,
+ prompt: promptMessages,
+ abortSignal,
+ headers
+ });
+ span2.setAttributes({
+ "ai.finishReason": result2.finishReason,
+ "ai.usage.promptTokens": result2.usage.promptTokens,
+ "ai.usage.completionTokens": result2.usage.completionTokens,
+ "ai.result.text": result2.text
+ });
+ return result2;
+ }
+ })
+ );
+ if (generateResult.text === void 0) {
+ throw new NoObjectGeneratedError();
+ }
+ result = generateResult.text;
+ finishReason = generateResult.finishReason;
+ usage = generateResult.usage;
+ warnings = generateResult.warnings;
+ rawResponse = generateResult.rawResponse;
+ logprobs = generateResult.logprobs;
+ break;
+ }
+ case "tool": {
+ const validatedPrompt = getValidatedPrompt({
+ system,
+ prompt,
+ messages
+ });
+ const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+ const inputFormat = validatedPrompt.type;
+ const generateResult = await retry(
+ () => recordSpan({
+ name: "ai.generateObject.doGenerate",
+ attributes: {
+ ...baseTelemetryAttributes,
+ "ai.prompt.format": inputFormat,
+ "ai.prompt.messages": JSON.stringify(promptMessages),
+ "ai.settings.mode": mode
+ },
+ tracer,
+ fn: async (span2) => {
+ const result2 = await model.doGenerate({
+ mode: {
+ type: "object-tool",
+ tool: {
+ type: "function",
+ name: "json",
+ description: "Respond with a JSON object.",
+ parameters: jsonSchema
+ }
+ },
+ ...prepareCallSettings(settings),
+ inputFormat,
+ prompt: promptMessages,
+ abortSignal,
+ headers
+ });
+ span2.setAttributes({
+ "ai.finishReason": result2.finishReason,
+ "ai.usage.promptTokens": result2.usage.promptTokens,
+ "ai.usage.completionTokens": result2.usage.completionTokens,
+ "ai.result.text": result2.text,
+ "ai.result.toolCalls": JSON.stringify(result2.toolCalls)
+ });
+ return result2;
+ }
+ })
+ );
+ const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
+ if (functionArgs === void 0) {
+ throw new NoObjectGeneratedError();
+ }
+ result = functionArgs;
+ finishReason = generateResult.finishReason;
+ usage = generateResult.usage;
+ warnings = generateResult.warnings;
+ rawResponse = generateResult.rawResponse;
+ logprobs = generateResult.logprobs;
+ break;
+ }
+ case void 0: {
+ throw new Error(
+ "Model does not have a default object generation mode."
+ );
+ }
+ default: {
+ const _exhaustiveCheck = mode;
+ throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ }
  }
- result = generateResult.text;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- rawResponse = generateResult.rawResponse;
- logprobs = generateResult.logprobs;
- break;
- }
- case "tool": {
- const validatedPrompt = getValidatedPrompt({
- system,
- prompt,
- messages
- });
- const generateResult = await retry(
- () => model.doGenerate({
- mode: {
- type: "object-tool",
- tool: {
- type: "function",
- name: "json",
- description: "Respond with a JSON object.",
- parameters: jsonSchema
- }
- },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- })
- );
- const functionArgs = (_b = (_a = generateResult.toolCalls) == null ? void 0 : _a[0]) == null ? void 0 : _b.args;
- if (functionArgs === void 0) {
- throw new NoObjectGeneratedError();
+ const parseResult = safeParseJSON({ text: result, schema });
+ if (!parseResult.success) {
+ throw parseResult.error;
  }
- result = functionArgs;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- rawResponse = generateResult.rawResponse;
- logprobs = generateResult.logprobs;
- break;
- }
- case void 0: {
- throw new Error("Model does not have a default object generation mode.");
- }
- default: {
- const _exhaustiveCheck = mode;
- throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+ span.setAttributes({
+ "ai.finishReason": finishReason,
+ "ai.usage.promptTokens": usage.promptTokens,
+ "ai.usage.completionTokens": usage.completionTokens,
+ "ai.result.object": JSON.stringify(parseResult.value)
+ });
+ return new DefaultGenerateObjectResult({
+ object: parseResult.value,
+ finishReason,
+ usage: calculateCompletionTokenUsage(usage),
+ warnings,
+ rawResponse,
+ logprobs
+ });
  }
- }
- const parseResult = safeParseJSON({ text: result, schema });
- if (!parseResult.success) {
- throw parseResult.error;
- }
- return new GenerateObjectResult({
- object: parseResult.value,
- finishReason,
- usage: calculateCompletionTokenUsage(usage),
- warnings,
- rawResponse,
- logprobs
  });
  }
- var GenerateObjectResult = class {
+ var DefaultGenerateObjectResult = class {
  constructor(options) {
  this.object = options.object;
  this.finishReason = options.finishReason;
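
`generateObject` now accepts an `experimental_telemetry` option and runs inside an `ai.generateObject` span, with a nested `ai.generateObject.doGenerate` span per provider call; it also drops the `grammar` mode, leaving only `json` and `tool`. Based on the fields the code reads above (`isEnabled`, `functionId`, `metadata`), enabling telemetry would look roughly like this, assuming `model` and `schema` are already defined:

```js
const result = await generateObject({
  model,
  schema,
  prompt: "Generate a lasagna recipe.",
  experimental_telemetry: {
    isEnabled: true,             // otherwise the noopTracer is used
    functionId: "recipe-gen",    // recorded as resource.name / ai.telemetry.functionId
    metadata: { userId: "123" }, // each entry becomes an ai.telemetry.metadata.* attribute
  },
});
```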
@@ -708,10 +908,6 @@ var GenerateObjectResult = class {
  this.rawResponse = options.rawResponse;
  this.logprobs = options.logprobs;
  }
- /**
- Converts the object to a JSON response.
- The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
- */
  toJsonResponse(init) {
  var _a;
  return new Response(JSON.stringify(this.object), {
@@ -837,35 +1033,6 @@ async function streamObject({
  };
  break;
  }
- case "grammar": {
- const validatedPrompt = getValidatedPrompt({
- system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
- prompt,
- messages
- });
- callOptions = {
- mode: { type: "object-grammar", schema: jsonSchema },
- ...prepareCallSettings(settings),
- inputFormat: validatedPrompt.type,
- prompt: convertToLanguageModelPrompt(validatedPrompt),
- abortSignal,
- headers
- };
- transformer = {
- transform: (chunk, controller) => {
- switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
- break;
- case "finish":
- case "error":
- controller.enqueue(chunk);
- break;
- }
- }
- };
- break;
- }
  case "tool": {
  const validatedPrompt = getValidatedPrompt({
  system,
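
With the `object-grammar` case removed, `streamObject` (like `generateObject` above) only handles the `json` and `tool` modes; an explicit `mode: "grammar"` now falls through to the `Unsupported mode` error path. A hedged migration sketch for affected callers:

```js
// Before (3.2.33): mode: "grammar"
// After (3.2.34): use "json" or "tool", or omit mode so that the
// model's defaultObjectGenerationMode is used.
const result = await streamObject({
  model,
  schema,
  mode: "json",
  prompt: "Generate a lasagna recipe.",
});
```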
@@ -912,7 +1079,7 @@ async function streamObject({
  }
  }
  const result = await retry(() => model.doStream(callOptions));
- return new StreamObjectResult({
+ return new DefaultStreamObjectResult({
  stream: result.stream.pipeThrough(new TransformStream(transformer)),
  warnings: result.warnings,
  rawResponse: result.rawResponse,
@@ -920,7 +1087,7 @@ async function streamObject({
  onFinish
  });
  }
- var StreamObjectResult = class {
+ var DefaultStreamObjectResult = class {
  constructor({
  stream,
  warnings,
@@ -1016,18 +1183,9 @@ var StreamObjectResult = class {
  })
  );
  }
- /**
- The generated object (typed according to the schema). Resolved when the response is finished.
- */
  get object() {
  return this.objectPromise.value;
  }
- /**
- Stream of partial objects. It gets more complete as the stream progresses.
-
- Note that the partial object is not validated.
- If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
- */
  get partialObjectStream() {
  return createAsyncIterableStream(this.originalStream, {
  transform(chunk, controller) {
@@ -1049,10 +1207,6 @@ var StreamObjectResult = class {
  }
  });
  }
- /**
- Text stream of the JSON representation of the generated object. It contains text chunks.
- When the stream is finished, the object is valid JSON that can be parsed.
- */
  get textStream() {
  return createAsyncIterableStream(this.originalStream, {
  transform(chunk, controller) {
@@ -1074,9 +1228,6 @@ var StreamObjectResult = class {
  }
  });
  }
- /**
- Stream of different types of events, including partial objects, errors, and finish events.
- */
  get fullStream() {
  return createAsyncIterableStream(this.originalStream, {
  transform(chunk, controller) {
@@ -1084,14 +1235,6 @@ var StreamObjectResult = class {
  }
  });
  }
- /**
- Writes text delta output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each text delta as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
  pipeTextStreamToResponse(response, init) {
  var _a;
  response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -1115,14 +1258,6 @@ var StreamObjectResult = class {
  };
  read();
  }
- /**
- Creates a simple text stream response.
- The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers and status code.
- */
  toTextStreamResponse(init) {
  var _a;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -1162,157 +1297,6 @@ function prepareToolsAndToolChoice({
  };
  }

- // core/telemetry/get-base-telemetry-attributes.ts
- function getBaseTelemetryAttributes({
- operationName,
- model,
- settings,
- telemetry,
- headers
- }) {
- var _a;
- return {
- "ai.model.provider": model.provider,
- "ai.model.id": model.modelId,
- // settings:
- ...Object.entries(settings).reduce((attributes, [key, value]) => {
- attributes[`ai.settings.${key}`] = value;
- return attributes;
- }, {}),
- // special telemetry information
- "operation.name": operationName,
- "resource.name": telemetry == null ? void 0 : telemetry.functionId,
- "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
- // add metadata as attributes:
- ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
- (attributes, [key, value]) => {
- attributes[`ai.telemetry.metadata.${key}`] = value;
- return attributes;
- },
- {}
- ),
- // request headers
- ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
- if (value !== void 0) {
- attributes[`ai.request.headers.${key}`] = value;
- }
- return attributes;
- }, {})
- };
- }
-
- // core/telemetry/get-tracer.ts
- import { trace } from "@opentelemetry/api";
-
- // core/telemetry/noop-tracer.ts
- var noopTracer = {
- startSpan() {
- return noopSpan;
- },
- startActiveSpan(name, arg1, arg2, arg3) {
- if (typeof arg1 === "function") {
- return arg1(noopSpan);
- }
- if (typeof arg2 === "function") {
- return arg2(noopSpan);
- }
- if (typeof arg3 === "function") {
- return arg3(noopSpan);
- }
- }
- };
- var noopSpan = {
- spanContext() {
- return noopSpanContext;
- },
- setAttribute() {
- return this;
- },
- setAttributes() {
- return this;
- },
- addEvent() {
- return this;
- },
- addLink() {
- return this;
- },
- addLinks() {
- return this;
- },
- setStatus() {
- return this;
- },
- updateName() {
- return this;
- },
- end() {
- return this;
- },
- isRecording() {
- return false;
- },
- recordException() {
- return this;
- }
- };
- var noopSpanContext = {
- traceId: "",
- spanId: "",
- traceFlags: 0
- };
-
- // core/telemetry/get-tracer.ts
- var testTracer = void 0;
- function getTracer({ isEnabled }) {
- if (!isEnabled) {
- return noopTracer;
- }
- if (testTracer) {
- return testTracer;
- }
- return trace.getTracer("ai");
- }
-
- // core/telemetry/record-span.ts
- import { SpanStatusCode } from "@opentelemetry/api";
- function recordSpan({
- name,
- tracer,
- attributes,
- fn,
- endWhenDone = true
- }) {
- return tracer.startActiveSpan(name, { attributes }, async (span) => {
- try {
- const result = await fn(span);
- if (endWhenDone) {
- span.end();
- }
- return result;
- } catch (error) {
- try {
- if (error instanceof Error) {
- span.recordException({
- name: error.name,
- message: error.message,
- stack: error.stack
- });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: error.message
- });
- } else {
- span.setStatus({ code: SpanStatusCode.ERROR });
- }
- } finally {
- span.end();
- }
- throw error;
- }
- });
- }
-
  // core/generate-text/tool-call.ts
  import {
  InvalidToolArgumentsError,
@@ -1488,7 +1472,7 @@ async function generateText({
  "ai.result.text": currentModelResponse.text,
  "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
  });
- return new GenerateTextResult({
+ return new DefaultGenerateTextResult({
  // Always return a string so that the caller doesn't have to check for undefined.
  // If they need to check if the model did not return any text,
  // they can check the length of the string:
@@ -1548,7 +1532,7 @@ async function executeTools({
  (result) => result != null
  );
  }
- var GenerateTextResult = class {
+ var DefaultGenerateTextResult = class {
  constructor(options) {
  this.text = options.text;
  this.toolCalls = options.toolCalls;
@@ -1915,7 +1899,7 @@ async function streamText({
  }
  })
  );
- return new StreamTextResult({
+ return new DefaultStreamTextResult({
  stream: runToolsTransformation({
  tools,
  generatorStream: stream,
@@ -1931,7 +1915,7 @@ async function streamText({
  }
  });
  }
- var StreamTextResult = class {
+ var DefaultStreamTextResult = class {
  constructor({
  stream,
  warnings,
@@ -2069,11 +2053,6 @@ var StreamTextResult = class {
  this.originalStream = stream2;
  return stream1;
  }
- /**
- A text stream that returns only the generated text deltas. You can use it
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
  get textStream() {
  return createAsyncIterableStream(this.teeStream(), {
  transform(chunk, controller) {
@@ -2087,12 +2066,6 @@ var StreamTextResult = class {
  }
  });
  }
- /**
- A stream with all events, including text deltas, tool calls, tool results, and
- errors.
- You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
  get fullStream() {
  return createAsyncIterableStream(this.teeStream(), {
  transform(chunk, controller) {
@@ -2106,15 +2079,6 @@ var StreamTextResult = class {
  }
  });
  }
- /**
- Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param callbacks
- Stream callbacks that will be called when the stream emits events.
-
- @returns an `AIStream` object.
- */
  toAIStream(callbacks = {}) {
  let aggregatedResponse = "";
  const callbackTransformer = new TransformStream({
@@ -2198,14 +2162,6 @@ var StreamTextResult = class {
  });
  return this.fullStream.pipeThrough(callbackTransformer).pipeThrough(streamPartsTransformer).pipeThrough(new TextEncoderStream());
  }
- /**
- Writes stream data output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each stream data part as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
  pipeAIStreamToResponse(response, init) {
  var _a;
  response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
  response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2229,14 +2185,6 @@ var StreamTextResult = class {
2229
2185
  };
2230
2186
  read();
2231
2187
  }
2232
- /**
2233
- Writes text delta output to a Node.js response-like object.
2234
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2235
- writes each text delta as a separate chunk.
2236
-
2237
- @param response A Node.js response-like object (ServerResponse).
2238
- @param init Optional headers and status code.
2239
- */
2240
2188
  pipeTextStreamToResponse(response, init) {
2241
2189
  var _a;
2242
2190
  response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2260,15 +2208,6 @@ var StreamTextResult = class {
  };
  read();
  }
- /**
- Converts the result to a streamed response object with a stream data part stream.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param options An object with an init property (ResponseInit) and a data property.
- You can also pass in a ResponseInit directly (deprecated).
-
- @return A response object.
- */
  toAIStreamResponse(options) {
  var _a;
  const init = options == null ? void 0 : "init" in options ? options.init : {
@@ -2286,13 +2225,6 @@ var StreamTextResult = class {
  })
  });
  }
- /**
- Creates a simple text stream response.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers and status code.
- */
  toTextStreamResponse(init) {
  var _a;
  return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -3640,11 +3572,7 @@ export {
  AnthropicStream,
  AssistantResponse,
  CohereStream,
- EmbedManyResult,
- EmbedResult,
  EmptyResponseBodyError,
- GenerateObjectResult,
- GenerateTextResult,
  GoogleGenerativeAIStream,
  HuggingFaceStream,
  InkeepStream,
@@ -3668,8 +3596,6 @@ export {
  ReplicateStream,
  RetryError2 as RetryError,
  StreamData2 as StreamData,
- StreamObjectResult,
- StreamTextResult,
  StreamingTextResponse,
  ToolCallParseError,
  TypeValidationError,
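
The concrete result classes (`EmbedResult`, `EmbedManyResult`, `GenerateObjectResult`, `GenerateTextResult`, `StreamObjectResult`, `StreamTextResult`) are no longer exported from the runtime bundle. Code that constructed or `instanceof`-checked these classes would break; consuming the results returned by the corresponding functions keeps working. A sketch, assuming `model` is a configured provider model:

```js
// Still fine: use the returned object directly.
const result = await generateText({ model, prompt: "Hello" });
console.log(result.text, result.usage);

// No longer possible in 3.2.34 (removed from the runtime exports):
// import { GenerateTextResult } from "ai";
// if (result instanceof GenerateTextResult) { ... }
```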