ai 3.2.33 → 3.2.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +473 -418
- package/dist/index.d.ts +473 -418
- package/dist/index.js +329 -409
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +329 -403
- package/dist/index.mjs.map +1 -1
- package/package.json +9 -9
- package/rsc/dist/index.d.ts +13 -0
- package/rsc/dist/rsc-server.d.mts +13 -0
- package/rsc/dist/rsc-server.mjs +2 -0
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.js
CHANGED
@@ -40,11 +40,7 @@ __export(streams_exports, {
   AnthropicStream: () => AnthropicStream,
   AssistantResponse: () => AssistantResponse,
   CohereStream: () => CohereStream,
-  EmbedManyResult: () => EmbedManyResult,
-  EmbedResult: () => EmbedResult,
   EmptyResponseBodyError: () => import_provider8.EmptyResponseBodyError,
-  GenerateObjectResult: () => GenerateObjectResult,
-  GenerateTextResult: () => GenerateTextResult,
   GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
   HuggingFaceStream: () => HuggingFaceStream,
   InkeepStream: () => InkeepStream,
@@ -68,8 +64,6 @@ __export(streams_exports, {
   ReplicateStream: () => ReplicateStream,
   RetryError: () => import_provider8.RetryError,
   StreamData: () => StreamData2,
-  StreamObjectResult: () => StreamObjectResult,
-  StreamTextResult: () => StreamTextResult,
   StreamingTextResponse: () => StreamingTextResponse,
   ToolCallParseError: () => import_provider8.ToolCallParseError,
   TypeValidationError: () => import_provider8.TypeValidationError,
@@ -187,14 +181,14 @@ async function embed({
   const modelResponse = await retry(
     () => model.doEmbed({ values: [value], abortSignal, headers })
   );
-  return new EmbedResult({
+  return new DefaultEmbedResult({
     value,
     embedding: modelResponse.embeddings[0],
     usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN },
     rawResponse: modelResponse.rawResponse
   });
 }
-var EmbedResult = class {
+var DefaultEmbedResult = class {
   constructor(options) {
     this.value = options.value;
     this.embedding = options.embedding;
@@ -230,7 +224,7 @@ async function embedMany({
   const modelResponse = await retry(
     () => model.doEmbed({ values, abortSignal, headers })
   );
-  return new EmbedManyResult({
+  return new DefaultEmbedManyResult({
     values,
     embeddings: modelResponse.embeddings,
     usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
@@ -246,9 +240,9 @@ async function embedMany({
     embeddings.push(...modelResponse.embeddings);
     tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
   }
-  return new EmbedManyResult({ values, embeddings, usage: { tokens } });
+  return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
 }
-var EmbedManyResult = class {
+var DefaultEmbedManyResult = class {
   constructor(options) {
     this.values = options.values;
     this.embeddings = options.embeddings;
@@ -523,6 +517,7 @@ function prepareCallSettings({
   topP,
   presencePenalty,
   frequencyPenalty,
+  stopSequences,
   seed,
   maxRetries
 }) {
@@ -609,11 +604,163 @@ function prepareCallSettings({
     topP,
     presencePenalty,
     frequencyPenalty,
+    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
     seed,
     maxRetries: maxRetries != null ? maxRetries : 2
   };
 }
 
+// core/telemetry/get-base-telemetry-attributes.ts
+function getBaseTelemetryAttributes({
+  operationName,
+  model,
+  settings,
+  telemetry,
+  headers
+}) {
+  var _a;
+  return {
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
+    // settings:
+    ...Object.entries(settings).reduce((attributes, [key, value]) => {
+      attributes[`ai.settings.${key}`] = value;
+      return attributes;
+    }, {}),
+    // special telemetry information
+    "operation.name": operationName,
+    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+    // add metadata as attributes:
+    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+      (attributes, [key, value]) => {
+        attributes[`ai.telemetry.metadata.${key}`] = value;
+        return attributes;
+      },
+      {}
+    ),
+    // request headers
+    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+      if (value !== void 0) {
+        attributes[`ai.request.headers.${key}`] = value;
+      }
+      return attributes;
+    }, {})
+  };
+}
+
+// core/telemetry/get-tracer.ts
+var import_api = require("@opentelemetry/api");
+
+// core/telemetry/noop-tracer.ts
+var noopTracer = {
+  startSpan() {
+    return noopSpan;
+  },
+  startActiveSpan(name, arg1, arg2, arg3) {
+    if (typeof arg1 === "function") {
+      return arg1(noopSpan);
+    }
+    if (typeof arg2 === "function") {
+      return arg2(noopSpan);
+    }
+    if (typeof arg3 === "function") {
+      return arg3(noopSpan);
+    }
+  }
+};
+var noopSpan = {
+  spanContext() {
+    return noopSpanContext;
+  },
+  setAttribute() {
+    return this;
+  },
+  setAttributes() {
+    return this;
+  },
+  addEvent() {
+    return this;
+  },
+  addLink() {
+    return this;
+  },
+  addLinks() {
+    return this;
+  },
+  setStatus() {
+    return this;
+  },
+  updateName() {
+    return this;
+  },
+  end() {
+    return this;
+  },
+  isRecording() {
+    return false;
+  },
+  recordException() {
+    return this;
+  }
+};
+var noopSpanContext = {
+  traceId: "",
+  spanId: "",
+  traceFlags: 0
+};
+
+// core/telemetry/get-tracer.ts
+var testTracer = void 0;
+function getTracer({ isEnabled }) {
+  if (!isEnabled) {
+    return noopTracer;
+  }
+  if (testTracer) {
+    return testTracer;
+  }
+  return import_api.trace.getTracer("ai");
+}
+
+// core/telemetry/record-span.ts
+var import_api2 = require("@opentelemetry/api");
+function recordSpan({
+  name,
+  tracer,
+  attributes,
+  fn,
+  endWhenDone = true
+}) {
+  return tracer.startActiveSpan(name, { attributes }, async (span) => {
+    try {
+      const result = await fn(span);
+      if (endWhenDone) {
+        span.end();
+      }
+      return result;
+    } catch (error) {
+      try {
+        if (error instanceof Error) {
+          span.recordException({
+            name: error.name,
+            message: error.message,
+            stack: error.stack
+          });
+          span.setStatus({
+            code: import_api2.SpanStatusCode.ERROR,
+            message: error.message
+          });
+        } else {
+          span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
+        }
+      } finally {
+        span.end();
+      }
+      throw error;
+    }
+  });
+}
+
 // core/types/token-usage.ts
 function calculateCompletionTokenUsage(usage) {
   return {
@@ -669,133 +816,180 @@ async function generateObject({
   maxRetries,
   abortSignal,
   headers,
+  experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a, _b;
-  const retry = retryWithExponentialBackoff({ maxRetries });
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.generateObject",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
   const jsonSchema = convertZodToJSONSchema(schema);
-  if (mode === "auto" || mode == null) {
-    mode = model.defaultObjectGenerationMode;
-  }
-  let result;
-  let finishReason;
-  let usage;
-  let warnings;
-  let rawResponse;
-  let logprobs;
-  switch (mode) {
-    case "json": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      const generateResult = await retry(() => {
-        return model.doGenerate({
-          mode: { type: "object-json" },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        });
-      });
-      if (generateResult.text === void 0) {
-        throw new import_provider5.NoObjectGeneratedError();
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.generateObject",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages }),
+      "ai.settings.jsonSchema": JSON.stringify(jsonSchema),
+      "ai.settings.mode": mode
+    },
+    tracer,
+    fn: async (span) => {
+      var _a2, _b;
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      if (mode === "auto" || mode == null) {
+        mode = model.defaultObjectGenerationMode;
       }
-      result = generateResult.text;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case "grammar": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      const generateResult = await retry(
-        () => model.doGenerate({
-          mode: { type: "object-grammar", schema: jsonSchema },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        })
-      );
-      if (generateResult.text === void 0) {
-        throw new import_provider5.NoObjectGeneratedError();
+      let result;
+      let finishReason;
+      let usage;
+      let warnings;
+      let rawResponse;
+      let logprobs;
+      switch (mode) {
+        case "json": {
+          const validatedPrompt = getValidatedPrompt({
+            system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: { type: "object-json" },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text
+                });
+                return result2;
+              }
+            })
+          );
+          if (generateResult.text === void 0) {
+            throw new import_provider5.NoObjectGeneratedError();
+          }
+          result = generateResult.text;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case "tool": {
+          const validatedPrompt = getValidatedPrompt({
+            system,
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: {
+                    type: "object-tool",
+                    tool: {
+                      type: "function",
+                      name: "json",
+                      description: "Respond with a JSON object.",
+                      parameters: jsonSchema
+                    }
+                  },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text,
+                  "ai.result.toolCalls": JSON.stringify(result2.toolCalls)
+                });
+                return result2;
+              }
+            })
+          );
+          const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
+          if (functionArgs === void 0) {
+            throw new import_provider5.NoObjectGeneratedError();
+          }
+          result = functionArgs;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case void 0: {
+          throw new Error(
+            "Model does not have a default object generation mode."
+          );
+        }
+        default: {
+          const _exhaustiveCheck = mode;
+          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+        }
       }
-      result = generateResult.text;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case "tool": {
-      const validatedPrompt = getValidatedPrompt({
-        system,
-        prompt,
-        messages
-      });
-      const generateResult = await retry(
-        () => model.doGenerate({
-          mode: {
-            type: "object-tool",
-            tool: {
-              type: "function",
-              name: "json",
-              description: "Respond with a JSON object.",
-              parameters: jsonSchema
-            }
-          },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        })
-      );
-      const functionArgs = (_b = (_a = generateResult.toolCalls) == null ? void 0 : _a[0]) == null ? void 0 : _b.args;
-      if (functionArgs === void 0) {
-        throw new import_provider5.NoObjectGeneratedError();
+      const parseResult = (0, import_provider_utils4.safeParseJSON)({ text: result, schema });
+      if (!parseResult.success) {
+        throw parseResult.error;
       }
-      result = functionArgs;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case void 0: {
-      throw new Error("Model does not have a default object generation mode.");
-    }
-    default: {
-      const _exhaustiveCheck = mode;
-      throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+      span.setAttributes({
+        "ai.finishReason": finishReason,
+        "ai.usage.promptTokens": usage.promptTokens,
+        "ai.usage.completionTokens": usage.completionTokens,
+        "ai.result.object": JSON.stringify(parseResult.value)
+      });
+      return new DefaultGenerateObjectResult({
+        object: parseResult.value,
+        finishReason,
+        usage: calculateCompletionTokenUsage(usage),
+        warnings,
+        rawResponse,
+        logprobs
+      });
     }
-  }
-  const parseResult = (0, import_provider_utils4.safeParseJSON)({ text: result, schema });
-  if (!parseResult.success) {
-    throw parseResult.error;
-  }
-  return new GenerateObjectResult({
-    object: parseResult.value,
-    finishReason,
-    usage: calculateCompletionTokenUsage(usage),
-    warnings,
-    rawResponse,
-    logprobs
   });
 }
-var GenerateObjectResult = class {
+var DefaultGenerateObjectResult = class {
   constructor(options) {
     this.object = options.object;
     this.finishReason = options.finishReason;
@@ -804,10 +998,6 @@ var GenerateObjectResult = class {
     this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
-  /**
-  Converts the object to a JSON response.
-  The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
-   */
   toJsonResponse(init) {
     var _a;
     return new Response(JSON.stringify(this.object), {
@@ -930,35 +1120,6 @@ async function streamObject({
       };
       break;
     }
-    case "grammar": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      callOptions = {
-        mode: { type: "object-grammar", schema: jsonSchema },
-        ...prepareCallSettings(settings),
-        inputFormat: validatedPrompt.type,
-        prompt: convertToLanguageModelPrompt(validatedPrompt),
-        abortSignal,
-        headers
-      };
-      transformer = {
-        transform: (chunk, controller) => {
-          switch (chunk.type) {
-            case "text-delta":
-              controller.enqueue(chunk.textDelta);
-              break;
-            case "finish":
-            case "error":
-              controller.enqueue(chunk);
-              break;
-          }
-        }
-      };
-      break;
-    }
     case "tool": {
       const validatedPrompt = getValidatedPrompt({
         system,
@@ -1005,7 +1166,7 @@ async function streamObject({
     }
   }
   const result = await retry(() => model.doStream(callOptions));
-  return new StreamObjectResult({
+  return new DefaultStreamObjectResult({
     stream: result.stream.pipeThrough(new TransformStream(transformer)),
     warnings: result.warnings,
     rawResponse: result.rawResponse,
@@ -1013,7 +1174,7 @@ async function streamObject({
     onFinish
   });
 }
-var StreamObjectResult = class {
+var DefaultStreamObjectResult = class {
   constructor({
     stream,
     warnings,
@@ -1109,18 +1270,9 @@ var StreamObjectResult = class {
       })
     );
   }
-  /**
-  The generated object (typed according to the schema). Resolved when the response is finished.
-   */
   get object() {
     return this.objectPromise.value;
   }
-  /**
-  Stream of partial objects. It gets more complete as the stream progresses.
-
-  Note that the partial object is not validated.
-  If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
-   */
   get partialObjectStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1142,10 +1294,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Text stream of the JSON representation of the generated object. It contains text chunks.
-  When the stream is finished, the object is valid JSON that can be parsed.
-   */
   get textStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1167,9 +1315,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Stream of different types of events, including partial objects, errors, and finish events.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1177,14 +1322,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -1208,14 +1345,6 @@ var StreamObjectResult = class {
     };
     read();
   }
-  /**
-  Creates a simple text stream response.
-  The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -1255,157 +1384,6 @@ function prepareToolsAndToolChoice({
   };
 }
 
-// core/telemetry/get-base-telemetry-attributes.ts
-function getBaseTelemetryAttributes({
-  operationName,
-  model,
-  settings,
-  telemetry,
-  headers
-}) {
-  var _a;
-  return {
-    "ai.model.provider": model.provider,
-    "ai.model.id": model.modelId,
-    // settings:
-    ...Object.entries(settings).reduce((attributes, [key, value]) => {
-      attributes[`ai.settings.${key}`] = value;
-      return attributes;
-    }, {}),
-    // special telemetry information
-    "operation.name": operationName,
-    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
-    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
-    // add metadata as attributes:
-    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
-      (attributes, [key, value]) => {
-        attributes[`ai.telemetry.metadata.${key}`] = value;
-        return attributes;
-      },
-      {}
-    ),
-    // request headers
-    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
-      if (value !== void 0) {
-        attributes[`ai.request.headers.${key}`] = value;
-      }
-      return attributes;
-    }, {})
-  };
-}
-
-// core/telemetry/get-tracer.ts
-var import_api = require("@opentelemetry/api");
-
-// core/telemetry/noop-tracer.ts
-var noopTracer = {
-  startSpan() {
-    return noopSpan;
-  },
-  startActiveSpan(name, arg1, arg2, arg3) {
-    if (typeof arg1 === "function") {
-      return arg1(noopSpan);
-    }
-    if (typeof arg2 === "function") {
-      return arg2(noopSpan);
-    }
-    if (typeof arg3 === "function") {
-      return arg3(noopSpan);
-    }
-  }
-};
-var noopSpan = {
-  spanContext() {
-    return noopSpanContext;
-  },
-  setAttribute() {
-    return this;
-  },
-  setAttributes() {
-    return this;
-  },
-  addEvent() {
-    return this;
-  },
-  addLink() {
-    return this;
-  },
-  addLinks() {
-    return this;
-  },
-  setStatus() {
-    return this;
-  },
-  updateName() {
-    return this;
-  },
-  end() {
-    return this;
-  },
-  isRecording() {
-    return false;
-  },
-  recordException() {
-    return this;
-  }
-};
-var noopSpanContext = {
-  traceId: "",
-  spanId: "",
-  traceFlags: 0
-};
-
-// core/telemetry/get-tracer.ts
-var testTracer = void 0;
-function getTracer({ isEnabled }) {
-  if (!isEnabled) {
-    return noopTracer;
-  }
-  if (testTracer) {
-    return testTracer;
-  }
-  return import_api.trace.getTracer("ai");
-}
-
-// core/telemetry/record-span.ts
-var import_api2 = require("@opentelemetry/api");
-function recordSpan({
-  name,
-  tracer,
-  attributes,
-  fn,
-  endWhenDone = true
-}) {
-  return tracer.startActiveSpan(name, { attributes }, async (span) => {
-    try {
-      const result = await fn(span);
-      if (endWhenDone) {
-        span.end();
-      }
-      return result;
-    } catch (error) {
-      try {
-        if (error instanceof Error) {
-          span.recordException({
-            name: error.name,
-            message: error.message,
-            stack: error.stack
-          });
-          span.setStatus({
-            code: import_api2.SpanStatusCode.ERROR,
-            message: error.message
-          });
-        } else {
-          span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
-        }
-      } finally {
-        span.end();
-      }
-      throw error;
-    }
-  });
-}
-
 // core/generate-text/tool-call.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
@@ -1578,7 +1556,7 @@ async function generateText({
         "ai.result.text": currentModelResponse.text,
         "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
       });
-      return new GenerateTextResult({
+      return new DefaultGenerateTextResult({
         // Always return a string so that the caller doesn't have to check for undefined.
         // If they need to check if the model did not return any text,
         // they can check the length of the string:
@@ -1638,7 +1616,7 @@ async function executeTools({
     (result) => result != null
   );
 }
-var GenerateTextResult = class {
+var DefaultGenerateTextResult = class {
   constructor(options) {
     this.text = options.text;
     this.toolCalls = options.toolCalls;
@@ -2005,7 +1983,7 @@ async function streamText({
       }
     })
   );
-  return new StreamTextResult({
+  return new DefaultStreamTextResult({
     stream: runToolsTransformation({
       tools,
       generatorStream: stream,
@@ -2021,7 +1999,7 @@ async function streamText({
     }
   });
 }
-var StreamTextResult = class {
+var DefaultStreamTextResult = class {
   constructor({
     stream,
     warnings,
@@ -2159,11 +2137,6 @@ var StreamTextResult = class {
     this.originalStream = stream2;
     return stream1;
   }
-  /**
-  A text stream that returns only the generated text deltas. You can use it
-  as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get textStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2177,12 +2150,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  A stream with all events, including text deltas, tool calls, tool results, and
-  errors.
-  You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2196,15 +2163,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param callbacks
-  Stream callbacks that will be called when the stream emits events.
-
-  @returns an `AIStream` object.
-   */
   toAIStream(callbacks = {}) {
     let aggregatedResponse = "";
     const callbackTransformer = new TransformStream({
@@ -2288,14 +2246,6 @@ var StreamTextResult = class {
     });
     return this.fullStream.pipeThrough(callbackTransformer).pipeThrough(streamPartsTransformer).pipeThrough(new TextEncoderStream());
   }
-  /**
-  Writes stream data output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each stream data part as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeAIStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2319,14 +2269,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2350,15 +2292,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Converts the result to a streamed response object with a stream data part stream.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param options An object with an init property (ResponseInit) and a data property.
-  You can also pass in a ResponseInit directly (deprecated).
-
-  @return A response object.
-   */
   toAIStreamResponse(options) {
     var _a;
     const init = options == null ? void 0 : "init" in options ? options.init : {
@@ -2376,13 +2309,6 @@ var StreamTextResult = class {
       })
     });
   }
-  /**
-  Creates a simple text stream response.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -3707,11 +3633,7 @@ var nanoid = import_provider_utils7.generateId;
   AnthropicStream,
   AssistantResponse,
   CohereStream,
-  EmbedManyResult,
-  EmbedResult,
   EmptyResponseBodyError,
-  GenerateObjectResult,
-  GenerateTextResult,
   GoogleGenerativeAIStream,
   HuggingFaceStream,
   InkeepStream,
@@ -3735,8 +3657,6 @@ var nanoid = import_provider_utils7.generateId;
   ReplicateStream,
   RetryError,
   StreamData,
-  StreamObjectResult,
-  StreamTextResult,
   StreamingTextResponse,
   ToolCallParseError,
   TypeValidationError,