@ai-sdk/google 2.0.0-alpha.9 → 2.0.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,88 @@
  # @ai-sdk/google

+ ## 2.0.0-beta.2
+
+ ### Patch Changes
+
+ - a313780: fix: omit system message for gemma models
+
+ ## 2.0.0-beta.1
+
+ ### Patch Changes
+
+ - Updated dependencies [742b7be]
+ - Updated dependencies [7cddb72]
+ - Updated dependencies [ccce59b]
+ - Updated dependencies [e2b9e4b]
+ - Updated dependencies [45c1ea2]
+ - Updated dependencies [e025824]
+ - Updated dependencies [0d06df6]
+ - Updated dependencies [472524a]
+ - Updated dependencies [dd3ff01]
+ - Updated dependencies [7435eb5]
+ - Updated dependencies [cb68df0]
+ - Updated dependencies [bfdca8d]
+ - Updated dependencies [44f4aba]
+ - Updated dependencies [023ba40]
+ - Updated dependencies [5e57fae]
+ - Updated dependencies [71f938d]
+ - Updated dependencies [28a5ed5]
+ - @ai-sdk/provider@2.0.0-beta.1
+ - @ai-sdk/provider-utils@3.0.0-beta.1
+
+ ## 2.0.0-alpha.15
+
+ ### Patch Changes
+
+ - Updated dependencies [48d257a]
+ - Updated dependencies [8ba77a7]
+ - @ai-sdk/provider@2.0.0-alpha.15
+ - @ai-sdk/provider-utils@3.0.0-alpha.15
+
+ ## 2.0.0-alpha.14
+
+ ### Patch Changes
+
+ - Updated dependencies [b5da06a]
+ - Updated dependencies [63f9e9b]
+ - Updated dependencies [2e13791]
+ - @ai-sdk/provider@2.0.0-alpha.14
+ - @ai-sdk/provider-utils@3.0.0-alpha.14
+
+ ## 2.0.0-alpha.13
+
+ ### Patch Changes
+
+ - Updated dependencies [68ecf2f]
+ - @ai-sdk/provider@2.0.0-alpha.13
+ - @ai-sdk/provider-utils@3.0.0-alpha.13
+
+ ## 2.0.0-alpha.12
+
+ ### Patch Changes
+
+ - e2aceaf: feat: add raw chunk support
+ - Updated dependencies [e2aceaf]
+ - @ai-sdk/provider@2.0.0-alpha.12
+ - @ai-sdk/provider-utils@3.0.0-alpha.12
+
+ ## 2.0.0-alpha.11
+
+ ### Patch Changes
+
+ - Updated dependencies [c1e6647]
+ - @ai-sdk/provider@2.0.0-alpha.11
+ - @ai-sdk/provider-utils@3.0.0-alpha.11
+
+ ## 2.0.0-alpha.10
+
+ ### Patch Changes
+
+ - 581a9be: fix (provider/google): prevent error when thinking signature is used
+ - Updated dependencies [c4df419]
+ - @ai-sdk/provider@2.0.0-alpha.10
+ - @ai-sdk/provider-utils@3.0.0-alpha.10
+
  ## 2.0.0-alpha.9

  ### Patch Changes
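
The a313780 entry above ("omit system message for gemma models") pairs with the `isGemmaModel` handling added in `dist/index.js` further down: for Gemma model ids the provider now drops the `systemInstruction` from the request and surfaces a call warning instead of letting the API reject it. A minimal sketch, assuming the AI SDK v5 beta `generateText` API and the default `google` provider instance (model id and prompts are illustrative):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// Sketch only: model id, prompts, and logging are illustrative.
const { text, warnings } = await generateText({
  model: google('gemma-3-27b-it'),
  // As of 2.0.0-beta.2 this system message is omitted from the Gemma request;
  // a warning is returned instead of the API rejecting the call.
  system: 'You are a terse assistant.',
  prompt: 'Summarize the latest change in one sentence.',
});

console.log(warnings);
// Expected: an "other" warning stating that Gemma models do not support
// system instructions (see the isGemmaModel branch in dist/index.js).
console.log(text);
```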
package/dist/index.d.mts CHANGED
@@ -31,15 +31,18 @@ declare const googleErrorDataSchema: z.ZodObject<{
  }>;
  type GoogleErrorData = z.infer<typeof googleErrorDataSchema>;

- type GoogleGenerativeAIModelId = 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-001' | 'gemini-1.5-pro' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-live-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-thinking-exp-01-21' | 'gemini-2.0-flash-exp' | 'gemini-2.5-pro-exp-03-25' | 'gemini-2.5-flash-preview-04-17' | 'gemini-exp-1206' | 'gemma-3-27b-it' | 'learnlm-1.5-pro-experimental' | (string & {});
+ type GoogleGenerativeAIModelId = 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-001' | 'gemini-1.5-pro' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-live-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-thinking-exp-01-21' | 'gemini-2.0-flash-exp' | 'gemini-2.5-pro-exp-03-25' | 'gemini-2.5-flash-preview-04-17' | 'gemini-exp-1206' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | 'learnlm-1.5-pro-experimental' | (string & {});
  declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities: z.ZodOptional<z.ZodArray<z.ZodEnum<["TEXT", "IMAGE"]>, "many">>;
  thinkingConfig: z.ZodOptional<z.ZodObject<{
  thinkingBudget: z.ZodOptional<z.ZodNumber>;
+ includeThoughts: z.ZodOptional<z.ZodBoolean>;
  }, "strip", z.ZodTypeAny, {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  }, {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  }>>;
  /**
  Optional.
@@ -110,6 +113,7 @@ declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
  thinkingConfig?: {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  } | undefined;
  cachedContent?: string | undefined;
  structuredOutputs?: boolean | undefined;
@@ -128,6 +132,7 @@ declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
  thinkingConfig?: {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  } | undefined;
  cachedContent?: string | undefined;
  structuredOutputs?: boolean | undefined;
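
The `thinkingConfig` additions above surface as a new `includeThoughts` provider option. A hedged sketch of passing it through `providerOptions.google` (model id and prompt are illustrative; note the runtime warning added in `dist/index.js` that treats `includeThoughts` as intended for the Google Vertex provider):

```ts
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';

// Hedged sketch: passing the new includeThoughts flag via providerOptions.
const result = await generateText({
  model: google('gemini-2.5-flash-preview-04-17'),
  prompt: 'Why is the sky blue?',
  providerOptions: {
    google: {
      thinkingConfig: {
        thinkingBudget: 1024,  // pre-existing option
        includeThoughts: true, // new in this release; Vertex-oriented per the runtime warning
      },
    },
  },
});

// Parts the API flags with `thought: true` are now mapped to reasoning
// content rather than plain text (see dist/index.js below).
```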
package/dist/index.d.ts CHANGED
@@ -31,15 +31,18 @@ declare const googleErrorDataSchema: z.ZodObject<{
  }>;
  type GoogleErrorData = z.infer<typeof googleErrorDataSchema>;

- type GoogleGenerativeAIModelId = 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-001' | 'gemini-1.5-pro' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-live-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-thinking-exp-01-21' | 'gemini-2.0-flash-exp' | 'gemini-2.5-pro-exp-03-25' | 'gemini-2.5-flash-preview-04-17' | 'gemini-exp-1206' | 'gemma-3-27b-it' | 'learnlm-1.5-pro-experimental' | (string & {});
+ type GoogleGenerativeAIModelId = 'gemini-1.5-flash' | 'gemini-1.5-flash-latest' | 'gemini-1.5-flash-001' | 'gemini-1.5-flash-002' | 'gemini-1.5-flash-8b' | 'gemini-1.5-flash-8b-latest' | 'gemini-1.5-flash-8b-001' | 'gemini-1.5-pro' | 'gemini-1.5-pro-latest' | 'gemini-1.5-pro-001' | 'gemini-1.5-pro-002' | 'gemini-2.0-flash' | 'gemini-2.0-flash-001' | 'gemini-2.0-flash-live-001' | 'gemini-2.0-flash-lite' | 'gemini-2.0-pro-exp-02-05' | 'gemini-2.0-flash-thinking-exp-01-21' | 'gemini-2.0-flash-exp' | 'gemini-2.5-pro-exp-03-25' | 'gemini-2.5-flash-preview-04-17' | 'gemini-exp-1206' | 'gemma-3-12b-it' | 'gemma-3-27b-it' | 'learnlm-1.5-pro-experimental' | (string & {});
  declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities: z.ZodOptional<z.ZodArray<z.ZodEnum<["TEXT", "IMAGE"]>, "many">>;
  thinkingConfig: z.ZodOptional<z.ZodObject<{
  thinkingBudget: z.ZodOptional<z.ZodNumber>;
+ includeThoughts: z.ZodOptional<z.ZodBoolean>;
  }, "strip", z.ZodTypeAny, {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  }, {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  }>>;
  /**
  Optional.
@@ -110,6 +113,7 @@ declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
  thinkingConfig?: {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  } | undefined;
  cachedContent?: string | undefined;
  structuredOutputs?: boolean | undefined;
@@ -128,6 +132,7 @@ declare const googleGenerativeAIProviderOptions: z.ZodObject<{
  responseModalities?: ("TEXT" | "IMAGE")[] | undefined;
  thinkingConfig?: {
  thinkingBudget?: number | undefined;
+ includeThoughts?: boolean | undefined;
  } | undefined;
  cachedContent?: string | undefined;
  structuredOutputs?: boolean | undefined;
package/dist/index.js CHANGED
@@ -330,7 +330,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  return {
  functionCall: {
  name: part.toolName,
- args: part.args
+ args: part.input
  }
  };
  }
@@ -348,7 +348,7 @@ function convertToGoogleGenerativeAIMessages(prompt) {
  name: part.toolName,
  response: {
  name: part.toolName,
- content: part.result
+ content: part.output.value
  }
  }
  }))
@@ -384,7 +384,8 @@ var dynamicRetrievalConfig = import_zod4.z.object({
  var googleGenerativeAIProviderOptions = import_zod4.z.object({
  responseModalities: import_zod4.z.array(import_zod4.z.enum(["TEXT", "IMAGE"])).optional(),
  thinkingConfig: import_zod4.z.object({
- thinkingBudget: import_zod4.z.number().optional()
+ thinkingBudget: import_zod4.z.number().optional(),
+ includeThoughts: import_zod4.z.boolean().optional()
  }).optional(),
  /**
  Optional.
@@ -488,7 +489,7 @@ function prepareTools({
  functionDeclarations.push({
  name: tool.name,
  description: (_a = tool.description) != null ? _a : "",
- parameters: convertJSONSchemaToOpenAPISchema(tool.parameters)
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
  });
  }
  }
@@ -595,14 +596,27 @@ var GoogleGenerativeAILanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b;
+ var _a, _b, _c;
  const warnings = [];
  const googleOptions = await (0, import_provider_utils4.parseProviderOptions)({
  provider: "google",
  providerOptions,
  schema: googleGenerativeAIProviderOptions
  });
+ if (((_a = googleOptions == null ? void 0 : googleOptions.thinkingConfig) == null ? void 0 : _a.includeThoughts) === true && !this.config.provider.startsWith("google.vertex.")) {
+ warnings.push({
+ type: "other",
+ message: `The 'includeThoughts' option is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
+ });
+ }
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(prompt);
+ const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
+ if (isGemmaModel && systemInstruction && systemInstruction.parts.length > 0) {
+ warnings.push({
+ type: "other",
+ message: `GEMMA models do not support system instructions. System messages will be ignored. Consider including instructions in the first user message instead.`
+ });
+ }
  const {
  tools: googleTools,
  toolConfig: googleToolConfig,
@@ -610,7 +624,7 @@ var GoogleGenerativeAILanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- useSearchGrounding: (_a = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _a : false,
+ useSearchGrounding: (_b = googleOptions == null ? void 0 : googleOptions.useSearchGrounding) != null ? _b : false,
  dynamicRetrievalConfig: googleOptions == null ? void 0 : googleOptions.dynamicRetrievalConfig,
  modelId: this.modelId
  });
@@ -631,7 +645,7 @@ var GoogleGenerativeAILanguageModel = class {
  responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
  // so this is needed as an escape hatch:
  // TODO convert into provider option
- ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
+ ((_c = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _c : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
  ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
  audioTimestamp: googleOptions.audioTimestamp
  },
@@ -640,7 +654,7 @@ var GoogleGenerativeAILanguageModel = class {
  thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig
  },
  contents,
- systemInstruction,
+ systemInstruction: isGemmaModel ? void 0 : systemInstruction,
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
  tools: googleTools,
  toolConfig: googleToolConfig,
@@ -675,16 +689,20 @@ var GoogleGenerativeAILanguageModel = class {
  const candidate = response.candidates[0];
  const content = [];
  const parts = candidate.content == null || typeof candidate.content !== "object" || !("parts" in candidate.content) ? [] : (_a = candidate.content.parts) != null ? _a : [];
+ const usageMetadata = response.usageMetadata;
  for (const part of parts) {
- if ("text" in part && part.text.length > 0) {
- content.push({ type: "text", text: part.text });
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ content.push({ type: "reasoning", text: part.text });
+ } else {
+ content.push({ type: "text", text: part.text });
+ }
  } else if ("functionCall" in part) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: this.config.generateId(),
  toolName: part.functionCall.name,
- args: JSON.stringify(part.functionCall.args)
+ input: JSON.stringify(part.functionCall.args)
  });
  } else if ("inlineData" in part) {
  content.push({
@@ -701,7 +719,6 @@ var GoogleGenerativeAILanguageModel = class {
  for (const source of sources) {
  content.push(source);
  }
- const usageMetadata = response.usageMetadata;
  return {
  content,
  finishReason: mapGoogleGenerativeAIFinishReason({
@@ -757,6 +774,9 @@ var GoogleGenerativeAILanguageModel = class {
  let providerMetadata = void 0;
  const generateId2 = this.config.generateId;
  let hasToolCalls = false;
+ let currentTextBlockId = null;
+ let currentReasoningBlockId = null;
+ let blockCounter = 0;
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -764,7 +784,10 @@ var GoogleGenerativeAILanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  controller.enqueue({ type: "error", error: chunk.error });
  return;
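
The hunk above forwards unmodified provider chunks as `{ type: "raw", rawValue }` parts when `options.includeRawChunks` is set (the "raw chunk support" feature noted under 2.0.0-alpha.12). A hypothetical consumer-level sketch, assuming the AI SDK beta exposes a matching `includeRawChunks` flag on `streamText`:

```ts
import { google } from '@ai-sdk/google';
import { streamText } from 'ai';

const result = streamText({
  model: google('gemini-2.0-flash'),
  prompt: 'Hello',
  // Assumption: consumer-level counterpart of the provider's options.includeRawChunks.
  includeRawChunks: true,
});

for await (const part of result.fullStream) {
  if (part.type === 'raw') {
    // rawValue is the unmodified chunk from the Generative Language API,
    // useful for debugging or custom telemetry.
    console.log(part.rawValue);
  }
}
```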
@@ -784,9 +807,51 @@ var GoogleGenerativeAILanguageModel = class {
  }
  const content = candidate.content;
  if (content != null) {
- const deltaText = getTextFromParts(content.parts);
- if (deltaText != null) {
- controller.enqueue(deltaText);
+ const parts = (_g = content.parts) != null ? _g : [];
+ for (const part of parts) {
+ if ("text" in part && part.text != null && part.text.length > 0) {
+ if (part.thought === true) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ currentTextBlockId = null;
+ }
+ if (currentReasoningBlockId === null) {
+ currentReasoningBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "reasoning-start",
+ id: currentReasoningBlockId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: currentReasoningBlockId,
+ delta: part.text
+ });
+ } else {
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ currentReasoningBlockId = null;
+ }
+ if (currentTextBlockId === null) {
+ currentTextBlockId = String(blockCounter++);
+ controller.enqueue({
+ type: "text-start",
+ id: currentTextBlockId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: currentTextBlockId,
+ delta: part.text
+ });
+ }
+ }
  }
  const inlineDataParts = getInlineDataParts(content.parts);
  if (inlineDataParts != null) {
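
The hunk above replaces the old `getTextFromParts` delta with per-block bookkeeping: parts flagged `thought: true` stream as `reasoning-start`/`reasoning-delta`/`reasoning-end`, ordinary text streams as `text-start`/`text-delta`/`text-end`, and switching between the two closes the previous block. A sketch using only the part shapes visible in the diff (the consuming handler is hypothetical):

```ts
// Stream parts as enqueued by the provider in the hunk above.
type GoogleStreamPart =
  | { type: 'reasoning-start'; id: string }
  | { type: 'reasoning-delta'; id: string; delta: string }
  | { type: 'reasoning-end'; id: string }
  | { type: 'text-start'; id: string }
  | { type: 'text-delta'; id: string; delta: string }
  | { type: 'text-end'; id: string };

// Hypothetical consumer: the id ties each delta to the block opened by the
// matching *-start part, so interleaved thought and answer text stay separable.
function handlePart(part: GoogleStreamPart) {
  switch (part.type) {
    case 'reasoning-delta':
      process.stdout.write(`[thinking] ${part.delta}`);
      break;
    case 'text-delta':
      process.stdout.write(part.delta);
      break;
    default:
      // *-start / *-end parts only delimit blocks; nothing to print.
      break;
  }
}
```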
@@ -805,18 +870,24 @@ var GoogleGenerativeAILanguageModel = class {
  if (toolCallDeltas != null) {
  for (const toolCall of toolCallDeltas) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
+ type: "tool-input-start",
+ id: toolCall.toolCallId,
+ toolName: toolCall.toolName
+ });
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: toolCall.args
+ });
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.toolCallId
  });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: toolCall.toolCallId,
  toolName: toolCall.toolName,
- args: toolCall.args
+ input: toolCall.args
  });
  hasToolCalls = true;
  }
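
Tool calls get the same block treatment: each complete function call is wrapped in `tool-input-start`/`tool-input-delta`/`tool-input-end` before the final `tool-call` part, whose argument payload is renamed from `args` to `input`. An illustrative sequence for a single call, with field names taken from the hunk above (ids and values are made up):

```ts
// Illustrative sequence for one function call; ids and values are made up.
const emitted = [
  { type: 'tool-input-start', id: 'call_1', toolName: 'getWeather' },
  { type: 'tool-input-delta', id: 'call_1', delta: '{"city":"Berlin"}' },
  { type: 'tool-input-end', id: 'call_1' },
  {
    type: 'tool-call',
    toolCallId: 'call_1',
    toolName: 'getWeather',
    input: '{"city":"Berlin"}', // previously sent as `args`
  },
];
```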
@@ -827,22 +898,34 @@ var GoogleGenerativeAILanguageModel = class {
  finishReason: candidate.finishReason,
  hasToolCalls
  });
- const sources = (_g = extractSources({
+ const sources = (_h = extractSources({
  groundingMetadata: candidate.groundingMetadata,
  generateId: generateId2
- })) != null ? _g : [];
+ })) != null ? _h : [];
  for (const source of sources) {
  controller.enqueue(source);
  }
  providerMetadata = {
  google: {
- groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
- safetyRatings: (_i = candidate.safetyRatings) != null ? _i : null
+ groundingMetadata: (_i = candidate.groundingMetadata) != null ? _i : null,
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
  }
  };
  }
  },
  flush(controller) {
+ if (currentTextBlockId !== null) {
+ controller.enqueue({
+ type: "text-end",
+ id: currentTextBlockId
+ });
+ }
+ if (currentReasoningBlockId !== null) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: currentReasoningBlockId
+ });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -866,19 +949,11 @@ function getToolCallsFromParts({
  );
  return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: generateId2(),
  toolName: part.functionCall.name,
  args: JSON.stringify(part.functionCall.args)
  }));
  }
- function getTextFromParts(parts) {
- const textParts = parts == null ? void 0 : parts.filter((part) => "text" in part);
- return textParts == null || textParts.length === 0 ? void 0 : {
- type: "text",
- text: textParts.map((part) => part.text).join("")
- };
- }
  function getInlineDataParts(parts) {
  return parts == null ? void 0 : parts.filter(
  (part) => "inlineData" in part
@@ -900,12 +975,9 @@ function extractSources({
  }));
  }
  var contentSchema = import_zod5.z.object({
- role: import_zod5.z.string(),
  parts: import_zod5.z.array(
  import_zod5.z.union([
- import_zod5.z.object({
- text: import_zod5.z.string()
- }),
+ // note: order matters since text can be fully empty
  import_zod5.z.object({
  functionCall: import_zod5.z.object({
  name: import_zod5.z.string(),
@@ -917,6 +989,10 @@ var contentSchema = import_zod5.z.object({
  mimeType: import_zod5.z.string(),
  data: import_zod5.z.string()
  })
+ }),
+ import_zod5.z.object({
+ text: import_zod5.z.string().nullish(),
+ thought: import_zod5.z.boolean().nullish()
  })
  ])
  ).nullish()
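
The `contentSchema` reordering at the end relies on how zod unions resolve: `z.union` returns the first member that parses, and the new text/thought shape is entirely optional, so it would also match `functionCall` and `inlineData` parts (stripping their payloads) if it were listed first; hence the "order matters since text can be fully empty" comment. A simplified, hypothetical reproduction:

```ts
import { z } from 'zod';

// Loose text/thought shape: every field is optional, so it parses almost anything.
const textPart = z.object({
  text: z.string().nullish(),
  thought: z.boolean().nullish(),
});

const functionCallPart = z.object({
  functionCall: z.object({ name: z.string(), args: z.unknown() }),
});

// Keep the loose shape last: if textPart came first, a functionCall part would
// match it as an empty object and the functionCall payload would be stripped.
const partSchema = z.union([functionCallPart, textPart]);

const parsed = partSchema.parse({ functionCall: { name: 'getWeather', args: {} } });
// With this ordering, `parsed` still carries the functionCall field.
```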