modelfusion 0.116.1 → 0.118.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. package/CHANGELOG.md +67 -0
  2. package/README.md +14 -11
  3. package/core/getFunctionCallLogger.cjs +6 -6
  4. package/core/getFunctionCallLogger.js +6 -6
  5. package/model-function/ModelCallEvent.d.ts +1 -1
  6. package/model-function/embed/EmbeddingEvent.d.ts +1 -1
  7. package/model-function/embed/EmbeddingModel.d.ts +1 -1
  8. package/model-function/embed/embed.cjs +5 -5
  9. package/model-function/embed/embed.d.ts +2 -2
  10. package/model-function/embed/embed.js +5 -5
  11. package/model-function/executeStandardCall.cjs +3 -3
  12. package/model-function/executeStandardCall.d.ts +2 -2
  13. package/model-function/executeStandardCall.js +3 -3
  14. package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
  15. package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
  16. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
  17. package/model-function/generate-image/generateImage.cjs +2 -2
  18. package/model-function/generate-image/generateImage.d.ts +1 -1
  19. package/model-function/generate-image/generateImage.js +2 -2
  20. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
  21. package/model-function/generate-speech/generateSpeech.cjs +2 -2
  22. package/model-function/generate-speech/generateSpeech.d.ts +1 -1
  23. package/model-function/generate-speech/generateSpeech.js +2 -2
  24. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +1 -1
  25. package/model-function/generate-structure/StructureFromTextGenerationModel.js +1 -1
  26. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
  27. package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
  28. package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
  29. package/model-function/generate-structure/generateStructure.cjs +2 -2
  30. package/model-function/generate-structure/generateStructure.d.ts +1 -1
  31. package/model-function/generate-structure/generateStructure.js +2 -2
  32. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
  33. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
  34. package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
  35. package/model-function/generate-text/TextGenerationModel.d.ts +2 -2
  36. package/model-function/generate-text/generateText.cjs +3 -3
  37. package/model-function/generate-text/generateText.d.ts +1 -1
  38. package/model-function/generate-text/generateText.js +3 -3
  39. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
  40. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
  41. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
  42. package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
  43. package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
  44. package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
  45. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
  46. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
  47. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
  48. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
  49. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
  50. package/model-function/generate-text/prompt-template/index.cjs +1 -0
  51. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  52. package/model-function/generate-text/prompt-template/index.js +1 -0
  53. package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
  54. package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
  55. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  56. package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
  57. package/model-function/generate-transcription/generateTranscription.js +1 -1
  58. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
  59. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  60. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
  61. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
  62. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
  63. package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
  64. package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
  65. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -4
  66. package/model-provider/cohere/CohereTextGenerationModel.js +3 -3
  67. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
  68. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
  69. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
  70. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -3
  71. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -4
  72. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -3
  73. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
  74. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
  75. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
  76. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +37 -27
  77. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +18 -8
  78. package/model-provider/llamacpp/LlamaCppCompletionModel.js +37 -27
  79. package/model-provider/llamacpp/LlamaCppFacade.cjs +31 -3
  80. package/model-provider/llamacpp/LlamaCppFacade.d.ts +6 -1
  81. package/model-provider/llamacpp/LlamaCppFacade.js +6 -1
  82. package/model-provider/llamacpp/LlamaCppGrammars.cjs +84 -0
  83. package/model-provider/llamacpp/LlamaCppGrammars.d.ts +18 -0
  84. package/model-provider/llamacpp/LlamaCppGrammars.js +81 -0
  85. package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
  86. package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
  87. package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
  88. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
  89. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
  90. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
  91. package/model-provider/llamacpp/index.cjs +2 -3
  92. package/model-provider/llamacpp/index.d.ts +1 -2
  93. package/model-provider/llamacpp/index.js +1 -2
  94. package/model-provider/mistral/MistralChatModel.cjs +3 -3
  95. package/model-provider/mistral/MistralChatModel.d.ts +4 -4
  96. package/model-provider/mistral/MistralChatModel.js +3 -3
  97. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
  98. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
  99. package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
  100. package/model-provider/ollama/OllamaChatModel.cjs +3 -3
  101. package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
  102. package/model-provider/ollama/OllamaChatModel.js +3 -3
  103. package/model-provider/ollama/OllamaCompletionModel.cjs +3 -3
  104. package/model-provider/ollama/OllamaCompletionModel.d.ts +14 -14
  105. package/model-provider/ollama/OllamaCompletionModel.js +3 -3
  106. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
  107. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
  108. package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
  109. package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
  110. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
  111. package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
  112. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +6 -6
  113. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +2 -2
  114. package/model-provider/openai/AbstractOpenAICompletionModel.js +6 -6
  115. package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
  116. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
  117. package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
  118. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
  119. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
  120. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
  121. package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
  122. package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
  123. package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
  124. package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
  125. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
  126. package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
  127. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
  128. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
  129. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
  130. package/package.json +1 -1
  131. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
  132. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
  133. package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
  134. package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
  135. package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
  136. package/tool/generate-tool-call/generateToolCall.cjs +2 -2
  137. package/tool/generate-tool-call/generateToolCall.js +2 -2
  138. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
  139. package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
  140. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
  141. package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
  142. package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
  143. package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
  144. package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
  145. package/tool/generate-tool-calls/generateToolCalls.js +2 -2
package/CHANGELOG.md CHANGED
@@ -1,5 +1,72 @@
1
1
  # Changelog
2
2
 
3
+ ## v0.118.0 - 2024-01-07
4
+
5
+ ### Added
6
+
7
+ - `LlamaCppCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `llamacpp.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.
8
+
9
+ ```ts
10
+ const model = llamacpp
11
+ .CompletionTextGenerator({
12
+ // run https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF with llama.cpp
13
+ promptTemplate: llamacpp.prompt.ChatML,
14
+ contextWindowSize: 4096,
15
+ maxGenerationTokens: 512,
16
+ })
17
+ .withChatPrompt();
18
+ ```
19
+
20
+ ### Changed
21
+
22
+ - **breaking change**: renamed `response` to `rawResponse` when using `fullResponse: true` setting.
23
+ - **breaking change**: renamed `llamacpp.TextGenerator` to `llamacpp.CompletionTextGenerator`.
24
+
25
+ ### Removed
26
+
27
+ - **breaking change**: removed `.withTextPromptTemplate` on `LlamaCppCompletionModel`.
28
+
29
+ ## v0.117.0 - 2024-01-06
30
+
31
+ ### Added
32
+
33
+ - Predefined Llama.cpp GBNF grammars:
34
+
35
+ - `llamacpp.grammar.json`: Restricts the output to JSON.
36
+ - `llamacpp.grammar.jsonArray`: Restricts the output to a JSON array.
37
+ - `llamacpp.grammar.list`: Restricts the output to a newline-separated list where each line starts with `- `.
38
+
39
+ - Llama.cpp structure generation support:
40
+
41
+ ```ts
42
+ const structure = await generateStructure(
43
+ llamacpp
44
+ .TextGenerator({
45
+ // run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
46
+ maxGenerationTokens: 1024,
47
+ temperature: 0,
48
+ })
49
+ .withTextPromptTemplate(ChatMLPrompt.instruction()) // needed for jsonStructurePrompt.text()
50
+ .asStructureGenerationModel(jsonStructurePrompt.text()), // automatically restrict the output to JSON
51
+
52
+ zodSchema(
53
+ z.object({
54
+ characters: z.array(
55
+ z.object({
56
+ name: z.string(),
57
+ class: z
58
+ .string()
59
+ .describe("Character class, e.g. warrior, mage, or thief."),
60
+ description: z.string(),
61
+ })
62
+ ),
63
+ })
64
+ ),
65
+
66
+ "Generate 3 character descriptions for a fantasy role playing game. "
67
+ );
68
+ ```
69
+
3
70
  ## v0.116.0 - 2024-01-05
4
71
 
5
72
  ### Added
package/README.md CHANGED
@@ -152,7 +152,7 @@ const sentiment = await generateStructure(
152
152
  );
153
153
  ```
154
154
 
155
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama)
155
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev//integration/model-provider/llama.cpp)
156
156
 
157
157
  #### streamStructure
158
158
 
@@ -198,7 +198,7 @@ for await (const part of structureStream) {
198
198
  }
199
199
  ```
200
200
 
201
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama)
201
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev//integration/model-provider/llama.cpp)
202
202
 
203
203
  ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
204
204
 
@@ -424,14 +424,15 @@ const text = await generateText(
424
424
  #### Instruction Prompt Example
425
425
 
426
426
  ```ts
427
- // example assumes you are running https://huggingface.co/TheBloke/Llama-2-7B-GGUF with llama.cpp
428
427
  const text = await generateText(
429
428
  llamacpp
430
- .TextGenerator({
429
+ .CompletionTextGenerator({
430
+ // run https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF with llama.cpp
431
+ promptTemplate: llamacpp.prompt.Llama2, // Set prompt template
431
432
  contextWindowSize: 4096, // Llama 2 context window size
432
- maxGenerationTokens: 1000,
433
+ maxGenerationTokens: 512,
433
434
  })
434
- .withTextPromptTemplate(Llama2Prompt.instruction()),
435
+ .withInstructionPrompt(),
435
436
  {
436
437
  system: "You are a story writer.",
437
438
  instruction: "Write a short story about a robot learning to love.",
@@ -503,11 +504,11 @@ const image = await generateImage(
503
504
 
504
505
  ### Metadata and original responses
505
506
 
506
- ModelFusion model functions return rich responses that include the original response and metadata when you set the `fullResponse` option to `true`.
507
+ ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse` option to `true`.
507
508
 
508
509
  ```ts
509
- // access the full response (needs to be typed) and the metadata:
510
- const { text, response, metadata } = await generateText(
510
+ // access the raw response (needs to be typed) and the metadata:
511
+ const { text, rawResponse, metadata } = await generateText(
511
512
  openai.CompletionTextGenerator({
512
513
  model: "gpt-3.5-turbo-instruct",
513
514
  maxGenerationTokens: 1000,
@@ -519,8 +520,8 @@ const { text, response, metadata } = await generateText(
519
520
 
520
521
  console.log(metadata);
521
522
 
522
- // cast to the response type:
523
- for (const choice of (response as OpenAICompletionResponse).choices) {
523
+ // cast to the raw response type:
524
+ for (const choice of (rawResponse as OpenAICompletionResponse).choices) {
524
525
  console.log(choice.text);
525
526
  }
526
527
  ```
@@ -563,6 +564,8 @@ modelfusion.setLogFormat("detailed-object"); // log full events
563
564
  - [Split Text](https://modelfusion.dev/guide/text-chunk/split)
564
565
  - [Utilities](https://modelfusion.dev/guide/util/)
565
566
  - [API Configuration](https://modelfusion.dev/guide/util/api-configuration)
567
+ - [Base URL](https://modelfusion.dev/guide/util/api-configuration/base-url)
568
+ - [Headers](https://modelfusion.dev/guide/util/api-configuration/headers)
566
569
  - [Retry strategies](https://modelfusion.dev/guide/util/api-configuration/retry)
567
570
  - [Throttling strategies](https://modelfusion.dev/guide/util/api-configuration/throttle)
568
571
  - [Logging](https://modelfusion.dev/guide/util/logging)
@@ -36,11 +36,11 @@ const detailedObjectObserver = {
36
36
  // Remove the "response" property from the result (if any):
37
37
  if (event.eventType === "finished" &&
38
38
  event.result != null &&
39
- "response" in event.result &&
40
- event.result?.response != null) {
39
+ "rawResponse" in event.result &&
40
+ event.result?.rawResponse != null) {
41
41
  event = {
42
42
  ...event,
43
- result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "response")
43
+ result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
44
44
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
45
45
  ),
46
46
  };
@@ -73,11 +73,11 @@ const detailedJsonObserver = {
73
73
  // Remove the "response" property from the result (if any):
74
74
  if (event.eventType === "finished" &&
75
75
  event.result != null &&
76
- "response" in event.result &&
77
- event.result?.response != null) {
76
+ "rawResponse" in event.result &&
77
+ event.result?.rawResponse != null) {
78
78
  event = {
79
79
  ...event,
80
- result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "response")
80
+ result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
81
81
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
82
82
  ),
83
83
  };
@@ -32,11 +32,11 @@ const detailedObjectObserver = {
32
32
  // Remove the "response" property from the result (if any):
33
33
  if (event.eventType === "finished" &&
34
34
  event.result != null &&
35
- "response" in event.result &&
36
- event.result?.response != null) {
35
+ "rawResponse" in event.result &&
36
+ event.result?.rawResponse != null) {
37
37
  event = {
38
38
  ...event,
39
- result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "response")
39
+ result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
40
40
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
41
41
  ),
42
42
  };
@@ -69,11 +69,11 @@ const detailedJsonObserver = {
69
69
  // Remove the "response" property from the result (if any):
70
70
  if (event.eventType === "finished" &&
71
71
  event.result != null &&
72
- "response" in event.result &&
73
- event.result?.response != null) {
72
+ "rawResponse" in event.result &&
73
+ event.result?.rawResponse != null) {
74
74
  event = {
75
75
  ...event,
76
- result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "response")
76
+ result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
77
77
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
78
78
  ),
79
79
  };
@@ -22,7 +22,7 @@ export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
22
22
  }
23
23
  export type BaseModelCallFinishedEventResult = {
24
24
  status: "success";
25
- response: unknown;
25
+ rawResponse: unknown;
26
26
  value: unknown;
27
27
  /**
28
28
  * Optional usage information for the model call. The type depends on the call type.
@@ -6,7 +6,7 @@ export interface EmbeddingStartedEvent extends BaseModelCallStartedEvent {
6
6
  }
7
7
  export type EmbeddingFinishedEventResult = {
8
8
  status: "success";
9
- response: unknown;
9
+ rawResponse: unknown;
10
10
  value: Vector | Array<Vector>;
11
11
  } | {
12
12
  status: "error";
@@ -17,7 +17,7 @@ export interface EmbeddingModel<VALUE, SETTINGS extends EmbeddingModelSettings =
17
17
  */
18
18
  readonly isParallelizable: boolean;
19
19
  doEmbedValues(values: VALUE[], options: FunctionCallOptions): PromiseLike<{
20
- response: unknown;
20
+ rawResponse: unknown;
21
21
  embeddings: Vector[];
22
22
  }>;
23
23
  }
@@ -32,13 +32,13 @@ async function embedMany(model, values, options) {
32
32
  responses.push(response);
33
33
  }
34
34
  }
35
- const rawResponses = responses.map((response) => response.response);
35
+ const rawResponses = responses.map((response) => response.rawResponse);
36
36
  const embeddings = [];
37
37
  for (const response of responses) {
38
38
  embeddings.push(...response.embeddings);
39
39
  }
40
40
  return {
41
- response: rawResponses,
41
+ rawResponse: rawResponses,
42
42
  extractedValue: embeddings,
43
43
  };
44
44
  },
@@ -46,7 +46,7 @@ async function embedMany(model, values, options) {
46
46
  return options?.fullResponse
47
47
  ? {
48
48
  embeddings: fullResponse.value,
49
- response: fullResponse.response,
49
+ rawResponse: fullResponse.rawResponse,
50
50
  metadata: fullResponse.metadata,
51
51
  }
52
52
  : fullResponse.value;
@@ -61,7 +61,7 @@ async function embed(model, value, options) {
61
61
  generateResponse: async (options) => {
62
62
  const result = await model.doEmbedValues([value], options);
63
63
  return {
64
- response: result.response,
64
+ rawResponse: result.rawResponse,
65
65
  extractedValue: result.embeddings[0],
66
66
  };
67
67
  },
@@ -69,7 +69,7 @@ async function embed(model, value, options) {
69
69
  return options?.fullResponse
70
70
  ? {
71
71
  embedding: fullResponse.value,
72
- response: fullResponse.response,
72
+ rawResponse: fullResponse.rawResponse,
73
73
  metadata: fullResponse.metadata,
74
74
  }
75
75
  : fullResponse.value;
@@ -29,7 +29,7 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
29
29
  fullResponse: true;
30
30
  }): Promise<{
31
31
  embeddings: Vector[];
32
- response: unknown;
32
+ rawResponse: unknown;
33
33
  metadata: ModelCallMetadata;
34
34
  }>;
35
35
  /**
@@ -56,6 +56,6 @@ export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModel
56
56
  fullResponse: true;
57
57
  }): Promise<{
58
58
  embedding: Vector;
59
- response: unknown;
59
+ rawResponse: unknown;
60
60
  metadata: ModelCallMetadata;
61
61
  }>;
@@ -29,13 +29,13 @@ export async function embedMany(model, values, options) {
29
29
  responses.push(response);
30
30
  }
31
31
  }
32
- const rawResponses = responses.map((response) => response.response);
32
+ const rawResponses = responses.map((response) => response.rawResponse);
33
33
  const embeddings = [];
34
34
  for (const response of responses) {
35
35
  embeddings.push(...response.embeddings);
36
36
  }
37
37
  return {
38
- response: rawResponses,
38
+ rawResponse: rawResponses,
39
39
  extractedValue: embeddings,
40
40
  };
41
41
  },
@@ -43,7 +43,7 @@ export async function embedMany(model, values, options) {
43
43
  return options?.fullResponse
44
44
  ? {
45
45
  embeddings: fullResponse.value,
46
- response: fullResponse.response,
46
+ rawResponse: fullResponse.rawResponse,
47
47
  metadata: fullResponse.metadata,
48
48
  }
49
49
  : fullResponse.value;
@@ -57,7 +57,7 @@ export async function embed(model, value, options) {
57
57
  generateResponse: async (options) => {
58
58
  const result = await model.doEmbedValues([value], options);
59
59
  return {
60
- response: result.response,
60
+ rawResponse: result.rawResponse,
61
61
  extractedValue: result.embeddings[0],
62
62
  };
63
63
  },
@@ -65,7 +65,7 @@ export async function embed(model, value, options) {
65
65
  return options?.fullResponse
66
66
  ? {
67
67
  embedding: fullResponse.value,
68
- response: fullResponse.response,
68
+ rawResponse: fullResponse.rawResponse,
69
69
  metadata: fullResponse.metadata,
70
70
  }
71
71
  : fullResponse.value;
@@ -78,7 +78,7 @@ async function executeStandardCall({ model, options, input, functionType, genera
78
78
  });
79
79
  throw result.error;
80
80
  }
81
- const response = result.value.response;
81
+ const rawResponse = result.value.rawResponse;
82
82
  const value = result.value.extractedValue;
83
83
  const usage = result.value.usage;
84
84
  eventSource.notify({
@@ -87,13 +87,13 @@ async function executeStandardCall({ model, options, input, functionType, genera
87
87
  result: {
88
88
  status: "success",
89
89
  usage,
90
- response,
90
+ rawResponse,
91
91
  value,
92
92
  },
93
93
  });
94
94
  return {
95
95
  value,
96
- response,
96
+ rawResponse,
97
97
  metadata: {
98
98
  model: model.modelInformation,
99
99
  callId: finishMetadata.callId,
@@ -8,12 +8,12 @@ export declare function executeStandardCall<VALUE, MODEL extends Model<ModelSett
8
8
  input: unknown;
9
9
  functionType: ModelCallStartedEvent["functionType"];
10
10
  generateResponse: (options: FunctionCallOptions) => PromiseLike<{
11
- response: unknown;
11
+ rawResponse: unknown;
12
12
  extractedValue: VALUE;
13
13
  usage?: unknown;
14
14
  }>;
15
15
  }): Promise<{
16
16
  value: VALUE;
17
- response: unknown;
17
+ rawResponse: unknown;
18
18
  metadata: ModelCallMetadata;
19
19
  }>;
@@ -75,7 +75,7 @@ export async function executeStandardCall({ model, options, input, functionType,
75
75
  });
76
76
  throw result.error;
77
77
  }
78
- const response = result.value.response;
78
+ const rawResponse = result.value.rawResponse;
79
79
  const value = result.value.extractedValue;
80
80
  const usage = result.value.usage;
81
81
  eventSource.notify({
@@ -84,13 +84,13 @@ export async function executeStandardCall({ model, options, input, functionType,
84
84
  result: {
85
85
  status: "success",
86
86
  usage,
87
- response,
87
+ rawResponse,
88
88
  value,
89
89
  },
90
90
  });
91
91
  return {
92
92
  value,
93
- response,
93
+ rawResponse,
94
94
  metadata: {
95
95
  model: model.modelInformation,
96
96
  callId: finishMetadata.callId,
@@ -4,7 +4,7 @@ export interface ImageGenerationStartedEvent extends BaseModelCallStartedEvent {
4
4
  }
5
5
  export type ImageGenerationFinishedEventResult = {
6
6
  status: "success";
7
- response: unknown;
7
+ rawResponse: unknown;
8
8
  value: string;
9
9
  } | {
10
10
  status: "error";
@@ -15,7 +15,7 @@ export interface ImageGenerationModelSettings extends ModelSettings {
15
15
  }
16
16
  export interface ImageGenerationModel<PROMPT, SETTINGS extends ImageGenerationModelSettings = ImageGenerationModelSettings> extends Model<SETTINGS> {
17
17
  doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
18
- response: unknown;
18
+ rawResponse: unknown;
19
19
  base64Images: string[];
20
20
  }>;
21
21
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, PROMPT>): ImageGenerationModel<INPUT_PROMPT, SETTINGS>;
@@ -11,7 +11,7 @@ export declare class PromptTemplateImageGenerationModel<PROMPT, MODEL_PROMPT, SE
11
11
  get modelInformation(): import("../ModelInformation.js").ModelInformation;
12
12
  get settings(): SETTINGS;
13
13
  doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
14
- response: unknown;
14
+ rawResponse: unknown;
15
15
  base64Images: string[];
16
16
  }>;
17
17
  get settingsForEvent(): Partial<SETTINGS>;
@@ -11,7 +11,7 @@ async function generateImage(model, prompt, options) {
11
11
  generateResponse: async (options) => {
12
12
  const result = await model.doGenerateImages(prompt, options);
13
13
  return {
14
- response: result.response,
14
+ rawResponse: result.rawResponse,
15
15
  extractedValue: result.base64Images,
16
16
  };
17
17
  },
@@ -24,7 +24,7 @@ async function generateImage(model, prompt, options) {
24
24
  imageBase64: imagesBase64[0],
25
25
  images,
26
26
  imagesBase64,
27
- response: fullResponse.response,
27
+ rawResponse: fullResponse.rawResponse,
28
28
  metadata: fullResponse.metadata,
29
29
  }
30
30
  : images[0];
@@ -36,6 +36,6 @@ export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT
36
36
  imageBase64: string;
37
37
  images: Buffer[];
38
38
  imagesBase64: string[];
39
- response: unknown;
39
+ rawResponse: unknown;
40
40
  metadata: ModelCallMetadata;
41
41
  }>;
@@ -8,7 +8,7 @@ export async function generateImage(model, prompt, options) {
8
8
  generateResponse: async (options) => {
9
9
  const result = await model.doGenerateImages(prompt, options);
10
10
  return {
11
- response: result.response,
11
+ rawResponse: result.rawResponse,
12
12
  extractedValue: result.base64Images,
13
13
  };
14
14
  },
@@ -21,7 +21,7 @@ export async function generateImage(model, prompt, options) {
21
21
  imageBase64: imagesBase64[0],
22
22
  images,
23
23
  imagesBase64,
24
- response: fullResponse.response,
24
+ rawResponse: fullResponse.rawResponse,
25
25
  metadata: fullResponse.metadata,
26
26
  }
27
27
  : images[0];
@@ -6,7 +6,7 @@ export interface SpeechGenerationStartedEvent extends BaseModelCallStartedEvent
6
6
  }
7
7
  export type SpeechGenerationFinishedEventResult = {
8
8
  status: "success";
9
- response: unknown;
9
+ rawResponse: unknown;
10
10
  value: Buffer;
11
11
  } | {
12
12
  status: "error";
@@ -11,7 +11,7 @@ async function generateSpeech(model, text, options) {
11
11
  generateResponse: async (options) => {
12
12
  const response = await model.doGenerateSpeechStandard(text, options);
13
13
  return {
14
- response,
14
+ rawResponse: response,
15
15
  extractedValue: response,
16
16
  };
17
17
  },
@@ -19,7 +19,7 @@ async function generateSpeech(model, text, options) {
19
19
  return options?.fullResponse
20
20
  ? {
21
21
  speech: fullResponse.value,
22
- response: fullResponse.response,
22
+ rawResponse: fullResponse.rawResponse,
23
23
  metadata: fullResponse.metadata,
24
24
  }
25
25
  : fullResponse.value;
@@ -27,6 +27,6 @@ export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenera
27
27
  fullResponse: true;
28
28
  }): Promise<{
29
29
  speech: Buffer;
30
- response: unknown;
30
+ rawResponse: unknown;
31
31
  metadata: ModelCallMetadata;
32
32
  }>;
@@ -8,7 +8,7 @@ export async function generateSpeech(model, text, options) {
8
8
  generateResponse: async (options) => {
9
9
  const response = await model.doGenerateSpeechStandard(text, options);
10
10
  return {
11
- response,
11
+ rawResponse: response,
12
12
  extractedValue: response,
13
13
  };
14
14
  },
@@ -16,7 +16,7 @@ export async function generateSpeech(model, text, options) {
16
16
  return options?.fullResponse
17
17
  ? {
18
18
  speech: fullResponse.value,
19
- response: fullResponse.response,
19
+ rawResponse: fullResponse.rawResponse,
20
20
  metadata: fullResponse.metadata,
21
21
  }
22
22
  : fullResponse.value;
@@ -30,7 +30,7 @@ class StructureFromTextGenerationModel {
30
30
  return this.model.settingsForEvent;
31
31
  }
32
32
  async doGenerateStructure(schema, prompt, options) {
33
- const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
33
+ const { rawResponse: response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
34
34
  ...options,
35
35
  fullResponse: true,
36
36
  });
@@ -27,7 +27,7 @@ export class StructureFromTextGenerationModel {
27
27
  return this.model.settingsForEvent;
28
28
  }
29
29
  async doGenerateStructure(schema, prompt, options) {
30
- const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
30
+ const { rawResponse: response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
31
31
  ...options,
32
32
  fullResponse: true,
33
33
  });
@@ -12,7 +12,7 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
12
12
  super(options);
13
13
  }
14
14
  async doGenerateStructure(schema, prompt, options) {
15
- const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
15
+ const { rawResponse: response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
16
16
  ...options,
17
17
  fullResponse: true,
18
18
  });
@@ -9,7 +9,7 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
9
9
  super(options);
10
10
  }
11
11
  async doGenerateStructure(schema, prompt, options) {
12
- const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
12
+ const { rawResponse: response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
13
13
  ...options,
14
14
  fullResponse: true,
15
15
  });
@@ -4,7 +4,7 @@ export interface StructureGenerationStartedEvent extends BaseModelCallStartedEve
4
4
  }
5
5
  export type StructureGenerationFinishedEventResult = {
6
6
  status: "success";
7
- response: unknown;
7
+ rawResponse: unknown;
8
8
  value: unknown;
9
9
  usage?: {
10
10
  promptTokens: number;
@@ -29,7 +29,7 @@ async function generateStructure(model, schema, prompt, options) {
29
29
  }
30
30
  const value = parseResult.data;
31
31
  return {
32
- response: result.response,
32
+ rawResponse: result.response,
33
33
  extractedValue: value,
34
34
  usage: result.usage,
35
35
  };
@@ -38,7 +38,7 @@ async function generateStructure(model, schema, prompt, options) {
38
38
  return options?.fullResponse
39
39
  ? {
40
40
  structure: fullResponse.value,
41
- response: fullResponse.response,
41
+ rawResponse: fullResponse.rawResponse,
42
42
  metadata: fullResponse.metadata,
43
43
  }
44
44
  : fullResponse.value;
@@ -44,6 +44,6 @@ export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends St
44
44
  fullResponse: true;
45
45
  }): Promise<{
46
46
  structure: STRUCTURE;
47
- response: unknown;
47
+ rawResponse: unknown;
48
48
  metadata: ModelCallMetadata;
49
49
  }>;
@@ -26,7 +26,7 @@ export async function generateStructure(model, schema, prompt, options) {
26
26
  }
27
27
  const value = parseResult.data;
28
28
  return {
29
- response: result.response,
29
+ rawResponse: result.response,
30
30
  extractedValue: value,
31
31
  usage: result.usage,
32
32
  };
@@ -35,7 +35,7 @@ export async function generateStructure(model, schema, prompt, options) {
35
35
  return options?.fullResponse
36
36
  ? {
37
37
  structure: fullResponse.value,
38
- response: fullResponse.response,
38
+ rawResponse: fullResponse.rawResponse,
39
39
  metadata: fullResponse.metadata,
40
40
  }
41
41
  : fullResponse.value;