modelfusion 0.22.0 → 0.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/README.md +3 -0
  2. package/core/FunctionOptions.d.ts +14 -0
  3. package/core/GlobalFunctionLogging.cjs +12 -0
  4. package/core/GlobalFunctionLogging.d.ts +3 -0
  5. package/core/GlobalFunctionLogging.js +7 -0
  6. package/core/getFunctionCallLogger.cjs +74 -0
  7. package/core/getFunctionCallLogger.d.ts +3 -0
  8. package/core/getFunctionCallLogger.js +70 -0
  9. package/core/index.cjs +1 -1
  10. package/core/index.d.ts +1 -1
  11. package/core/index.js +1 -1
  12. package/model-function/AbstractModel.d.ts +1 -0
  13. package/model-function/Model.d.ts +6 -1
  14. package/model-function/ModelCallEvent.d.ts +21 -2
  15. package/model-function/embed-text/embedText.d.ts +2 -2
  16. package/model-function/executeCall.cjs +24 -17
  17. package/model-function/executeCall.d.ts +15 -13
  18. package/model-function/executeCall.js +22 -15
  19. package/model-function/generate-image/generateImage.d.ts +1 -1
  20. package/model-function/generate-json/JsonGenerationEvent.d.ts +16 -0
  21. package/model-function/generate-json/JsonGenerationModel.d.ts +13 -0
  22. package/model-function/generate-json/JsonOrTextGenerationModel.d.ts +23 -0
  23. package/model-function/generate-json/JsonTextGenerationModel.cjs +3 -0
  24. package/model-function/generate-json/JsonTextGenerationModel.d.ts +6 -5
  25. package/model-function/generate-json/JsonTextGenerationModel.js +3 -0
  26. package/model-function/generate-json/generateJson.cjs +1 -0
  27. package/model-function/generate-json/generateJson.d.ts +2 -2
  28. package/model-function/generate-json/generateJson.js +1 -0
  29. package/model-function/generate-json/generateJsonOrText.cjs +1 -0
  30. package/model-function/generate-json/generateJsonOrText.d.ts +2 -2
  31. package/model-function/generate-json/generateJsonOrText.js +1 -0
  32. package/model-function/generate-text/TextGenerationEvent.d.ts +5 -2
  33. package/model-function/generate-text/TextGenerationModel.d.ts +5 -0
  34. package/model-function/generate-text/generateText.cjs +1 -0
  35. package/model-function/generate-text/generateText.d.ts +1 -1
  36. package/model-function/generate-text/generateText.js +1 -0
  37. package/model-function/generate-text/streamText.cjs +9 -6
  38. package/model-function/generate-text/streamText.d.ts +5 -5
  39. package/model-function/generate-text/streamText.js +9 -6
  40. package/model-function/index.cjs +3 -2
  41. package/model-function/index.d.ts +3 -2
  42. package/model-function/index.js +3 -2
  43. package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +2 -2
  44. package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +1 -1
  45. package/model-function/synthesize-speech/synthesizeSpeech.d.ts +1 -1
  46. package/model-function/transcribe-speech/TranscriptionModel.d.ts +1 -1
  47. package/model-function/transcribe-speech/transcribe.d.ts +1 -1
  48. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +9 -0
  49. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -0
  50. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +9 -0
  51. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +6 -0
  52. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -0
  53. package/model-provider/cohere/CohereTextEmbeddingModel.js +6 -0
  54. package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -0
  55. package/model-provider/cohere/CohereTextGenerationModel.d.ts +2 -1
  56. package/model-provider/cohere/CohereTextGenerationModel.js +20 -0
  57. package/model-provider/cohere/CohereTokenizer.d.ts +1 -1
  58. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +8 -0
  59. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +1 -0
  60. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +8 -0
  61. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +7 -0
  62. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -0
  63. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +7 -0
  64. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +16 -0
  65. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -0
  66. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +16 -0
  67. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +6 -0
  68. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -0
  69. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +6 -0
  70. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +31 -0
  71. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +6 -0
  72. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +31 -0
  73. package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -0
  74. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -0
  75. package/model-provider/openai/OpenAIImageGenerationModel.js +8 -0
  76. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +5 -0
  77. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -11
  78. package/model-provider/openai/OpenAITextEmbeddingModel.js +5 -0
  79. package/model-provider/openai/OpenAITextGenerationModel.cjs +62 -6
  80. package/model-provider/openai/OpenAITextGenerationModel.d.ts +34 -17
  81. package/model-provider/openai/OpenAITextGenerationModel.js +60 -5
  82. package/model-provider/openai/OpenAITranscriptionModel.cjs +7 -0
  83. package/model-provider/openai/OpenAITranscriptionModel.d.ts +2 -0
  84. package/model-provider/openai/OpenAITranscriptionModel.js +7 -0
  85. package/model-provider/openai/TikTokenTokenizer.d.ts +4 -2
  86. package/model-provider/openai/chat/OpenAIChatModel.cjs +68 -9
  87. package/model-provider/openai/chat/OpenAIChatModel.d.ts +40 -20
  88. package/model-provider/openai/chat/OpenAIChatModel.js +66 -8
  89. package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +2 -2
  90. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +5 -2
  91. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +5 -2
  92. package/model-provider/stability/StabilityImageGenerationModel.cjs +15 -0
  93. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -0
  94. package/model-provider/stability/StabilityImageGenerationModel.js +15 -0
  95. package/package.json +1 -1
  96. package/prompt/PromptFormatTextGenerationModel.cjs +3 -0
  97. package/prompt/PromptFormatTextGenerationModel.d.ts +1 -0
  98. package/prompt/PromptFormatTextGenerationModel.js +3 -0
  99. package/tool/executeTool.cjs +3 -0
  100. package/tool/executeTool.js +3 -0
  101. package/tool/useTool.d.ts +2 -2
  102. package/tool/useToolOrGenerateText.d.ts +2 -2
  103. package/core/ConsoleLogger.cjs +0 -9
  104. package/core/ConsoleLogger.d.ts +0 -5
  105. package/core/ConsoleLogger.js +0 -5
  106. package/model-function/generate-json/GenerateJsonModel.d.ts +0 -8
  107. package/model-function/generate-json/GenerateJsonOrTextModel.d.ts +0 -18
  108. /package/model-function/generate-json/{GenerateJsonModel.cjs → JsonGenerationModel.cjs} +0 -0
  109. /package/model-function/generate-json/{GenerateJsonModel.js → JsonGenerationModel.js} +0 -0
  110. /package/model-function/generate-json/{GenerateJsonOrTextModel.cjs → JsonOrTextGenerationModel.cjs} +0 -0
  111. /package/model-function/generate-json/{GenerateJsonOrTextModel.js → JsonOrTextGenerationModel.js} +0 -0
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -49,6 +49,8 @@ export const OPENAI_CHAT_MODELS = {
         contextWindowSize: 4096,
         promptTokenCostInMillicents: 0.15,
         completionTokenCostInMillicents: 0.2,
+        fineTunedPromptTokenCostInMillicents: 1.2,
+        fineTunedCompletionTokenCostInMillicents: 1.6,
     },
     "gpt-3.5-turbo-0301": {
         contextWindowSize: 4096,
@@ -59,6 +61,8 @@ export const OPENAI_CHAT_MODELS = {
         contextWindowSize: 4096,
         promptTokenCostInMillicents: 0.15,
         completionTokenCostInMillicents: 0.2,
+        fineTunedPromptTokenCostInMillicents: 1.2,
+        fineTunedCompletionTokenCostInMillicents: 1.6,
     },
     "gpt-3.5-turbo-16k": {
         contextWindowSize: 16384,
@@ -71,11 +75,43 @@ export const OPENAI_CHAT_MODELS = {
         completionTokenCostInMillicents: 0.4,
     },
 };
-export const isOpenAIChatModel = (model) => model in OPENAI_CHAT_MODELS;
-export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => response.usage.prompt_tokens *
-    OPENAI_CHAT_MODELS[model].promptTokenCostInMillicents +
-    response.usage.completion_tokens *
-    OPENAI_CHAT_MODELS[model].completionTokenCostInMillicents;
+export function getOpenAIChatModelInformation(model) {
+    // Model is already a base model:
+    if (model in OPENAI_CHAT_MODELS) {
+        const baseModelInformation = OPENAI_CHAT_MODELS[model];
+        return {
+            baseModel: model,
+            isFineTuned: false,
+            contextWindowSize: baseModelInformation.contextWindowSize,
+            promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
+        };
+    }
+    // Extract the base model from the fine-tuned model:
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
+    const [_, baseModel, ___, ____, _____] = model.split(":");
+    if (["gpt-3.5-turbo", "gpt-3.5-turbo-0613"].includes(baseModel)) {
+        const baseModelInformation = OPENAI_CHAT_MODELS[baseModel];
+        return {
+            baseModel: baseModel,
+            isFineTuned: true,
+            contextWindowSize: baseModelInformation.contextWindowSize,
+            promptTokenCostInMillicents: baseModelInformation.fineTunedPromptTokenCostInMillicents,
+            completionTokenCostInMillicents: baseModelInformation.fineTunedCompletionTokenCostInMillicents,
+        };
+    }
+    throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
+}
+export const isOpenAIChatModel = (model) => model in OPENAI_CHAT_MODELS ||
+    model.startsWith("ft:gpt-3.5-turbo-0613:") ||
+    model.startsWith("ft:gpt-3.5-turbo:");
+export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => {
+    const modelInformation = getOpenAIChatModelInformation(model);
+    return (response.usage.prompt_tokens *
+        modelInformation.promptTokenCostInMillicents +
+        response.usage.completion_tokens *
+        modelInformation.completionTokenCostInMillicents);
+};
 /**
  * Create a text generation model that calls the OpenAI chat completion API.
  *
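The new `getOpenAIChatModelInformation` helper makes fine-tuned model ids first-class: it parses OpenAI's `ft:<base-model>:<org>:<suffix>:<id>` format and returns the base model's context window together with the fine-tuned token rates. A minimal usage sketch (the model id below is hypothetical, and the helper is assumed to be re-exported from the package root like the other OpenAI utilities):

import { getOpenAIChatModelInformation } from "modelfusion";

const info = getOpenAIChatModelInformation(
  "ft:gpt-3.5-turbo-0613:my-org:my-suffix:abc123" // hypothetical fine-tuned id
);
// info.baseModel === "gpt-3.5-turbo-0613"
// info.isFineTuned === true
// info.contextWindowSize === 4096 (inherited from the base model)
// info.promptTokenCostInMillicents === 1.2 (fine-tuned rate)
// info.completionTokenCostInMillicents === 1.6 (fine-tuned rate)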
@@ -116,9 +152,11 @@ export class OpenAIChatModel extends AbstractModel {
             writable: true,
             value: void 0
         });
-        this.tokenizer = new TikTokenTokenizer({ model: this.settings.model });
-        this.contextWindowSize =
-            OPENAI_CHAT_MODELS[this.settings.model].contextWindowSize;
+        const modelInformation = getOpenAIChatModelInformation(this.settings.model);
+        this.tokenizer = new TikTokenTokenizer({
+            model: modelInformation.baseModel,
+        });
+        this.contextWindowSize = modelInformation.contextWindowSize;
     }
     get modelName() {
         return this.settings.model;
@@ -162,6 +200,19 @@ export class OpenAIChatModel extends AbstractModel {
             call: async () => callOpenAIChatCompletionAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "stopSequences",
+            "maxCompletionTokens",
+            "baseUrl",
+            "functions",
+            "functionCall",
+            "temperature",
+            "topP",
+            "n",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateTextResponse(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
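The new `settingsForEvent` getter attaches only an allow-listed subset of the model settings to function-call events, which keeps credentials such as `apiKey` out of observer payloads and logs. A stand-alone sketch of the same filtering pattern (the settings values below are illustrative):

// Illustrative: the allow-list drops anything not named, e.g. apiKey.
const settings = { apiKey: "sk-...", temperature: 0.7, topP: 0.9, retry: {} };
const eventSettingProperties = ["temperature", "topP", "n"];

const settingsForEvent = Object.fromEntries(
  Object.entries(settings).filter(([key]) => eventSettingProperties.includes(key))
);
// => { temperature: 0.7, topP: 0.9 }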
@@ -203,6 +254,13 @@ export class OpenAIChatModel extends AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return SecureJSON.parse(jsonText);
     }
+    extractUsage(response) {
+        return {
+            promptTokens: response.usage.prompt_tokens,
+            completionTokens: response.usage.completion_tokens,
+            totalTokens: response.usage.total_tokens,
+        };
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextGenerationModel({
             model: this.withSettings({ stopSequences: promptFormat.stopSequences }),
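The new `extractUsage` hook normalizes OpenAI's snake_case usage block into the camelCase shape reported by the reworked model-call events (see the `ModelCallEvent.d.ts` changes above). An illustration of the mapping, with an assumed example response:

// Assumed example response shape:
const response = {
  usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
};
// What extractUsage(response) produces:
const usage = {
  promptTokens: response.usage.prompt_tokens, // 10
  completionTokens: response.usage.completion_tokens, // 20
  totalTokens: response.usage.total_tokens, // 30
};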
package/model-provider/openai/chat/OpenAIChatPrompt.d.ts CHANGED
@@ -1,5 +1,5 @@
 import z from "zod";
-import { GenerateJsonOrTextPrompt } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
+import { JsonOrTextGenerationPrompt } from "../../../model-function/generate-json/JsonOrTextGenerationModel.js";
 import { SchemaDefinition } from "../../../model-function/generate-json/SchemaDefinition.js";
 import { Tool } from "../../../tool/Tool.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
@@ -76,7 +76,7 @@ export declare class OpenAIChatSingleFunctionPrompt<FUNCTION> {
         };
     }[];
 }
-export declare class OpenAIChatAutoFunctionPrompt<FUNCTIONS extends Array<OpenAIFunctionDescription<any>>> implements GenerateJsonOrTextPrompt<OpenAIChatResponse> {
+export declare class OpenAIChatAutoFunctionPrompt<FUNCTIONS extends Array<OpenAIFunctionDescription<any>>> implements JsonOrTextGenerationPrompt<OpenAIChatResponse> {
     readonly messages: OpenAIChatMessage[];
     readonly fns: FUNCTIONS;
     constructor({ messages, fns, }: {
package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs CHANGED
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.countOpenAIChatPromptTokens = exports.countOpenAIChatMessageTokens = exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = exports.OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = void 0;
 const countTokens_js_1 = require("../../../model-function/tokenize-text/countTokens.cjs");
 const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
+const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
 /**
  * Prompt tokens that are included automatically for every full
  * chat prompt (several messages) that is sent to OpenAI.
@@ -14,8 +15,10 @@ exports.OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 async function countOpenAIChatMessageTokens({ message, model, }) {
-    return (exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
-        (await (0, countTokens_js_1.countTokens)(new TikTokenTokenizer_js_1.TikTokenTokenizer({ model }), message.content ?? "")));
+    const contentTokenCount = await (0, countTokens_js_1.countTokens)(new TikTokenTokenizer_js_1.TikTokenTokenizer({
+        model: (0, OpenAIChatModel_js_1.getOpenAIChatModelInformation)(model).baseModel,
+    }), message.content ?? "");
+    return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
 }
 exports.countOpenAIChatMessageTokens = countOpenAIChatMessageTokens;
 async function countOpenAIChatPromptTokens({ messages, model, }) {
package/model-provider/openai/chat/countOpenAIChatMessageTokens.js CHANGED
@@ -1,5 +1,6 @@
 import { countTokens } from "../../../model-function/tokenize-text/countTokens.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+import { getOpenAIChatModelInformation, } from "./OpenAIChatModel.js";
 /**
  * Prompt tokens that are included automatically for every full
  * chat prompt (several messages) that is sent to OpenAI.
@@ -11,8 +12,10 @@ export const OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 export const OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 export async function countOpenAIChatMessageTokens({ message, model, }) {
-    return (OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
-        (await countTokens(new TikTokenTokenizer({ model }), message.content ?? "")));
+    const contentTokenCount = await countTokens(new TikTokenTokenizer({
+        model: getOpenAIChatModelInformation(model).baseModel,
+    }), message.content ?? "");
+    return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
 }
 export async function countOpenAIChatPromptTokens({ messages, model, }) {
     let tokens = OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT;
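Because the tokenizer is now resolved from the base model via `getOpenAIChatModelInformation`, token counting also works for fine-tuned model ids, which have no TikToken encoding of their own. A usage sketch (the model id is hypothetical, and the function is assumed to be exported from the package root):

import { countOpenAIChatMessageTokens } from "modelfusion";

// The base model ("gpt-3.5-turbo-0613") is resolved first, so the
// TikToken encoding lookup no longer fails for "ft:..." ids:
const tokenCount = await countOpenAIChatMessageTokens({
  message: { role: "user", content: "Hello, world!" },
  model: "ft:gpt-3.5-turbo-0613:my-org:my-suffix:abc123", // hypothetical id
});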
package/model-provider/stability/StabilityImageGenerationModel.cjs CHANGED
@@ -64,6 +64,21 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
             call: async () => callStabilityImageGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "baseUrl",
+            "height",
+            "width",
+            "cfgScale",
+            "clipGuidancePreset",
+            "sampler",
+            "samples",
+            "seed",
+            "steps",
+            "stylePreset",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateImageResponse(prompt, options) {
         return this.callAPI(prompt, options);
     }
package/model-provider/stability/StabilityImageGenerationModel.d.ts CHANGED
@@ -32,6 +32,7 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
     get modelName(): string;
     private get apiKey();
     callAPI(input: StabilityImageGenerationPrompt, options?: ModelFunctionOptions<StabilityImageGenerationModelSettings>): Promise<StabilityImageGenerationResponse>;
+    get settingsForEvent(): Partial<StabilityImageGenerationModelSettings>;
     generateImageResponse(prompt: StabilityImageGenerationPrompt, options?: ModelFunctionOptions<StabilityImageGenerationModelSettings>): Promise<{
         artifacts: {
             seed: number;
package/model-provider/stability/StabilityImageGenerationModel.js CHANGED
@@ -61,6 +61,21 @@ export class StabilityImageGenerationModel extends AbstractModel {
             call: async () => callStabilityImageGenerationAPI(callSettings),
         });
     }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "baseUrl",
+            "height",
+            "width",
+            "cfgScale",
+            "clipGuidancePreset",
+            "sampler",
+            "samples",
+            "seed",
+            "steps",
+            "stylePreset",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
     generateImageResponse(prompt, options) {
         return this.callAPI(prompt, options);
     }
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.22.0",
+  "version": "0.24.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
package/prompt/PromptFormatTextGenerationModel.cjs CHANGED
@@ -65,6 +65,9 @@ class PromptFormatTextGenerationModel {
             promptFormat,
         });
     }
+    get settingsForEvent() {
+        return this.model.settingsForEvent;
+    }
     withSettings(additionalSettings) {
         return new PromptFormatTextGenerationModel({
             model: this.model.withSettings(additionalSettings),
package/prompt/PromptFormatTextGenerationModel.d.ts CHANGED
@@ -19,5 +19,6 @@ export declare class PromptFormatTextGenerationModel<PROMPT, MODEL_PROMPT, RESPO
     get generateDeltaStreamResponse(): MODEL["generateDeltaStreamResponse"] extends undefined ? undefined : (prompt: PROMPT, options: ModelFunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
     get extractTextDelta(): MODEL["extractTextDelta"];
     withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
+    get settingsForEvent(): Partial<SETTINGS>;
     withSettings(additionalSettings: Partial<SETTINGS>): this;
 }
package/prompt/PromptFormatTextGenerationModel.js CHANGED
@@ -62,6 +62,9 @@ export class PromptFormatTextGenerationModel {
             promptFormat,
         });
     }
+    get settingsForEvent() {
+        return this.model.settingsForEvent;
+    }
     withSettings(additionalSettings) {
         return new PromptFormatTextGenerationModel({
             model: this.model.withSettings(additionalSettings),
package/tool/executeTool.cjs CHANGED
@@ -3,7 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.executeTool = exports.ExecuteToolPromise = void 0;
 const nanoid_1 = require("nanoid");
 const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
+const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
 const GlobalFunctionObservers_js_1 = require("../core/GlobalFunctionObservers.cjs");
+const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const AbortError_js_1 = require("../util/api/AbortError.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
@@ -55,6 +57,7 @@ async function doExecuteTool(tool, input, options) {
     const run = options?.run;
     const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
         observers: [
+            ...(0, getFunctionCallLogger_js_1.getFunctionCallLogger)(options?.logging ?? (0, GlobalFunctionLogging_js_1.getGlobalFunctionLogging)()),
             ...(0, GlobalFunctionObservers_js_1.getGlobalFunctionObservers)(),
             ...(run?.observers ?? []),
             ...(options?.observers ?? []),
package/tool/executeTool.js CHANGED
@@ -1,6 +1,8 @@
 import { nanoid as createId } from "nanoid";
 import { FunctionEventSource } from "../core/FunctionEventSource.js";
+import { getGlobalFunctionLogging } from "../core/GlobalFunctionLogging.js";
 import { getGlobalFunctionObservers } from "../core/GlobalFunctionObservers.js";
+import { getFunctionCallLogger } from "../core/getFunctionCallLogger.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { AbortError } from "../util/api/AbortError.js";
 import { runSafe } from "../util/runSafe.js";
@@ -50,6 +52,7 @@ async function doExecuteTool(tool, input, options) {
     const run = options?.run;
     const eventSource = new FunctionEventSource({
         observers: [
+            ...getFunctionCallLogger(options?.logging ?? getGlobalFunctionLogging()),
             ...getGlobalFunctionObservers(),
             ...(run?.observers ?? []),
             ...(options?.observers ?? []),
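With `getFunctionCallLogger` wired into the observer chain, tool executions can now be logged per call via the new `logging` option, or globally via the default read by `getGlobalFunctionLogging`. A sketch, with assumptions: `myTool` is a previously defined `Tool`, `setGlobalFunctionLogging` is the setter matching the getter added in this release, and `"detailed-object"` is one of the accepted logging values (check `getFunctionCallLogger` for the actual set):

import { executeTool, setGlobalFunctionLogging } from "modelfusion";

// Per-call logging (overrides the global default):
const result = await executeTool(myTool, { city: "Berlin" }, {
  logging: "detailed-object", // assumed value; see getFunctionCallLogger
});

// Or set a process-wide default for all function calls:
setGlobalFunctionLogging("detailed-object");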
package/tool/useTool.d.ts CHANGED
@@ -1,5 +1,5 @@
 import { ModelFunctionOptions } from "../model-function/ModelFunctionOptions.js";
-import { GenerateJsonModel, GenerateJsonModelSettings } from "../model-function/generate-json/GenerateJsonModel.js";
+import { JsonGenerationModel, JsonGenerationModelSettings } from "../model-function/generate-json/JsonGenerationModel.js";
 import { Tool } from "./Tool.js";
 /**
  * `useTool` uses `generateJson` to generate parameters for a tool and then executes the tool with the parameters.
@@ -8,7 +8,7 @@ import { Tool } from "./Tool.js";
  * the parameters (`parameters` property, typed),
  * and the result of the tool execution (`result` property, typed).
  */
-export declare function useTool<PROMPT, RESPONSE, SETTINGS extends GenerateJsonModelSettings, TOOL extends Tool<any, any, any>>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, tool: TOOL, prompt: (tool: TOOL) => PROMPT, options?: ModelFunctionOptions<SETTINGS>): Promise<{
+export declare function useTool<PROMPT, RESPONSE, SETTINGS extends JsonGenerationModelSettings, TOOL extends Tool<any, any, any>>(model: JsonGenerationModel<PROMPT, RESPONSE, SETTINGS>, tool: TOOL, prompt: (tool: TOOL) => PROMPT, options?: ModelFunctionOptions<SETTINGS>): Promise<{
     tool: TOOL["name"];
     parameters: TOOL["inputSchema"];
     result: Awaited<ReturnType<TOOL["execute"]>>;
package/tool/useToolOrGenerateText.d.ts CHANGED
@@ -1,5 +1,5 @@
 import { ModelFunctionOptions } from "../model-function/ModelFunctionOptions.js";
-import { GenerateJsonOrTextModel, GenerateJsonOrTextModelSettings, GenerateJsonOrTextPrompt } from "../model-function/generate-json/GenerateJsonOrTextModel.js";
+import { JsonOrTextGenerationModel, JsonOrTextGenerationModelSettings, JsonOrTextGenerationPrompt } from "../model-function/generate-json/JsonOrTextGenerationModel.js";
 import { Tool } from "./Tool.js";
 type ToolArray<T extends Tool<any, any, any>[]> = T;
 type ToToolMap<T extends ToolArray<Tool<any, any, any>[]>> = {
@@ -14,7 +14,7 @@ type ToToolUnion<T> = {
     } : never;
 }[keyof T];
 type ToOutputValue<TOOLS extends ToolArray<Tool<any, any, any>[]>> = ToToolUnion<ToToolMap<TOOLS>>;
-export declare function useToolOrGenerateText<PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings, TOOLS extends Array<Tool<any, any, any>>>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, tools: TOOLS, prompt: (tools: TOOLS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: ModelFunctionOptions<SETTINGS>): Promise<{
+export declare function useToolOrGenerateText<PROMPT, RESPONSE, SETTINGS extends JsonOrTextGenerationModelSettings, TOOLS extends Array<Tool<any, any, any>>>(model: JsonOrTextGenerationModel<PROMPT, RESPONSE, SETTINGS>, tools: TOOLS, prompt: (tools: TOOLS) => PROMPT & JsonOrTextGenerationPrompt<RESPONSE>, options?: ModelFunctionOptions<SETTINGS>): Promise<{
     tool: null;
     parameters: null;
     result: null;
package/core/ConsoleLogger.cjs DELETED
@@ -1,9 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.ConsoleLogger = void 0;
-class ConsoleLogger {
-    onFunctionEvent(event) {
-        console.log(JSON.stringify(event, null, 2));
-    }
-}
-exports.ConsoleLogger = ConsoleLogger;
package/core/ConsoleLogger.d.ts DELETED
@@ -1,5 +0,0 @@
-import { FunctionEvent } from "./FunctionEvent.js";
-import { FunctionObserver } from "./FunctionObserver.js";
-export declare class ConsoleLogger implements FunctionObserver {
-    onFunctionEvent(event: FunctionEvent): void;
-}
package/core/ConsoleLogger.js DELETED
@@ -1,5 +0,0 @@
-export class ConsoleLogger {
-    onFunctionEvent(event) {
-        console.log(JSON.stringify(event, null, 2));
-    }
-}
package/model-function/generate-json/GenerateJsonModel.d.ts DELETED
@@ -1,8 +0,0 @@
-import { ModelFunctionOptions } from "../ModelFunctionOptions.js";
-import { Model, ModelSettings } from "../Model.js";
-export interface GenerateJsonModelSettings extends ModelSettings {
-}
-export interface GenerateJsonModel<PROMPT, RESPONSE, SETTINGS extends GenerateJsonModelSettings> extends Model<SETTINGS> {
-    generateJsonResponse(prompt: PROMPT, options?: ModelFunctionOptions<SETTINGS>): PromiseLike<RESPONSE>;
-    extractJson(response: RESPONSE): unknown;
-}
package/model-function/generate-json/GenerateJsonOrTextModel.d.ts DELETED
@@ -1,18 +0,0 @@
-import { ModelFunctionOptions } from "../ModelFunctionOptions.js";
-import { Model, ModelSettings } from "../Model.js";
-export interface GenerateJsonOrTextModelSettings extends ModelSettings {
-}
-export interface GenerateJsonOrTextPrompt<RESPONSE> {
-    extractJsonAndText(response: RESPONSE): {
-        schema: null;
-        value: null;
-        text: string;
-    } | {
-        schema: string;
-        value: unknown;
-        text: string | null;
-    };
-}
-export interface GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings> extends Model<SETTINGS> {
-    generateJsonResponse(prompt: PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: ModelFunctionOptions<SETTINGS>): PromiseLike<RESPONSE>;
-}
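The deletions above are renames rather than removals: `GenerateJsonModel` is now `JsonGenerationModel` and `GenerateJsonOrTextModel` is now `JsonOrTextGenerationModel` (see files 106-111 in the list above), while `ConsoleLogger` is superseded by the `logging` option and `getFunctionCallLogger`. Code using the old names migrates mechanically; a sketch using the deep file paths shown in this diff (most consumers import from the package root instead):

// Before (0.22.0):
// import { GenerateJsonModel } from "modelfusion/model-function/generate-json/GenerateJsonModel.js";

// After (0.24.0):
import { JsonGenerationModel } from "modelfusion/model-function/generate-json/JsonGenerationModel.js";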