modelfusion 0.5.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +22 -21
  2. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +3 -3
  3. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +4 -4
  4. package/composed-function/use-tool/useTool.cjs +4 -1
  5. package/composed-function/use-tool/useTool.js +4 -1
  6. package/model-function/embed-text/embedText.cjs +16 -30
  7. package/model-function/embed-text/embedText.d.ts +14 -4
  8. package/model-function/embed-text/embedText.js +16 -30
  9. package/model-function/generate-image/generateImage.cjs +7 -20
  10. package/model-function/generate-image/generateImage.d.ts +7 -2
  11. package/model-function/generate-image/generateImage.js +7 -20
  12. package/model-function/generate-json/generateJson.cjs +7 -5
  13. package/model-function/generate-json/generateJson.d.ts +6 -1
  14. package/model-function/generate-json/generateJson.js +7 -5
  15. package/model-function/generate-json/generateJsonOrText.cjs +7 -5
  16. package/model-function/generate-json/generateJsonOrText.d.ts +10 -1
  17. package/model-function/generate-json/generateJsonOrText.js +7 -5
  18. package/model-function/generate-text/generateText.cjs +7 -17
  19. package/model-function/generate-text/generateText.d.ts +7 -2
  20. package/model-function/generate-text/generateText.js +7 -17
  21. package/model-function/generate-text/streamText.cjs +6 -4
  22. package/model-function/generate-text/streamText.d.ts +9 -1
  23. package/model-function/generate-text/streamText.js +6 -4
  24. package/model-function/transcribe-audio/transcribe.cjs +7 -19
  25. package/model-function/transcribe-audio/transcribe.d.ts +7 -2
  26. package/model-function/transcribe-audio/transcribe.js +7 -19
  27. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
  28. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
  29. package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
  30. package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
  31. package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
  32. package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
  33. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
  34. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
  35. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
  36. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +1 -1
  37. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
  38. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +1 -1
  39. package/model-provider/openai/OpenAIImageGenerationModel.cjs +1 -1
  40. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
  41. package/model-provider/openai/OpenAIImageGenerationModel.js +1 -1
  42. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
  43. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
  44. package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
  45. package/model-provider/openai/OpenAITextGenerationModel.cjs +1 -1
  46. package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -1
  47. package/model-provider/openai/OpenAITextGenerationModel.js +1 -1
  48. package/model-provider/openai/OpenAITranscriptionModel.cjs +1 -1
  49. package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
  50. package/model-provider/openai/OpenAITranscriptionModel.js +1 -1
  51. package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
  52. package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
  53. package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
  54. package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
  55. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
  56. package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
  57. package/package.json +2 -2
  58. package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs +1 -1
  59. package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js +1 -1
  60. package/text-chunk/split/splitOnSeparator.cjs +7 -9
  61. package/text-chunk/split/splitOnSeparator.d.ts +5 -6
  62. package/text-chunk/split/splitOnSeparator.js +6 -7
  63. package/text-chunk/split/splitRecursively.cjs +16 -7
  64. package/text-chunk/split/splitRecursively.d.ts +13 -4
  65. package/text-chunk/split/splitRecursively.js +13 -4
  66. package/text-chunk/split/splitTextChunks.cjs +10 -8
  67. package/text-chunk/split/splitTextChunks.d.ts +1 -0
  68. package/text-chunk/split/splitTextChunks.js +8 -7
  69. package/text-chunk/upsertTextChunks.cjs +1 -1
  70. package/text-chunk/upsertTextChunks.js +1 -1
@@ -1,16 +1,4 @@
1
1
  import { executeCall } from "../executeCall.js";
2
- /**
3
- * Generates a text using a prompt.
4
- * The prompt format depends on the model.
5
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
6
- *
7
- * @example
8
- * const model = new OpenAITextGenerationModel(...);
9
- *
10
- * const { text } = await model.generateText(
11
- * "Write a short story about a robot learning to love:\n\n"
12
- * );
13
- */
14
2
  export async function generateText(
15
3
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
16
4
  model, prompt, options) {
@@ -55,9 +43,11 @@ model, prompt, options) {
55
43
  generatedText: output,
56
44
  }),
57
45
  });
58
- return {
59
- text: result.output,
60
- response: result.response,
61
- metadata: result.metadata,
62
- };
46
+ return options?.fullResponse === true
47
+ ? {
48
+ text: result.output,
49
+ response: result.response,
50
+ metadata: result.metadata,
51
+ }
52
+ : result.output;
63
53
  }
@@ -107,9 +107,11 @@ async function streamText(model, prompt, options) {
107
107
  });
108
108
  throw result.error;
109
109
  }
110
- return {
111
- textStream: result.output,
112
- metadata: startMetadata,
113
- };
110
+ return options?.fullResponse === true
111
+ ? {
112
+ textStream: result.output,
113
+ metadata: startMetadata,
114
+ }
115
+ : result.output;
114
116
  }
115
117
  exports.streamText = streamText;
@@ -5,7 +5,15 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
5
5
  export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
6
6
  generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
7
7
  extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
8
- }, prompt: PROMPT, options?: FunctionOptions<SETTINGS>): Promise<{
8
+ }, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
9
+ fullResponse?: false;
10
+ }): Promise<AsyncIterable<string>>;
11
+ export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
12
+ generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
13
+ extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
14
+ }, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
15
+ fullResponse: true;
16
+ }): Promise<{
9
17
  textStream: AsyncIterable<string>;
10
18
  metadata: Omit<CallMetadata<TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS>>, "durationInMs">;
11
19
  }>;
@@ -104,8 +104,10 @@ export async function streamText(model, prompt, options) {
104
104
  });
105
105
  throw result.error;
106
106
  }
107
- return {
108
- textStream: result.output,
109
- metadata: startMetadata,
110
- };
107
+ return options?.fullResponse === true
108
+ ? {
109
+ textStream: result.output,
110
+ metadata: startMetadata,
111
+ }
112
+ : result.output;
111
113
  }
@@ -2,20 +2,6 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.transcribe = void 0;
4
4
  const executeCall_js_1 = require("../executeCall.cjs");
5
- /**
6
- * Transcribe audio data into text.
7
- *
8
- * @example
9
- * const data = await fs.promises.readFile("data/test.mp3");
10
- *
11
- * const { transcription } = await transcribe(
12
- * new OpenAITranscriptionModel({ model: "whisper-1" }),
13
- * {
14
- * type: "mp3",
15
- * data,
16
- * }
17
- * );
18
- */
19
5
  async function transcribe(model, data, options) {
20
6
  const result = await (0, executeCall_js_1.executeCall)({
21
7
  model,
@@ -53,10 +39,12 @@ async function transcribe(model, data, options) {
53
39
  transcription: output,
54
40
  }),
55
41
  });
56
- return {
57
- transcription: result.output,
58
- response: result.response,
59
- metadata: result.metadata,
60
- };
42
+ return options?.fullResponse === true
43
+ ? {
44
+ transcription: result.output,
45
+ response: result.response,
46
+ metadata: result.metadata,
47
+ }
48
+ : result.output;
61
49
  }
62
50
  exports.transcribe = transcribe;
@@ -7,7 +7,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
7
7
  * @example
8
8
  * const data = await fs.promises.readFile("data/test.mp3");
9
9
  *
10
- * const { transcription } = await transcribe(
10
+ * const transcription = await transcribe(
11
11
  * new OpenAITranscriptionModel({ model: "whisper-1" }),
12
12
  * {
13
13
  * type: "mp3",
@@ -15,8 +15,13 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
15
15
  * }
16
16
  * );
17
17
  */
18
- export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options?: FunctionOptions<SETTINGS>): Promise<{
18
+ export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options: FunctionOptions<SETTINGS> & {
19
+ fullResponse: true;
20
+ }): Promise<{
19
21
  transcription: string;
20
22
  response: RESPONSE;
21
23
  metadata: CallMetadata<TranscriptionModel<DATA, RESPONSE, SETTINGS>>;
22
24
  }>;
25
+ export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options?: FunctionOptions<SETTINGS> & {
26
+ fullResponse?: false;
27
+ }): Promise<string>;
@@ -1,18 +1,4 @@
1
1
  import { executeCall } from "../executeCall.js";
2
- /**
3
- * Transcribe audio data into text.
4
- *
5
- * @example
6
- * const data = await fs.promises.readFile("data/test.mp3");
7
- *
8
- * const { transcription } = await transcribe(
9
- * new OpenAITranscriptionModel({ model: "whisper-1" }),
10
- * {
11
- * type: "mp3",
12
- * data,
13
- * }
14
- * );
15
- */
16
2
  export async function transcribe(model, data, options) {
17
3
  const result = await executeCall({
18
4
  model,
@@ -50,9 +36,11 @@ export async function transcribe(model, data, options) {
50
36
  transcription: output,
51
37
  }),
52
38
  });
53
- return {
54
- transcription: result.output,
55
- response: result.response,
56
- metadata: result.metadata,
57
- };
39
+ return options?.fullResponse === true
40
+ ? {
41
+ transcription: result.output,
42
+ response: result.response,
43
+ metadata: result.metadata,
44
+ }
45
+ : result.output;
58
46
  }
@@ -30,7 +30,7 @@ exports.COHERE_TEXT_EMBEDDING_MODELS = {
30
30
  * @see https://docs.cohere.com/reference/embed
31
31
  *
32
32
  * @example
33
- * const { embeddings } = await embedTexts(
33
+ * const embeddings = await embedTexts(
34
34
  * new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
35
35
  * [
36
36
  * "At first, Nox didn't know what to do with the pup.",
@@ -38,7 +38,7 @@ export interface CohereTextEmbeddingModelSettings extends TextEmbeddingModelSett
38
38
  * @see https://docs.cohere.com/reference/embed
39
39
  *
40
40
  * @example
41
- * const { embeddings } = await embedTexts(
41
+ * const embeddings = await embedTexts(
42
42
  * new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
43
43
  * [
44
44
  * "At first, Nox didn't know what to do with the pup.",
@@ -24,7 +24,7 @@ export const COHERE_TEXT_EMBEDDING_MODELS = {
24
24
  * @see https://docs.cohere.com/reference/embed
25
25
  *
26
26
  * @example
27
- * const { embeddings } = await embedTexts(
27
+ * const embeddings = await embedTexts(
28
28
  * new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" }),
29
29
  * [
30
30
  * "At first, Nox didn't know what to do with the pup.",
@@ -40,7 +40,7 @@ exports.COHERE_TEXT_GENERATION_MODELS = {
40
40
  * maxTokens: 500,
41
41
  * });
42
42
  *
43
- * const { text } = await generateText(
43
+ * const text = await generateText(
44
44
  * model,
45
45
  * "Write a short story about a robot learning to love:\n\n"
46
46
  * );
@@ -59,7 +59,7 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
59
59
  * maxTokens: 500,
60
60
  * });
61
61
  *
62
- * const { text } = await generateText(
62
+ * const text = await generateText(
63
63
  * model,
64
64
  * "Write a short story about a robot learning to love:\n\n"
65
65
  * );
@@ -34,7 +34,7 @@ export const COHERE_TEXT_GENERATION_MODELS = {
34
34
  * maxTokens: 500,
35
35
  * });
36
36
  *
37
- * const { text } = await generateText(
37
+ * const text = await generateText(
38
38
  * model,
39
39
  * "Write a short story about a robot learning to love:\n\n"
40
40
  * );
@@ -21,7 +21,7 @@ const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
21
21
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
22
22
  * });
23
23
  *
24
- * const { embeddings } = await embedTexts(
24
+ * const embeddings = await embedTexts(
25
25
  * model,
26
26
  * [
27
27
  * "At first, Nox didn't know what to do with the pup.",
@@ -29,7 +29,7 @@ export interface HuggingFaceTextEmbeddingModelSettings extends TextEmbeddingMode
29
29
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
30
30
  * });
31
31
  *
32
- * const { embeddings } = await embedTexts(
32
+ * const embeddings = await embedTexts(
33
33
  * model,
34
34
  * [
35
35
  * "At first, Nox didn't know what to do with the pup.",
@@ -15,7 +15,7 @@ import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
15
15
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
16
16
  * });
17
17
  *
18
- * const { embeddings } = await embedTexts(
18
+ * const embeddings = await embedTexts(
19
19
  * model,
20
20
  * [
21
21
  * "At first, Nox didn't know what to do with the pup.",
@@ -23,7 +23,7 @@ const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappin
23
23
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
24
24
  * });
25
25
  *
26
- * const { text } = await generateText(
26
+ * const text = await generateText(
27
27
  * model,
28
28
  * "Write a short story about a robot learning to love:\n\n"
29
29
  * );
@@ -38,7 +38,7 @@ export interface HuggingFaceTextGenerationModelSettings extends TextGenerationMo
38
38
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
39
39
  * });
40
40
  *
41
- * const { text } = await generateText(
41
+ * const text = await generateText(
42
42
  * model,
43
43
  * "Write a short story about a robot learning to love:\n\n"
44
44
  * );
@@ -17,7 +17,7 @@ import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingText
17
17
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
18
18
  * });
19
19
  *
20
- * const { text } = await generateText(
20
+ * const text = await generateText(
21
21
  * model,
22
22
  * "Write a short story about a robot learning to love:\n\n"
23
23
  * );
@@ -22,7 +22,7 @@ exports.calculateOpenAIImageGenerationCostInMillicents = calculateOpenAIImageGen
22
22
  * @see https://platform.openai.com/docs/api-reference/images/create
23
23
  *
24
24
  * @example
25
- * const { image } = await generateImage(
25
+ * const image = await generateImage(
26
26
  * new OpenAIImageGenerationModel({ size: "512x512" }),
27
27
  * "the wicked witch of the west in the style of early 19th century painting"
28
28
  * );
@@ -20,7 +20,7 @@ export interface OpenAIImageGenerationSettings extends ImageGenerationModelSetti
20
20
  * @see https://platform.openai.com/docs/api-reference/images/create
21
21
  *
22
22
  * @example
23
- * const { image } = await generateImage(
23
+ * const image = await generateImage(
24
24
  * new OpenAIImageGenerationModel({ size: "512x512" }),
25
25
  * "the wicked witch of the west in the style of early 19th century painting"
26
26
  * );
@@ -18,7 +18,7 @@ export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) =>
18
18
  * @see https://platform.openai.com/docs/api-reference/images/create
19
19
  *
20
20
  * @example
21
- * const { image } = await generateImage(
21
+ * const image = await generateImage(
22
22
  * new OpenAIImageGenerationModel({ size: "512x512" }),
23
23
  * "the wicked witch of the west in the style of early 19th century painting"
24
24
  * );
@@ -36,7 +36,7 @@ exports.calculateOpenAIEmbeddingCostInMillicents = calculateOpenAIEmbeddingCostI
36
36
  * @see https://platform.openai.com/docs/api-reference/embeddings
37
37
  *
38
38
  * @example
39
- * const { embeddings } = await embedTexts(
39
+ * const embeddings = await embedTexts(
40
40
  * new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
41
41
  * [
42
42
  * "At first, Nox didn't know what to do with the pup.",
@@ -32,7 +32,7 @@ export interface OpenAITextEmbeddingModelSettings extends TextEmbeddingModelSett
32
32
  * @see https://platform.openai.com/docs/api-reference/embeddings
33
33
  *
34
34
  * @example
35
- * const { embeddings } = await embedTexts(
35
+ * const embeddings = await embedTexts(
36
36
  * new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
37
37
  * [
38
38
  * "At first, Nox didn't know what to do with the pup.",
@@ -28,7 +28,7 @@ export const calculateOpenAIEmbeddingCostInMillicents = ({ model, responses, })
28
28
  * @see https://platform.openai.com/docs/api-reference/embeddings
29
29
  *
30
30
  * @example
31
- * const { embeddings } = await embedTexts(
31
+ * const embeddings = await embedTexts(
32
32
  * new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
33
33
  * [
34
34
  * "At first, Nox didn't know what to do with the pup.",
@@ -79,7 +79,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
79
79
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
80
80
  * });
81
81
  *
82
- * const { text } = await generateText(
82
+ * const text = await generateText(
83
83
  * model,
84
84
  * "Write a short story about a robot learning to love:\n\n"
85
85
  * );
@@ -96,7 +96,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
96
96
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
97
97
  * });
98
98
  *
99
- * const { text } = await generateText(
99
+ * const text = await generateText(
100
100
  * model,
101
101
  * "Write a short story about a robot learning to love:\n\n"
102
102
  * );
@@ -71,7 +71,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
71
71
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
72
72
  * });
73
73
  *
74
- * const { text } = await generateText(
74
+ * const text = await generateText(
75
75
  * model,
76
76
  * "Write a short story about a robot learning to love:\n\n"
77
77
  * );
@@ -34,7 +34,7 @@ exports.calculateOpenAITranscriptionCostInMillicents = calculateOpenAITranscript
34
34
  * @example
35
35
  * const data = await fs.promises.readFile("data/test.mp3");
36
36
  *
37
- * const { transcription } = await transcribe(
37
+ * const transcription = await transcribe(
38
38
  * new OpenAITranscriptionModel({ model: "whisper-1" }),
39
39
  * {
40
40
  * type: "mp3",
@@ -39,7 +39,7 @@ export type OpenAITranscriptionInput = {
39
39
  * @example
40
40
  * const data = await fs.promises.readFile("data/test.mp3");
41
41
  *
42
- * const { transcription } = await transcribe(
42
+ * const transcription = await transcribe(
43
43
  * new OpenAITranscriptionModel({ model: "whisper-1" }),
44
44
  * {
45
45
  * type: "mp3",
@@ -27,7 +27,7 @@ export const calculateOpenAITranscriptionCostInMillicents = ({ model, response,
27
27
  * @example
28
28
  * const data = await fs.promises.readFile("data/test.mp3");
29
29
  *
30
- * const { transcription } = await transcribe(
30
+ * const transcription = await transcribe(
31
31
  * new OpenAITranscriptionModel({ model: "whisper-1" }),
32
32
  * {
33
33
  * type: "mp3",
@@ -95,7 +95,7 @@ exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicent
95
95
  * maxTokens: 500,
96
96
  * });
97
97
  *
98
- * const { text } = await generateText([
98
+ * const text = await generateText([
99
99
  * model,
100
100
  * OpenAIChatMessage.system(
101
101
  * "Write a short story about a robot learning to love:"
@@ -109,7 +109,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIM
109
109
  * maxTokens: 500,
110
110
  * });
111
111
  *
112
- * const { text } = await generateText([
112
+ * const text = await generateText([
113
113
  * model,
114
114
  * OpenAIChatMessage.system(
115
115
  * "Write a short story about a robot learning to love:"
@@ -87,7 +87,7 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => res
87
87
  * maxTokens: 500,
88
88
  * });
89
89
  *
90
- * const { text } = await generateText([
90
+ * const text = await generateText([
91
91
  * model,
92
92
  * OpenAIChatMessage.system(
93
93
  * "Write a short story about a robot learning to love:"
@@ -12,7 +12,7 @@ const StabilityError_js_1 = require("./StabilityError.cjs");
12
12
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
13
13
  *
14
14
  * @example
15
- * const { image } = await generateImage(
15
+ * const image = await generateImage(
16
16
  * new StabilityImageGenerationModel({
17
17
  * model: "stable-diffusion-512-v2-1",
18
18
  * cfgScale: 7,
@@ -10,7 +10,7 @@ import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
10
10
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
11
11
  *
12
12
  * @example
13
- * const { image } = await generateImage(
13
+ * const image = await generateImage(
14
14
  * new StabilityImageGenerationModel({
15
15
  * model: "stable-diffusion-512-v2-1",
16
16
  * cfgScale: 7,
@@ -9,7 +9,7 @@ import { failedStabilityCallResponseHandler } from "./StabilityError.js";
9
9
  * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
10
10
  *
11
11
  * @example
12
- * const { image } = await generateImage(
12
+ * const image = await generateImage(
13
13
  * new StabilityImageGenerationModel({
14
14
  * model: "stable-diffusion-512-v2-1",
15
15
  * cfgScale: 7,
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
4
- "version": "0.5.0",
4
+ "version": "0.7.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -65,7 +65,7 @@
65
65
  "@typescript-eslint/parser": "^6.1.0",
66
66
  "copyfiles": "2.4.1",
67
67
  "eslint": "^8.45.0",
68
- "eslint-config-prettier": "8.10.0",
68
+ "eslint-config-prettier": "9.0.0",
69
69
  "husky": "^8.0.3",
70
70
  "lint-staged": "13.2.3",
71
71
  "prettier": "3.0.1",
@@ -36,7 +36,7 @@ class SimilarTextChunksFromVectorIndexRetriever {
36
36
  run: options.run,
37
37
  });
38
38
  }
39
- const { embedding } = await (0, embedText_js_1.embedText)(this.embeddingModel, query, {
39
+ const embedding = await (0, embedText_js_1.embedText)(this.embeddingModel, query, {
40
40
  functionId: options?.functionId,
41
41
  run: options?.run,
42
42
  });
@@ -33,7 +33,7 @@ export class SimilarTextChunksFromVectorIndexRetriever {
33
33
  run: options.run,
34
34
  });
35
35
  }
36
- const { embedding } = await embedText(this.embeddingModel, query, {
36
+ const embedding = await embedText(this.embeddingModel, query, {
37
37
  functionId: options?.functionId,
38
38
  run: options?.run,
39
39
  });
@@ -1,12 +1,10 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.splitOnSeparatorAsSplitFunction = exports.splitOnSeparator = void 0;
4
- const splitOnSeparator = async ({ separator, text, }) => {
5
- return text.split(separator);
6
- };
3
+ exports.splitOnSeparator = void 0;
4
+ /**
5
+ * Splits text on a separator string.
6
+ */
7
+ function splitOnSeparator({ separator, }) {
8
+ return async ({ text }) => text.split(separator);
9
+ }
7
10
  exports.splitOnSeparator = splitOnSeparator;
8
- const splitOnSeparatorAsSplitFunction = ({ separator }) => async ({ text }) => (0, exports.splitOnSeparator)({
9
- separator,
10
- text,
11
- });
12
- exports.splitOnSeparatorAsSplitFunction = splitOnSeparatorAsSplitFunction;
@@ -1,8 +1,7 @@
1
1
  import { SplitFunction } from "./SplitFunction.js";
2
- export declare const splitOnSeparator: ({ separator, text, }: {
2
+ /**
3
+ * Splits text on a separator string.
4
+ */
5
+ export declare function splitOnSeparator({ separator, }: {
3
6
  separator: string;
4
- text: string;
5
- }) => Promise<string[]>;
6
- export declare const splitOnSeparatorAsSplitFunction: ({ separator }: {
7
- separator: string;
8
- }) => SplitFunction;
7
+ }): SplitFunction;
@@ -1,7 +1,6 @@
1
- export const splitOnSeparator = async ({ separator, text, }) => {
2
- return text.split(separator);
3
- };
4
- export const splitOnSeparatorAsSplitFunction = ({ separator }) => async ({ text }) => splitOnSeparator({
5
- separator,
6
- text,
7
- });
1
+ /**
2
+ * Splits text on a separator string.
3
+ */
4
+ export function splitOnSeparator({ separator, }) {
5
+ return async ({ text }) => text.split(separator);
6
+ }
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.splitRecursivelyAtToken = exports.splitRecursivelyAtCharacter = void 0;
3
+ exports.splitAtToken = exports.splitAtCharacter = void 0;
4
4
  // when segments is a string, it splits by character, otherwise according to the provided segments
5
5
  function splitRecursively({ maxChunkSize, segments, }) {
6
6
  if (segments.length < maxChunkSize) {
@@ -20,13 +20,22 @@ function splitRecursively({ maxChunkSize, segments, }) {
20
20
  }),
21
21
  ];
22
22
  }
23
- const splitRecursivelyAtCharacter = ({ maxChunkSize }) => async ({ text }) => splitRecursively({
24
- maxChunkSize,
23
+ /**
24
+ * Splits text recursively until the resulting chunks are smaller than the `maxCharactersPerChunk`.
25
+ * The text is recursively split in the middle, so that all chunks are roughtly the same size.
26
+ */
27
+ const splitAtCharacter = ({ maxCharactersPerChunk, }) => async ({ text }) => splitRecursively({
28
+ maxChunkSize: maxCharactersPerChunk,
25
29
  segments: text,
26
30
  });
27
- exports.splitRecursivelyAtCharacter = splitRecursivelyAtCharacter;
28
- const splitRecursivelyAtToken = ({ tokenizer, maxChunkSize, }) => async ({ text }) => splitRecursively({
29
- maxChunkSize,
31
+ exports.splitAtCharacter = splitAtCharacter;
32
+ /**
33
+ * Splits text recursively until the resulting chunks are smaller than the `maxTokensPerChunk`,
34
+ * while respecting the token boundaries.
35
+ * The text is recursively split in the middle, so that all chunks are roughtly the same size.
36
+ */
37
+ const splitAtToken = ({ tokenizer, maxTokensPerChunk, }) => async ({ text }) => splitRecursively({
38
+ maxChunkSize: maxTokensPerChunk,
30
39
  segments: (await tokenizer.tokenizeWithTexts(text)).tokenTexts,
31
40
  });
32
- exports.splitRecursivelyAtToken = splitRecursivelyAtToken;
41
+ exports.splitAtToken = splitAtToken;