modelfusion 0.123.0 → 0.125.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/CHANGELOG.md +47 -1
  2. package/README.md +9 -22
  3. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -11
  4. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -1
  5. package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -11
  6. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -11
  7. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -1
  8. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -11
  9. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -11
  10. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -1
  11. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -11
  12. package/model-function/generate-text/TextGenerationModel.d.ts +31 -1
  13. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  14. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -9
  15. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
  16. package/model-provider/cohere/CohereTextGenerationModel.js +7 -10
  17. package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
  18. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +2 -2
  19. package/model-provider/mistral/MistralChatModel.cjs +0 -9
  20. package/model-provider/mistral/MistralChatModel.d.ts +2 -11
  21. package/model-provider/mistral/MistralChatModel.js +0 -9
  22. package/model-provider/mistral/index.cjs +1 -2
  23. package/model-provider/mistral/index.d.ts +0 -1
  24. package/model-provider/mistral/index.js +0 -1
  25. package/model-provider/ollama/OllamaChatModel.cjs +0 -9
  26. package/model-provider/ollama/OllamaChatModel.d.ts +2 -11
  27. package/model-provider/ollama/OllamaChatModel.js +0 -9
  28. package/model-provider/ollama/OllamaCompletionModel.d.ts +2 -2
  29. package/model-provider/ollama/index.cjs +0 -1
  30. package/model-provider/ollama/index.d.ts +0 -1
  31. package/model-provider/ollama/index.js +0 -1
  32. package/model-provider/openai/AbstractOpenAIChatModel.cjs +5 -3
  33. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -5
  34. package/model-provider/openai/AbstractOpenAIChatModel.js +5 -3
  35. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs +82 -0
  36. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts +91 -0
  37. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js +78 -0
  38. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  39. package/model-provider/openai/OpenAIChatModel.cjs +0 -9
  40. package/model-provider/openai/OpenAIChatModel.d.ts +2 -11
  41. package/model-provider/openai/OpenAIChatModel.js +0 -9
  42. package/model-provider/openai/OpenAICompletionModel.cjs +3 -6
  43. package/model-provider/openai/OpenAICompletionModel.d.ts +3 -8
  44. package/model-provider/openai/OpenAICompletionModel.js +4 -7
  45. package/model-provider/openai/OpenAIFacade.cjs +18 -18
  46. package/model-provider/openai/OpenAIFacade.d.ts +18 -18
  47. package/model-provider/openai/OpenAIFacade.js +18 -18
  48. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -68
  49. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +4 -82
  50. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -68
  51. package/model-provider/openai/index.cjs +2 -2
  52. package/model-provider/openai/index.d.ts +1 -1
  53. package/model-provider/openai/index.js +1 -1
  54. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -9
  55. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -11
  56. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -9
  57. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +10 -0
  58. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +10 -2
  59. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +10 -0
  60. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +40 -7
  61. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +35 -6
  62. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +37 -6
  63. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +27 -0
  64. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +18 -0
  65. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +23 -0
  66. package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +33 -0
  67. package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +13 -0
  68. package/model-provider/openai-compatible/PerplexityApiConfiguration.js +29 -0
  69. package/model-provider/openai-compatible/index.cjs +2 -0
  70. package/model-provider/openai-compatible/index.d.ts +2 -0
  71. package/model-provider/openai-compatible/index.js +2 -0
  72. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,5 +1,47 @@
  # Changelog
  
+ ## v0.125.0 - 2024-01-14
+ 
+ ### Added
+ 
+ - Perplexity AI chat completion support. Example:
+ 
+   ```ts
+   import { openaicompatible, streamText } from "modelfusion";
+ 
+   const textStream = await streamText({
+     model: openaicompatible
+       .ChatTextGenerator({
+         api: openaicompatible.PerplexityApi(),
+         provider: "openaicompatible-perplexity",
+         model: "pplx-70b-online", // online model with access to web search
+         maxGenerationTokens: 500,
+       })
+       .withTextPrompt(),
+ 
+     prompt: "What is RAG in AI?",
+   });
+   ```
+ 
+ ## v0.124.0 - 2024-01-13
+ 
+ ### Added
+ 
+ - [Embedding-support for OpenAI-compatible providers](https://modelfusion.dev/integration/model-provider/openaicompatible/#embed-text). You can for example use the Together AI embedding endpoint:
+ 
+   ```ts
+   import { embed, openaicompatible } from "modelfusion";
+ 
+   const embedding = await embed({
+     model: openaicompatible.TextEmbedder({
+       api: openaicompatible.TogetherAIApi(),
+       provider: "openaicompatible-togetherai",
+       model: "togethercomputer/m2-bert-80M-8k-retrieval",
+     }),
+     value: "At first, Nox didn't know what to do with the pup.",
+   });
+   ```
+ 
  ## v0.123.0 - 2024-01-13
  
  ### Added
@@ -1049,7 +1091,7 @@ Ollama edge case and error handling improvements.
  ### Changed
  
  - **breaking change**: `ChatPrompt` structure and terminology has changed to align more closely with OpenAI and similar chat prompts. This is also in preparation for integrating images and function calls results into chat prompts.
- - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-format) for documentation of the new prompt formats.
+ - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-styles) for documentation of the new prompt formats.
  
  ## v0.69.0 - 2023-11-15
  
@@ -2003,3 +2045,7 @@ Since this change already affected all JSON generation calls and tools, I includ
  1. Recursive Character Splitter: A feature to split text into characters recursively for more detailed text analysis.
  1. Recursive Text Mapping: This enables recursive mapping of text, beneficial for tasks like summarization or extraction.
  1. Split-Map-Filter-Reduce for Text Processing: A process chain developed for sophisticated text handling, allowing operations to split, map, filter, and reduce text data.
+ 
+ ```
+ 
+ ```
package/README.md CHANGED
@@ -30,7 +30,7 @@
  npm install modelfusion
  ```
  
- Or use a template:
+ Or use a start template:
  
  - [ModelFusion terminal app starter](https://github.com/lgrammel/modelfusion-terminal-app-starter)
  - [Next.js, Vercel AI SDK, Llama.cpp & ModelFusion starter](https://github.com/lgrammel/modelfusion-llamacpp-nextjs-starter)
@@ -39,14 +39,14 @@ Or use a template:
  ## Usage Examples
  
  > [!TIP]
- > The basic examples are a great way to get started and to explore in parallel with the [documentation](https://modelfusion.dev/). You can find them in the [examples/basic](https://github.com/lgrammel/modelfusion/tree/main/examples/basic) folder.
+ > The basic examples are a great way to get started and to explore in parallel with the [documentation](https://modelfusion.dev/guide/function/). You can find them in the [examples/basic](https://github.com/lgrammel/modelfusion/tree/main/examples/basic) folder.
  
  You can provide API keys for the different [integrations](https://modelfusion.dev/integration/model-provider/) using environment variables (e.g., `OPENAI_API_KEY`) or pass them into the model constructors as options.
  
  ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
  
  Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)).
- You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-template) to change the prompt template of a model.
+ You can use [prompt styles](https://modelfusion.dev/guide/function/generate-text#prompt-styles) to use text, instruction, or chat prompts.
  
  #### generateText
  
@@ -309,7 +309,7 @@ const embeddings = await embedMany({
  });
  ```
  
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
  
  ### [Classify Value](https://modelfusion.dev/guide/function/classify)
  
@@ -442,11 +442,11 @@ const retrievedTexts = await retrieve(
  
  Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-index/memory), [SQLite VSS](https://modelfusion.dev/integration/vector-index/sqlite-vss), [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
  
- ### [Text Generation Prompt Templates](https://modelfusion.dev/guide/function/generate-text#prompt-format)
+ ### [Text Generation Prompt Styles](https://modelfusion.dev/guide/function/generate-text#prompt-styles)
  
- Prompt templates let you use higher level prompt structures (such as text, instruction or chat prompts) for different models.
+ You can use different prompt styles (such as text, instruction or chat prompts) with ModelFusion text generation models. These prompt styles can be accessed through the methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()`:
  
- #### Text Prompt Example
+ #### Text Prompt Style
  
  ```ts
  const text = await generateText({
@@ -460,7 +460,7 @@ const text = await generateText({
  });
  ```
  
- #### Instruction Prompt Example
+ #### Instruction Prompt Style
  
  ```ts
  const text = await generateText({
@@ -480,9 +480,7 @@ const text = await generateText({
  });
  ```
  
- They can also be accessed through the shorthand methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
- 
- #### Chat Prompt Example
+ #### Chat Prompt Style
  
  ```ts
  const textStream = await streamText({
@@ -512,17 +510,6 @@ const textStream = await streamText({
  });
  ```
  
- | Prompt Template  | Text Prompt | Instruction Prompt | Chat Prompt |
- | ---------------- | ----------- | ------------------ | ----------- |
- | Alpaca           | ✅          | ✅                 | ❌          |
- | ChatML           | ✅          | ✅                 | ✅          |
- | Llama 2          | ✅          | ✅                 | ✅          |
- | Mistral Instruct | ✅          | ✅                 | ✅          |
- | NeuralChat       | ✅          | ✅                 | ✅          |
- | Synthia          | ✅          | ✅                 | ✅          |
- | Vicuna           | ✅          | ✅                 | ✅          |
- | Generic Text     | ✅          | ✅                 | ✅          |
- 
  ### [Image Generation Prompt Templates](https://modelfusion.dev/guide/function/generate-image/prompt-format)
  
  You an use prompt templates with image models as well, e.g. to use a basic text prompt. It is available as a shorthand method:
package/model-function/generate-text/PromptTemplateFullTextModel.cjs CHANGED
@@ -14,17 +14,6 @@ class PromptTemplateFullTextModel extends PromptTemplateTextStreamingModel_js_1.
          const mappedPrompt = this.promptTemplate.format(prompt);
          return this.model.doGenerateToolCalls(tools, mappedPrompt, options);
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateFullTextModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateFullTextModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateFullTextModel.d.ts CHANGED
@@ -36,6 +36,5 @@ export declare class PromptTemplateFullTextModel<PROMPT, MODEL_PROMPT, SETTINGS
              totalTokens: number;
          } | undefined;
      }>;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateFullTextModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateFullTextModel.js CHANGED
@@ -11,17 +11,6 @@ export class PromptTemplateFullTextModel extends PromptTemplateTextStreamingMode
          const mappedPrompt = this.promptTemplate.format(prompt);
          return this.model.doGenerateToolCalls(tools, mappedPrompt, options);
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateFullTextModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateFullTextModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs CHANGED
@@ -74,17 +74,6 @@ class PromptTemplateTextGenerationModel {
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextGenerationModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextGenerationModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts CHANGED
@@ -43,6 +43,5 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
      asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallsModel<INPUT_PROMPT, PROMPT, this>;
      asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextGenerationModel<INPUT_PROMPT, PROMPT, this>;
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateTextGenerationModel.js CHANGED
@@ -71,17 +71,6 @@ export class PromptTemplateTextGenerationModel {
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextGenerationModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextGenerationModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs CHANGED
@@ -26,17 +26,6 @@ class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerationModel
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextStreamingModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts CHANGED
@@ -15,6 +15,5 @@ export declare class PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETT
      extractTextDelta(delta: unknown): string | undefined;
      asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextStreamingModel<INPUT_PROMPT, PROMPT, this>;
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateTextStreamingModel.js CHANGED
@@ -23,17 +23,6 @@ export class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerati
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextStreamingModel({
              model: this.model.withSettings(additionalSettings),
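Net effect of the changes to these three wrapper classes: `withPromptTemplate` is no longer chainable on an already-wrapped model, so a template is applied once, directly on the base model. A minimal sketch of the remaining pattern, assuming an OpenAI API key is configured; the `topicPrompt` template is hypothetical and only illustrates the `TextGenerationPromptTemplate` shape (`format` plus `stopSequences`) that the wrappers consume:

```ts
import {
  generateText,
  openai,
  TextGenerationPromptTemplate,
} from "modelfusion";

// Hypothetical template mapping a topic string to a completion prompt:
const topicPrompt: TextGenerationPromptTemplate<string, string> = {
  format: (topic) => `Write a short paragraph about ${topic}:\n\n`,
  stopSequences: [],
};

const text = await generateText({
  // The template is applied once, on the base model. The returned
  // PromptTemplateTextStreamingModel no longer exposes withPromptTemplate.
  model: openai
    .CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" })
    .withPromptTemplate(topicPrompt),
  prompt: "robots learning to love",
});
```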
package/model-function/generate-text/TextGenerationModel.d.ts CHANGED
@@ -6,6 +6,8 @@ import { Model, ModelSettings } from "../Model.js";
  import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
  import { TextGenerationPromptTemplate } from "./TextGenerationPromptTemplate.js";
  import { TextGenerationResult } from "./TextGenerationResult.js";
+ import { ChatPrompt } from "./prompt-template/ChatPrompt.js";
+ import { InstructionPrompt } from "./prompt-template/InstructionPrompt.js";
  export declare const textGenerationModelProperties: readonly ["maxGenerationTokens", "stopSequences", "numberOfGenerations", "trimWhitespace"];
  export interface TextGenerationModelSettings extends ModelSettings {
      /**
@@ -82,15 +84,43 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
              totalTokens: number;
          };
      };
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
      /**
       * When possible, limit the output generation to the specified JSON schema,
       * or super sets of it (e.g. JSON in general).
       */
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
  }
+ export interface TextGenerationBaseModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt(): TextGenerationModel<string, SETTINGS>;
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt(): TextGenerationModel<InstructionPrompt, SETTINGS>;
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt(): TextGenerationModel<ChatPrompt, SETTINGS>;
+     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
+ }
  export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
      doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
      extractTextDelta(delta: unknown): string | undefined;
+ }
+ export interface TextStreamingBaseModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextStreamingModel<PROMPT, SETTINGS> {
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt(): TextStreamingModel<string, SETTINGS>;
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt(): TextStreamingModel<InstructionPrompt, SETTINGS>;
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt(): TextStreamingModel<ChatPrompt, SETTINGS>;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
  }
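The new `TextGenerationBaseModel` and `TextStreamingBaseModel` interfaces move the `withTextPrompt()` / `withInstructionPrompt()` / `withChatPrompt()` shorthands into the shared model contract instead of leaving them as per-provider extras (the per-method doc comments deleted from the provider classes below now live here). A minimal usage sketch, assuming a configured Mistral API key; the model choice is illustrative:

```ts
import { mistral, streamText } from "modelfusion";

// MistralChatModel now implements TextStreamingBaseModel, so the
// prompt-style shorthands are part of its declared interface:
const textStream = await streamText({
  model: mistral.ChatTextGenerator({ model: "mistral-tiny" }).withTextPrompt(),
  prompt: "Write a short story about a robot learning to love.",
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```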
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts CHANGED
@@ -75,8 +75,8 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
      doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
          rawResponse: {
              embeddings: number[][];
-             texts: string[];
              id: string;
+             texts: string[];
              meta: {
                  api_version: {
                      version: string;
@@ -110,8 +110,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
      }>;
  }, "strip", z.ZodTypeAny, {
      embeddings: number[][];
-     texts: string[];
      id: string;
+     texts: string[];
      meta: {
          api_version: {
              version: string;
@@ -119,8 +119,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
      };
  }, {
      embeddings: number[][];
-     texts: string[];
      id: string;
+     texts: string[];
      meta: {
          api_version: {
              version: string;
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -170,21 +170,18 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
          const chunk = delta;
          return chunk.is_finished === true ? "" : chunk.text;
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput() {
+         return this;
+     }
+     withTextPrompt() {
+         return this.withPromptTemplate((0, TextPromptTemplate_js_1.text)());
+     }
      withInstructionPrompt() {
          return this.withPromptTemplate((0, TextPromptTemplate_js_1.instruction)());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options) {
          return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
      }
-     withJsonOutput() {
-         return this;
-     }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
              model: this.withSettings({
package/model-provider/cohere/CohereTextGenerationModel.d.ts CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  import { CohereTokenizer } from "./CohereTokenizer.js";
@@ -47,7 +47,7 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
   * "Write a short story about a robot learning to love:\n\n"
   * );
   */
- export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextStreamingModel<string, CohereTextGenerationModelSettings> {
+ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextStreamingBaseModel<string, CohereTextGenerationModelSettings> {
      constructor(settings: CohereTextGenerationModelSettings);
      readonly provider: "cohere";
      get modelName(): "command" | "command-light";
@@ -141,18 +141,13 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
          is_finished: true;
      }>>>;
      extractTextDelta(delta: unknown): string;
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput(): this;
+     withTextPrompt(): PromptTemplateTextStreamingModel<string, string, CohereTextGenerationModelSettings, this>;
      withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, CohereTextGenerationModelSettings, this>;
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options?: {
          user?: string;
          assistant?: string;
      }): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
-     withJsonOutput(): this;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
      withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
  }
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -6,7 +6,7 @@ import { validateTypes } from "../../core/schema/validateTypes.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
- import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
+ import { chat, instruction, text, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { createJsonStreamResponseHandler } from "../../util/streaming/createJsonStreamResponseHandler.js";
  import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
@@ -167,21 +167,18 @@ export class CohereTextGenerationModel extends AbstractModel {
          const chunk = delta;
          return chunk.is_finished === true ? "" : chunk.text;
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput() {
+         return this;
+     }
+     withTextPrompt() {
+         return this.withPromptTemplate(text());
+     }
      withInstructionPrompt() {
          return this.withPromptTemplate(instruction());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options) {
          return this.withPromptTemplate(chat(options));
      }
-     withJsonOutput() {
-         return this;
-     }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel({
              model: this.withSettings({
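With `text()` added to the imports, the Cohere model now provides the `withTextPrompt()` shorthand required by `TextStreamingBaseModel` (`withJsonOutput` merely moved up and still returns `this`). A minimal sketch, assuming `COHERE_API_KEY` is set; the prompt is taken from the class docstring above:

```ts
import { cohere, generateText } from "modelfusion";

const text = await generateText({
  // withTextPrompt() is newly available on the Cohere model:
  model: cohere
    .TextGenerator({ model: "command", maxGenerationTokens: 500 })
    .withTextPrompt(),
  prompt: "Write a short story about a robot learning to love:\n\n",
});
```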
package/model-provider/cohere/CohereTokenizer.d.ts CHANGED
@@ -93,21 +93,21 @@ declare const cohereTokenizationResponseSchema: z.ZodObject<{
      };
  }>;
  }, "strip", z.ZodTypeAny, {
+     tokens: number[];
+     token_strings: string[];
      meta: {
          api_version: {
              version: string;
          };
      };
+ }, {
      tokens: number[];
      token_strings: string[];
- }, {
      meta: {
          api_version: {
              version: string;
          };
      };
-     tokens: number[];
-     token_strings: string[];
  }>;
  export type CohereTokenizationResponse = z.infer<typeof cohereTokenizationResponseSchema>;
  export {};
package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts CHANGED
@@ -9,7 +9,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
  import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
  import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
@@ -143,7 +143,7 @@ export interface LlamaCppCompletionPrompt {
       */
      images?: Record<number, string>;
  }
- export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
+ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingBaseModel<LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
      constructor(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
      readonly provider = "llamacpp";
      get modelName(): null;
package/model-provider/mistral/MistralChatModel.cjs CHANGED
@@ -126,21 +126,12 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
          const chunk = delta;
          return chunk.choices[0].delta.content ?? undefined;
      }
-     /**
-      * Returns this model with a text prompt template.
-      */
      withTextPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.text)());
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
      withInstructionPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.instruction)());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.chat)());
      }
package/model-provider/mistral/MistralChatModel.d.ts CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  export type MistralChatMessage = {
@@ -47,7 +47,7 @@ export interface MistralChatModelSettings extends TextGenerationModelSettings {
       */
      randomSeed?: number | null;
  }
- export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingModel<MistralChatPrompt, MistralChatModelSettings> {
+ export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingBaseModel<MistralChatPrompt, MistralChatModelSettings> {
      constructor(settings: MistralChatModelSettings);
      readonly provider = "mistral";
      get modelName(): "mistral-tiny" | "mistral-small" | "mistral-medium";
@@ -149,17 +149,8 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
          created?: number | undefined;
      }>>>;
      extractTextDelta(delta: unknown): string | undefined;
-     /**
-      * Returns this model with a text prompt template.
-      */
      withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralChatPrompt, MistralChatModelSettings, this>;
-     /**
-      * Returns this model with an instruction prompt template.
-      */
      withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
      withJsonOutput(): this;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralChatPrompt, MistralChatModelSettings, this>;
package/model-provider/mistral/MistralChatModel.js CHANGED
@@ -123,21 +123,12 @@ export class MistralChatModel extends AbstractModel {
          const chunk = delta;
          return chunk.choices[0].delta.content ?? undefined;
      }
-     /**
-      * Returns this model with a text prompt template.
-      */
      withTextPrompt() {
          return this.withPromptTemplate(text());
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
      withInstructionPrompt() {
          return this.withPromptTemplate(instruction());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt() {
          return this.withPromptTemplate(chat());
      }
package/model-provider/mistral/index.cjs CHANGED
@@ -26,9 +26,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.mistral = exports.MistralChatPrompt = void 0;
+ exports.mistral = void 0;
  __exportStar(require("./MistralApiConfiguration.cjs"), exports);
  __exportStar(require("./MistralChatModel.cjs"), exports);
- exports.MistralChatPrompt = __importStar(require("./MistralChatPromptTemplate.cjs"));
  exports.mistral = __importStar(require("./MistralFacade.cjs"));
  __exportStar(require("./MistralTextEmbeddingModel.cjs"), exports);
package/model-provider/mistral/index.d.ts CHANGED
@@ -1,6 +1,5 @@
  export * from "./MistralApiConfiguration.js";
  export * from "./MistralChatModel.js";
- export * as MistralChatPrompt from "./MistralChatPromptTemplate.js";
  export { MistralErrorData } from "./MistralError.js";
  export * as mistral from "./MistralFacade.js";
  export * from "./MistralTextEmbeddingModel.js";
package/model-provider/mistral/index.js CHANGED
@@ -1,5 +1,4 @@
  export * from "./MistralApiConfiguration.js";
  export * from "./MistralChatModel.js";
- export * as MistralChatPrompt from "./MistralChatPromptTemplate.js";
  export * as mistral from "./MistralFacade.js";
  export * from "./MistralTextEmbeddingModel.js";