modelfusion 0.124.0 → 0.126.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/CHANGELOG.md +34 -1
  2. package/README.md +17 -30
  3. package/core/FunctionEvent.d.ts +3 -3
  4. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -11
  5. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -1
  6. package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -11
  7. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -11
  8. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -1
  9. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -11
  10. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -11
  11. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -1
  12. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -11
  13. package/model-function/generate-text/TextGenerationModel.d.ts +31 -1
  14. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  15. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -9
  16. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
  17. package/model-provider/cohere/CohereTextGenerationModel.js +7 -10
  18. package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
  19. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +2 -2
  20. package/model-provider/mistral/MistralChatModel.cjs +0 -9
  21. package/model-provider/mistral/MistralChatModel.d.ts +2 -11
  22. package/model-provider/mistral/MistralChatModel.js +0 -9
  23. package/model-provider/mistral/index.cjs +1 -2
  24. package/model-provider/mistral/index.d.ts +0 -1
  25. package/model-provider/mistral/index.js +0 -1
  26. package/model-provider/ollama/OllamaChatModel.cjs +0 -9
  27. package/model-provider/ollama/OllamaChatModel.d.ts +2 -11
  28. package/model-provider/ollama/OllamaChatModel.js +0 -9
  29. package/model-provider/ollama/OllamaCompletionModel.d.ts +2 -2
  30. package/model-provider/ollama/index.cjs +0 -1
  31. package/model-provider/ollama/index.d.ts +0 -1
  32. package/model-provider/ollama/index.js +0 -1
  33. package/model-provider/openai/AbstractOpenAIChatModel.cjs +5 -3
  34. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -5
  35. package/model-provider/openai/AbstractOpenAIChatModel.js +5 -3
  36. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  37. package/model-provider/openai/OpenAIChatModel.cjs +0 -9
  38. package/model-provider/openai/OpenAIChatModel.d.ts +2 -11
  39. package/model-provider/openai/OpenAIChatModel.js +0 -9
  40. package/model-provider/openai/OpenAICompletionModel.cjs +3 -6
  41. package/model-provider/openai/OpenAICompletionModel.d.ts +3 -8
  42. package/model-provider/openai/OpenAICompletionModel.js +4 -7
  43. package/model-provider/openai/index.cjs +1 -2
  44. package/model-provider/openai/index.d.ts +0 -1
  45. package/model-provider/openai/index.js +0 -1
  46. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -9
  47. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -11
  48. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -9
  49. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +10 -0
  50. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +8 -2
  51. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +10 -0
  52. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +13 -1
  53. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +11 -0
  54. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +11 -0
  55. package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +33 -0
  56. package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +13 -0
  57. package/model-provider/openai-compatible/PerplexityApiConfiguration.js +29 -0
  58. package/model-provider/openai-compatible/index.cjs +2 -1
  59. package/model-provider/openai-compatible/index.d.ts +2 -1
  60. package/model-provider/openai-compatible/index.js +2 -1
  61. package/package.json +1 -1
  62. package/tool/index.cjs +2 -2
  63. package/tool/index.d.ts +2 -2
  64. package/tool/index.js +2 -2
  65. package/tool/run-tool/RunToolEvent.d.ts +7 -0
  66. package/tool/{use-tool → run-tool}/index.cjs +2 -2
  67. package/tool/run-tool/index.d.ts +2 -0
  68. package/tool/run-tool/index.js +2 -0
  69. package/tool/{use-tool/useTool.cjs → run-tool/runTool.cjs} +5 -5
  70. package/tool/{use-tool/useTool.d.ts → run-tool/runTool.d.ts} +2 -2
  71. package/tool/{use-tool/useTool.js → run-tool/runTool.js} +3 -3
  72. package/tool/run-tools/RunToolsEvent.d.ts +7 -0
  73. package/tool/{use-tools → run-tools}/index.cjs +2 -2
  74. package/tool/run-tools/index.d.ts +2 -0
  75. package/tool/run-tools/index.js +2 -0
  76. package/tool/{use-tools/useTools.cjs → run-tools/runTools.cjs} +4 -4
  77. package/tool/{use-tools/useTools.d.ts → run-tools/runTools.d.ts} +1 -1
  78. package/tool/{use-tools/useTools.js → run-tools/runTools.js} +2 -2
  79. package/tool/use-tool/UseToolEvent.d.ts +0 -7
  80. package/tool/use-tool/index.d.ts +0 -2
  81. package/tool/use-tool/index.js +0 -2
  82. package/tool/use-tools/UseToolsEvent.d.ts +0 -7
  83. package/tool/use-tools/index.d.ts +0 -2
  84. package/tool/use-tools/index.js +0 -2
  85. /package/tool/{use-tool/UseToolEvent.cjs → run-tool/RunToolEvent.cjs} +0 -0
  86. /package/tool/{use-tool/UseToolEvent.js → run-tool/RunToolEvent.js} +0 -0
  87. /package/tool/{use-tools/UseToolsEvent.cjs → run-tools/RunToolsEvent.cjs} +0 -0
  88. /package/tool/{use-tools/UseToolsEvent.js → run-tools/RunToolsEvent.js} +0 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,34 @@
  # Changelog
 
+ ## v0.126.0 - 2024-01-15
+
+ ### Changed
+
+ - **breaking change**: rename `useTool` to `runTool` and `useTools` to `runTools` to avoid confusion with React hooks.
+
+ ## v0.125.0 - 2024-01-14
+
+ ### Added
+
+ - Perplexity AI chat completion support. Example:
+
+ ```ts
+ import { openaicompatible, streamText } from "modelfusion";
+
+ const textStream = await streamText({
+   model: openaicompatible
+     .ChatTextGenerator({
+       api: openaicompatible.PerplexityApi(),
+       provider: "openaicompatible-perplexity",
+       model: "pplx-70b-online", // online model with access to web search
+       maxGenerationTokens: 500,
+     })
+     .withTextPrompt(),
+
+   prompt: "What is RAG in AI?",
+ });
+ ```
+
  ## v0.124.0 - 2024-01-13
 
  ### Added
@@ -1068,7 +1097,7 @@ Ollama edge case and error handling improvements.
  ### Changed
 
  - **breaking change**: `ChatPrompt` structure and terminology has changed to align more closely with OpenAI and similar chat prompts. This is also in preparation for integrating images and function calls results into chat prompts.
- - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-format) for documentation of the new prompt formats.
+ - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-styles) for documentation of the new prompt formats.
 
  ## v0.69.0 - 2023-11-15
 
@@ -2022,3 +2051,7 @@ Since this change already affected all JSON generation calls and tools, I includ
  1. Recursive Character Splitter: A feature to split text into characters recursively for more detailed text analysis.
  1. Recursive Text Mapping: This enables recursive mapping of text, beneficial for tasks like summarization or extraction.
  1. Split-Map-Filter-Reduce for Text Processing: A process chain developed for sophisticated text handling, allowing operations to split, map, filter, and reduce text data.
+
+ ```
+
+ ```
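A minimal migration sketch for the v0.126.0 rename described in the changelog above (the `calculator` tool import is hypothetical; the model and prompt mirror the README example further down — only the function name changes):

```ts
import { openai, runTool } from "modelfusion";
import { calculator } from "./calculator-tool"; // hypothetical: any Tool instance

// Before v0.126.0:
//   const { ok, result } = await useTool({ model, tool, prompt });
// From v0.126.0 on, the call is identical except for the name:
const { ok, result } = await runTool({
  model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  tool: calculator,
  prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
});

console.log(`Ok:`, ok);
console.log(`Result or Error:`, result);
```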
package/README.md CHANGED
@@ -30,7 +30,7 @@
  npm install modelfusion
  ```
 
- Or use a template:
+ Or use a starter template:
 
  - [ModelFusion terminal app starter](https://github.com/lgrammel/modelfusion-terminal-app-starter)
  - [Next.js, Vercel AI SDK, Llama.cpp & ModelFusion starter](https://github.com/lgrammel/modelfusion-llamacpp-nextjs-starter)
@@ -39,14 +39,14 @@ Or use a template:
  ## Usage Examples
 
  > [!TIP]
- > The basic examples are a great way to get started and to explore in parallel with the [documentation](https://modelfusion.dev/). You can find them in the [examples/basic](https://github.com/lgrammel/modelfusion/tree/main/examples/basic) folder.
+ > The basic examples are a great way to get started and to explore in parallel with the [documentation](https://modelfusion.dev/guide/function/). You can find them in the [examples/basic](https://github.com/lgrammel/modelfusion/tree/main/examples/basic) folder.
 
  You can provide API keys for the different [integrations](https://modelfusion.dev/integration/model-provider/) using environment variables (e.g., `OPENAI_API_KEY`) or pass them into the model constructors as options.
 
  ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
 
  Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)).
- You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-template) to change the prompt template of a model.
+ You can use [prompt styles](https://modelfusion.dev/guide/function/generate-text#prompt-styles) to use text, instruction, or chat prompts.
 
  #### generateText
 
@@ -372,12 +372,12 @@ Tools are functions (and associated metadata) that can be executed by an AI mode
 
  ModelFusion offers several tools out-of-the-box: [Math.js](https://modelfusion.dev/guide/tools/available-tools/mathjs), [MediaWiki Search](https://modelfusion.dev/guide/tools/available-tools/mediawiki-search), [SerpAPI](https://modelfusion.dev/guide/tools/available-tools/serpapi), [Google Custom Search](https://modelfusion.dev/guide/tools/available-tools/google-custom-search). You can also create [custom tools](https://modelfusion.dev/guide/tools).
 
- #### [useTool](https://modelfusion.dev/guide/tools/use-tool)
+ #### [runTool](https://modelfusion.dev/guide/tools/run-tool)
 
- With `useTool`, you can ask a tool-compatible language model (e.g. OpenAI chat) to invoke a single tool. `useTool` first generates a tool call and then executes the tool with the arguments.
+ With `runTool`, you can ask a tool-compatible language model (e.g. OpenAI chat) to invoke a single tool. `runTool` first generates a tool call and then executes the tool with the arguments.
 
  ```ts
- const { tool, toolCall, args, ok, result } = await useTool({
+ const { tool, toolCall, args, ok, result } = await runTool({
    model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
    tool: calculator,
    prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
@@ -390,12 +390,12 @@ console.log(`Ok:`, ok);
  console.log(`Result or Error:`, result);
  ```
 
- #### [useTools](https://modelfusion.dev/guide/tools/use-tools)
+ #### [runTools](https://modelfusion.dev/guide/tools/run-tools)
 
- With `useTools`, you can ask a language model to generate several tool calls as well as text. The model will choose which tools (if any) should be called with which arguments. Both the text and the tool calls are optional. This function executes the tools.
+ With `runTools`, you can ask a language model to generate several tool calls as well as text. The model will choose which tools (if any) should be called with which arguments. Both the text and the tool calls are optional. This function executes the tools.
 
  ```ts
- const { text, toolResults } = await useTools({
+ const { text, toolResults } = await runTools({
    model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
    tools: [calculator /* ... */],
    prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
@@ -404,7 +404,7 @@ const { text, toolResults } = await useTools({
 
  #### [Agent Loop](https://modelfusion.dev/guide/tools/agent-loop)
 
- You can use `useTools` to implement an agent loop that responds to user messages and executes tools. [Learn more](https://modelfusion.dev/guide/tools/agent-loop).
+ You can use `runTools` to implement an agent loop that responds to user messages and executes tools. [Learn more](https://modelfusion.dev/guide/tools/agent-loop).
 
  ### [Vector Indices](https://modelfusion.dev/guide/vector-index)
 
@@ -442,11 +442,11 @@ const retrievedTexts = await retrieve(
 
  Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-index/memory), [SQLite VSS](https://modelfusion.dev/integration/vector-index/sqlite-vss), [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
 
- ### [Text Generation Prompt Templates](https://modelfusion.dev/guide/function/generate-text#prompt-format)
+ ### [Text Generation Prompt Styles](https://modelfusion.dev/guide/function/generate-text#prompt-styles)
 
- Prompt templates let you use higher level prompt structures (such as text, instruction or chat prompts) for different models.
+ You can use different prompt styles (such as text, instruction or chat prompts) with ModelFusion text generation models. These prompt styles can be accessed through the methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()`:
 
- #### Text Prompt Example
+ #### Text Prompt Style
 
  ```ts
  const text = await generateText({
@@ -460,7 +460,7 @@ const text = await generateText({
  });
  ```
 
- #### Instruction Prompt Example
+ #### Instruction Prompt Style
 
  ```ts
  const text = await generateText({
@@ -480,9 +480,7 @@ const text = await generateText({
  });
  ```
 
- They can also be accessed through the shorthand methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
-
- #### Chat Prompt Example
+ #### Chat Prompt Style
 
  ```ts
  const textStream = await streamText({
@@ -512,17 +510,6 @@ const textStream = await streamText({
  });
  ```
 
- | Prompt Template  | Text Prompt | Instruction Prompt | Chat Prompt |
- | ---------------- | ----------- | ------------------ | ----------- |
- | Alpaca           | ✅          | ✅                 | ❌          |
- | ChatML           | ✅          | ✅                 | ✅          |
- | Llama 2          | ✅          | ✅                 | ✅          |
- | Mistral Instruct | ✅          | ✅                 | ✅          |
- | NeuralChat       | ✅          | ✅                 | ✅          |
- | Synthia          | ✅          | ✅                 | ✅          |
- | Vicuna           | ✅          | ✅                 | ✅          |
- | Generic Text     | ✅          | ✅                 | ✅          |
-
  ### [Image Generation Prompt Templates](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 
  You can use prompt templates with image models as well, e.g. to use a basic text prompt. It is available as a shorthand method:
@@ -593,8 +580,8 @@ modelfusion.setLogFormat("detailed-object"); // log full events
  - [Embed Value](https://modelfusion.dev/guide/function/embed)
  - [Classify Value](https://modelfusion.dev/guide/function/classify)
  - [Tools](https://modelfusion.dev/guide/tools)
- - [Use Tool](https://modelfusion.dev/guide/tools/use-tool)
- - [Use Tools](https://modelfusion.dev/guide/tools/use-tools)
+ - [Use Tool](https://modelfusion.dev/guide/tools/run-tool)
+ - [Use Tools](https://modelfusion.dev/guide/tools/run-tools)
  - [Agent Loop](https://modelfusion.dev/guide/tools/agent-loop)
  - [Available Tools](https://modelfusion.dev/guide/tools/available-tools/)
  - [Custom Tools](https://modelfusion.dev/guide/tools/custom-tools)
package/core/FunctionEvent.d.ts CHANGED
@@ -1,8 +1,8 @@
  import { ModelCallFinishedEvent, ModelCallStartedEvent } from "../model-function/ModelCallEvent.js";
  import { RetrieveFinishedEvent, RetrieveStartedEvent } from "../retriever/RetrieveEvent.js";
  import { ExecuteToolFinishedEvent, ExecuteToolStartedEvent } from "../tool/execute-tool/ExecuteToolEvent.js";
- import { UseToolFinishedEvent, UseToolStartedEvent } from "../tool/use-tool/UseToolEvent.js";
- import { useToolsFinishedEvent, useToolsStartedEvent } from "../tool/use-tools/UseToolsEvent.js";
+ import { runToolFinishedEvent, runToolStartedEvent } from "../tool/run-tool/RunToolEvent.js";
+ import { runToolsFinishedEvent, runToolsStartedEvent } from "../tool/run-tools/RunToolsEvent.js";
  import { UpsertIntoVectorIndexFinishedEvent, UpsertIntoVectorIndexStartedEvent } from "../vector-index/UpsertIntoVectorIndexEvent.js";
  import { ExecuteFunctionFinishedEvent, ExecuteFunctionStartedEvent } from "./ExecuteFunctionEvent.js";
  import { ExtensionFunctionFinishedEvent, ExtensionFunctionStartedEvent } from "./ExtensionFunctionEvent.js";
@@ -82,4 +82,4 @@ export interface BaseFunctionFinishedEvent extends BaseFunctionEvent {
       */
      result: BaseFunctionFinishedEventResult;
  }
- export type FunctionEvent = ExecuteFunctionStartedEvent | ExecuteFunctionFinishedEvent | ExecuteToolStartedEvent | ExecuteToolFinishedEvent | ExtensionFunctionStartedEvent | ExtensionFunctionFinishedEvent | ModelCallStartedEvent | ModelCallFinishedEvent | RetrieveStartedEvent | RetrieveFinishedEvent | UpsertIntoVectorIndexStartedEvent | UpsertIntoVectorIndexFinishedEvent | UseToolStartedEvent | UseToolFinishedEvent | useToolsStartedEvent | useToolsFinishedEvent;
+ export type FunctionEvent = ExecuteFunctionStartedEvent | ExecuteFunctionFinishedEvent | ExecuteToolStartedEvent | ExecuteToolFinishedEvent | ExtensionFunctionStartedEvent | ExtensionFunctionFinishedEvent | ModelCallStartedEvent | ModelCallFinishedEvent | RetrieveStartedEvent | RetrieveFinishedEvent | UpsertIntoVectorIndexStartedEvent | UpsertIntoVectorIndexFinishedEvent | runToolStartedEvent | runToolFinishedEvent | runToolsStartedEvent | runToolsFinishedEvent;
package/model-function/generate-text/PromptTemplateFullTextModel.cjs CHANGED
@@ -14,17 +14,6 @@ class PromptTemplateFullTextModel extends PromptTemplateTextStreamingModel_js_1.
          const mappedPrompt = this.promptTemplate.format(prompt);
          return this.model.doGenerateToolCalls(tools, mappedPrompt, options);
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateFullTextModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateFullTextModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateFullTextModel.d.ts CHANGED
@@ -36,6 +36,5 @@ export declare class PromptTemplateFullTextModel<PROMPT, MODEL_PROMPT, SETTINGS
              totalTokens: number;
          } | undefined;
      }>;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateFullTextModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateFullTextModel.js CHANGED
@@ -11,17 +11,6 @@ export class PromptTemplateFullTextModel extends PromptTemplateTextStreamingMode
          const mappedPrompt = this.promptTemplate.format(prompt);
          return this.model.doGenerateToolCalls(tools, mappedPrompt, options);
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateFullTextModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateFullTextModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs CHANGED
@@ -74,17 +74,6 @@ class PromptTemplateTextGenerationModel {
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextGenerationModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextGenerationModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts CHANGED
@@ -43,6 +43,5 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
      asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallsModel<INPUT_PROMPT, PROMPT, this>;
      asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextGenerationModel<INPUT_PROMPT, PROMPT, this>;
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateTextGenerationModel.js CHANGED
@@ -71,17 +71,6 @@ export class PromptTemplateTextGenerationModel {
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextGenerationModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextGenerationModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs CHANGED
@@ -26,17 +26,6 @@ class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerationModel
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextStreamingModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts CHANGED
@@ -15,6 +15,5 @@ export declare class PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETT
      extractTextDelta(delta: unknown): string | undefined;
      asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextStreamingModel<INPUT_PROMPT, PROMPT, this>;
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
      withSettings(additionalSettings: Partial<SETTINGS>): this;
  }
package/model-function/generate-text/PromptTemplateTextStreamingModel.js CHANGED
@@ -23,17 +23,6 @@ export class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerati
              promptTemplate: this.promptTemplate,
          });
      }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
      withSettings(additionalSettings) {
          return new PromptTemplateTextStreamingModel({
              model: this.model.withSettings(additionalSettings),
package/model-function/generate-text/TextGenerationModel.d.ts CHANGED
@@ -6,6 +6,8 @@ import { Model, ModelSettings } from "../Model.js";
  import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
  import { TextGenerationPromptTemplate } from "./TextGenerationPromptTemplate.js";
  import { TextGenerationResult } from "./TextGenerationResult.js";
+ import { ChatPrompt } from "./prompt-template/ChatPrompt.js";
+ import { InstructionPrompt } from "./prompt-template/InstructionPrompt.js";
  export declare const textGenerationModelProperties: readonly ["maxGenerationTokens", "stopSequences", "numberOfGenerations", "trimWhitespace"];
  export interface TextGenerationModelSettings extends ModelSettings {
      /**
@@ -82,15 +84,43 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
              totalTokens: number;
          };
      };
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
      /**
       * When possible, limit the output generation to the specified JSON schema,
       * or super sets of it (e.g. JSON in general).
       */
      withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
  }
+ export interface TextGenerationBaseModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt(): TextGenerationModel<string, SETTINGS>;
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt(): TextGenerationModel<InstructionPrompt, SETTINGS>;
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt(): TextGenerationModel<ChatPrompt, SETTINGS>;
+     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
+ }
  export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
      doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
      extractTextDelta(delta: unknown): string | undefined;
+ }
+ export interface TextStreamingBaseModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextStreamingModel<PROMPT, SETTINGS> {
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt(): TextStreamingModel<string, SETTINGS>;
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt(): TextStreamingModel<InstructionPrompt, SETTINGS>;
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt(): TextStreamingModel<ChatPrompt, SETTINGS>;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
  }
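The `TextGenerationBaseModel` and `TextStreamingBaseModel` interfaces added above standardize the prompt-style methods that provider models implement. A hedged sketch of the intended call pattern (the Mistral model choice is illustrative; `withTextPrompt()` narrows the accepted prompt type to `string`, as declared in the interface):

```ts
import { mistral, streamText } from "modelfusion";

// withTextPrompt() wraps the chat model so that streamText accepts
// a plain string instead of the provider-specific chat prompt type.
const textStream = await streamText({
  model: mistral
    .ChatTextGenerator({ model: "mistral-tiny", maxGenerationTokens: 120 })
    .withTextPrompt(),
  prompt: "Write a one-sentence summary of retrieval-augmented generation.",
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```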
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts CHANGED
@@ -75,8 +75,8 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
      doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
          rawResponse: {
              embeddings: number[][];
-             texts: string[];
              id: string;
+             texts: string[];
              meta: {
                  api_version: {
                      version: string;
@@ -110,8 +110,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
      }>;
  }, "strip", z.ZodTypeAny, {
      embeddings: number[][];
-     texts: string[];
      id: string;
+     texts: string[];
      meta: {
          api_version: {
              version: string;
@@ -119,8 +119,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
      };
  }, {
      embeddings: number[][];
-     texts: string[];
      id: string;
+     texts: string[];
      meta: {
          api_version: {
              version: string;
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -170,21 +170,18 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
          const chunk = delta;
          return chunk.is_finished === true ? "" : chunk.text;
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput() {
+         return this;
+     }
+     withTextPrompt() {
+         return this.withPromptTemplate((0, TextPromptTemplate_js_1.text)());
+     }
      withInstructionPrompt() {
          return this.withPromptTemplate((0, TextPromptTemplate_js_1.instruction)());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options) {
          return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
      }
-     withJsonOutput() {
-         return this;
-     }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
              model: this.withSettings({
package/model-provider/cohere/CohereTextGenerationModel.d.ts CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  import { CohereTokenizer } from "./CohereTokenizer.js";
@@ -47,7 +47,7 @@ export interface CohereTextGenerationModelSettings extends TextGenerationModelSe
   *   "Write a short story about a robot learning to love:\n\n"
   * );
   */
- export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextStreamingModel<string, CohereTextGenerationModelSettings> {
+ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextGenerationModelSettings> implements TextStreamingBaseModel<string, CohereTextGenerationModelSettings> {
      constructor(settings: CohereTextGenerationModelSettings);
      readonly provider: "cohere";
      get modelName(): "command" | "command-light";
@@ -141,18 +141,13 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
          is_finished: true;
      }>>>;
      extractTextDelta(delta: unknown): string;
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput(): this;
+     withTextPrompt(): PromptTemplateTextStreamingModel<string, string, CohereTextGenerationModelSettings, this>;
      withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, CohereTextGenerationModelSettings, this>;
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options?: {
          user?: string;
          assistant?: string;
      }): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
-     withJsonOutput(): this;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
      withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
  }
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -6,7 +6,7 @@ import { validateTypes } from "../../core/schema/validateTypes.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
- import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
+ import { chat, instruction, text, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { createJsonStreamResponseHandler } from "../../util/streaming/createJsonStreamResponseHandler.js";
  import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
@@ -167,21 +167,18 @@ export class CohereTextGenerationModel extends AbstractModel {
          const chunk = delta;
          return chunk.is_finished === true ? "" : chunk.text;
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
+     withJsonOutput() {
+         return this;
+     }
+     withTextPrompt() {
+         return this.withPromptTemplate(text());
+     }
      withInstructionPrompt() {
          return this.withPromptTemplate(instruction());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(options) {
          return this.withPromptTemplate(chat(options));
      }
-     withJsonOutput() {
-         return this;
-     }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel({
              model: this.withSettings({
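A short usage sketch for the `withTextPrompt()` method that this diff adds to `CohereTextGenerationModel` (the prompt is reused from the class's own doc comment; assumes a `COHERE_API_KEY` environment variable is set):

```ts
import { cohere, generateText } from "modelfusion";

// withTextPrompt() is newly implemented here; it applies the plain
// text prompt template (including its stop sequences) to the model.
const text = await generateText({
  model: cohere
    .TextGenerator({ model: "command", maxGenerationTokens: 500 })
    .withTextPrompt(),
  prompt: "Write a short story about a robot learning to love:\n\n",
});
```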
package/model-provider/cohere/CohereTokenizer.d.ts CHANGED
@@ -93,21 +93,21 @@ declare const cohereTokenizationResponseSchema: z.ZodObject<{
          };
      }>;
  }, "strip", z.ZodTypeAny, {
+     tokens: number[];
+     token_strings: string[];
      meta: {
          api_version: {
              version: string;
          };
      };
+ }, {
      tokens: number[];
      token_strings: string[];
- }, {
      meta: {
          api_version: {
              version: string;
          };
      };
-     tokens: number[];
-     token_strings: string[];
  }>;
  export type CohereTokenizationResponse = z.infer<typeof cohereTokenizationResponseSchema>;
  export {};
package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts CHANGED
@@ -9,7 +9,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
  import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
  import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
@@ -143,7 +143,7 @@ export interface LlamaCppCompletionPrompt {
       */
      images?: Record<number, string>;
  }
- export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
+ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingBaseModel<LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
      constructor(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
      readonly provider = "llamacpp";
      get modelName(): null;
package/model-provider/mistral/MistralChatModel.cjs CHANGED
@@ -126,21 +126,12 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
          const chunk = delta;
          return chunk.choices[0].delta.content ?? undefined;
      }
-     /**
-      * Returns this model with a text prompt template.
-      */
      withTextPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.text)());
      }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
      withInstructionPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.instruction)());
      }
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt() {
          return this.withPromptTemplate((0, MistralChatPromptTemplate_js_1.chat)());
      }
package/model-provider/mistral/MistralChatModel.d.ts CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationModelSettings, TextStreamingBaseModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  export type MistralChatMessage = {
@@ -47,7 +47,7 @@ export interface MistralChatModelSettings extends TextGenerationModelSettings {
       */
      randomSeed?: number | null;
  }
- export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingModel<MistralChatPrompt, MistralChatModelSettings> {
+ export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingBaseModel<MistralChatPrompt, MistralChatModelSettings> {
      constructor(settings: MistralChatModelSettings);
      readonly provider = "mistral";
      get modelName(): "mistral-tiny" | "mistral-small" | "mistral-medium";
@@ -149,17 +149,8 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
          created?: number | undefined;
      }>>>;
      extractTextDelta(delta: unknown): string | undefined;
-     /**
-      * Returns this model with a text prompt template.
-      */
      withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralChatPrompt, MistralChatModelSettings, this>;
-     /**
-      * Returns this model with an instruction prompt template.
-      */
      withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
-     /**
-      * Returns this model with a chat prompt template.
-      */
      withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
      withJsonOutput(): this;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralChatPrompt, MistralChatModelSettings, this>;