modelfusion 0.99.0 → 0.101.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. package/CHANGELOG.md +1411 -0
  2. package/README.md +9 -15
  3. package/core/api/BaseUrlApiConfiguration.d.ts +7 -6
  4. package/core/api/BaseUrlPartsApiConfiguration.cjs +53 -0
  5. package/core/api/BaseUrlPartsApiConfiguration.d.ts +26 -0
  6. package/core/api/BaseUrlPartsApiConfiguration.js +49 -0
  7. package/core/api/index.cjs +1 -0
  8. package/core/api/index.d.ts +1 -0
  9. package/core/api/index.js +1 -0
  10. package/guard/fixStructure.cjs +3 -3
  11. package/guard/fixStructure.d.ts +3 -3
  12. package/guard/fixStructure.js +3 -3
  13. package/model-function/generate-structure/generateStructure.d.ts +2 -2
  14. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  15. package/model-provider/automatic1111/Automatic1111ApiConfiguration.cjs +8 -9
  16. package/model-provider/automatic1111/Automatic1111ApiConfiguration.d.ts +7 -9
  17. package/model-provider/automatic1111/Automatic1111ApiConfiguration.js +8 -9
  18. package/model-provider/automatic1111/Automatic1111Error.cjs +7 -31
  19. package/model-provider/automatic1111/Automatic1111Error.d.ts +2 -11
  20. package/model-provider/automatic1111/Automatic1111Error.js +6 -28
  21. package/model-provider/automatic1111/Automatic1111Facade.cjs +10 -1
  22. package/model-provider/automatic1111/Automatic1111Facade.d.ts +7 -0
  23. package/model-provider/automatic1111/Automatic1111Facade.js +8 -0
  24. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +22 -27
  25. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +8 -8
  26. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +22 -27
  27. package/model-provider/automatic1111/index.cjs +1 -3
  28. package/model-provider/automatic1111/index.d.ts +1 -1
  29. package/model-provider/automatic1111/index.js +0 -1
  30. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +6 -6
  31. package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs} +13 -13
  32. package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts} +21 -20
  33. package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js} +11 -11
  34. package/model-provider/mistral/MistralFacade.cjs +5 -5
  35. package/model-provider/mistral/MistralFacade.d.ts +3 -2
  36. package/model-provider/mistral/MistralFacade.js +3 -3
  37. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  38. package/model-provider/mistral/index.cjs +1 -1
  39. package/model-provider/mistral/index.d.ts +1 -1
  40. package/model-provider/mistral/index.js +1 -1
  41. package/model-provider/ollama/OllamaApiConfiguration.d.ts +6 -5
  42. package/model-provider/ollama/OllamaChatModel.cjs +303 -0
  43. package/model-provider/ollama/OllamaChatModel.d.ts +171 -0
  44. package/model-provider/ollama/OllamaChatModel.js +299 -0
  45. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +76 -0
  46. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +20 -0
  47. package/model-provider/ollama/OllamaChatPromptTemplate.js +69 -0
  48. package/model-provider/ollama/{OllamaTextGenerationModel.cjs → OllamaCompletionModel.cjs} +13 -11
  49. package/model-provider/ollama/OllamaCompletionModel.d.ts +159 -0
  50. package/model-provider/ollama/{OllamaTextGenerationModel.js → OllamaCompletionModel.js} +11 -9
  51. package/model-provider/ollama/{OllamaTextGenerationModel.test.cjs → OllamaCompletionModel.test.cjs} +3 -3
  52. package/model-provider/ollama/{OllamaTextGenerationModel.test.js → OllamaCompletionModel.test.js} +3 -3
  53. package/model-provider/ollama/OllamaFacade.cjs +15 -5
  54. package/model-provider/ollama/OllamaFacade.d.ts +7 -2
  55. package/model-provider/ollama/OllamaFacade.js +11 -3
  56. package/model-provider/ollama/OllamaTextGenerationSettings.cjs +2 -0
  57. package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +87 -0
  58. package/model-provider/ollama/OllamaTextGenerationSettings.js +1 -0
  59. package/model-provider/ollama/index.cjs +4 -1
  60. package/model-provider/ollama/index.d.ts +4 -1
  61. package/model-provider/ollama/index.js +4 -1
  62. package/model-provider/openai/OpenAIFacade.cjs +4 -2
  63. package/model-provider/openai/OpenAIFacade.d.ts +3 -1
  64. package/model-provider/openai/OpenAIFacade.js +2 -1
  65. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +1 -1
  66. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +3 -3
  67. package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
  68. package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
  69. package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
  70. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +5 -5
  71. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +1 -1
  72. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +1 -1
  73. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +1 -1
  74. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +1 -1
  75. package/package.json +15 -15
  76. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +0 -230
  77. /package/model-provider/ollama/{OllamaTextGenerationModel.test.d.ts → OllamaCompletionModel.test.d.ts} +0 -0
package/CHANGELOG.md ADDED
@@ -0,0 +1,1411 @@
1
+ # Changelog
2
+
3
+ ## v0.101.0 - 2023-12-22
4
+
5
+ ### Changed
6
+
7
+ - Automatic1111 Stable Diffusion Web UI configuration has separate configuration of host, port, and path.
8
+
9
+ ### Fixed
10
+
11
+ - Automatic1111 Stable Diffusion Web UI uses negative prompt and seed.
12
+
13
+ ## v0.100.0 - 2023-12-17
14
+
15
+ ### Added
16
+
17
+ - `ollama.ChatTextGenerator` model that calls the Ollama chat API.
18
+ - Ollama chat messages and prompts are exposed through `ollama.ChatMessage` and `ollama.ChatPrompt`
19
+ - OpenAI chat messages and prompts are exposed through `openai.ChatMessage` and `openai.ChatPrompt`
20
+ - Mistral chat messages and prompts are exposed through `mistral.ChatMessage` and `mistral.ChatPrompt`
21
+
22
+ ### Changed
23
+
24
+ - **breaking change**: renamed `ollama.TextGenerator` to `ollama.CompletionTextGenerator`
25
+ - **breaking change**: renamed `mistral.TextGenerator` to `mistral.ChatTextGenerator`
26
+
27
+ ## v0.99.0 - 2023-12-16
28
+
29
+ ### Added
30
+
31
+ - You can specify `numberOfGenerations` on text generation models and access multiple generations by using the `fullResponse: true` option. Example:
32
+
33
+ ```ts
34
+ // generate 2 texts:
35
+ const { texts } = await generateText(
36
+ openai.CompletionTextGenerator({
37
+ model: "gpt-3.5-turbo-instruct",
38
+ numberOfGenerations: 2,
39
+ maxGenerationTokens: 1000,
40
+ }),
41
+ "Write a short story about a robot learning to love:\n\n",
42
+ { fullResponse: true }
43
+ );
44
+ ```
45
+
46
+ - **breaking change**: Text generation models use a generalized `numberOfGenerations` parameter (instead of model specific parameters) to specify the number of generations.
47
+
48
+ ### Changed
49
+
50
+ - **breaking change**: Renamed `maxCompletionTokens` text generation model setting to `maxGenerationTokens`.
51
+
52
+ ## v0.98.0 - 2023-12-16
53
+
54
+ ### Changed
55
+
56
+ - **breaking change**: `responseType` option was changed into `fullResponse` option and uses a boolean value to make discovery easy. The response values from the full response have been renamed for clarity. For base64 image generation, you can use the `imageBase64` value from the full response:
57
+
58
+ ```ts
59
+ const { imageBase64 } = await generateImage(model, prompt, {
60
+ fullResponse: true,
61
+ });
62
+ ```
63
+
64
+ ### Improved
65
+
66
+ - Better docs for the OpenAI chat settings. Thanks [@bearjaws](https://github.com/bearjaws) for the contribution!
67
+
68
+ ### Fixed
69
+
70
+ - Streaming OpenAI chat text generation when setting `n:2` or higher returns only the stream from the first choice.
71
+
72
+ ## v0.97.0 - 2023-12-14
73
+
74
+ ### Added
75
+
76
+ - **breaking change**: Ollama image (vision) support. This changes the Ollama prompt format. You can add `.withTextPrompt()` to existing Ollama text generators to get a text prompt like before.
77
+
78
+ Vision example:
79
+
80
+ ```ts
81
+ import { ollama, streamText } from "modelfusion";
82
+
83
+ const textStream = await streamText(
84
+ ollama.TextGenerator({
85
+ model: "bakllava",
86
+ maxCompletionTokens: 1024,
87
+ temperature: 0,
88
+ }),
89
+ {
90
+ prompt: "Describe the image in detail",
91
+ images: [image], // base-64 encoded png or jpeg
92
+ }
93
+ );
94
+ ```
95
+
96
+ ### Changed
97
+
98
+ - **breaking change**: Switch Ollama settings to camelCase to align with the rest of the library.
99
+
100
+ ## v0.96.0 - 2023-12-14
101
+
102
+ ### Added
103
+
104
+ - [Mistral platform support](https://modelfusion.dev/integration/model-provider/mistral)
105
+
106
+ ## v0.95.0 - 2023-12-10
107
+
108
+ ### Added
109
+
110
+ - `cachePrompt` parameter for llama.cpp models. Thanks [@djwhitt](https://github.com/djwhitt) for the contribution!
111
+
112
+ ## v0.94.0 - 2023-12-10
113
+
114
+ ### Added
115
+
116
+ - Prompt template for neural-chat models.
117
+
118
+ ## v0.93.0 - 2023-12-10
119
+
120
+ ### Added
121
+
122
+ - Optional response prefix for instruction prompts to guide the LLM response.
123
+
124
+ ### Changed
125
+
126
+ - **breaking change**: Renamed prompt format to prompt template to align with the commonly used language (e.g. from model cards).
127
+
128
+ ## v0.92.1 - 2023-12-10
129
+
130
+ ### Changed
131
+
132
+ - Improved Ollama error handling.
133
+
134
+ ## v0.92.0 - 2023-12-09
135
+
136
+ ### Changed
137
+
138
+ - **breaking change**: setting global function observers and global logging has changed.
139
+ You can call methods on a `modelfusion` import:
140
+
141
+ ```ts
142
+ import { modelfusion } from "modelfusion";
143
+
144
+ modelfusion.setLogFormat("basic-text");
145
+ ```
146
+
147
+ - Cleaned output when using `detailed-object` log format.
148
+
149
+ ## v0.91.0 - 2023-12-09
150
+
151
+ ### Added
152
+
153
+ - `Whisper.cpp` [transcription (speech-to-text) model](https://modelfusion.dev/integration/model-provider/whispercpp) support.
154
+
155
+ ```ts
156
+ import { generateTranscription, whispercpp } from "modelfusion";
157
+
158
+ const data = await fs.promises.readFile("data/test.wav");
159
+
160
+ const transcription = await generateTranscription(whispercpp.Transcriber(), {
161
+ type: "wav",
162
+ data,
163
+ });
164
+ ```
165
+
166
+ ### Improved
167
+
168
+ - Better error reporting.
169
+
170
+ ## v0.90.0 - 2023-12-03
171
+
172
+ ### Added
173
+
174
+ - Temperature and language settings to OpenAI transcription model.
175
+
176
+ ## v0.89.0 - 2023-11-30
177
+
178
+ ### Added
179
+
180
+ - `maxValuesPerCall` setting for `OpenAITextEmbeddingModel` to enable different configurations, e.g. for Azure. Thanks [@nanotronic](https://github.com/nanotronic) for the contribution!
181
+
182
+ ## v0.88.0 - 2023-11-28
183
+
184
+ ### Added
185
+
186
+ - Multi-modal chat prompts. Supported by OpenAI vision chat models and by BakLLaVA prompt format.
187
+
188
+ ### Changed
189
+
190
+ - **breaking change**: renamed `ChatPrompt` to `TextChatPrompt` to distinguish it from multi-modal chat prompts.
191
+
192
+ ## v0.87.0 - 2023-11-27
193
+
194
+ ### Added
195
+
196
+ - **experimental**: `modelfusion/extension` export with functions and classes that are necessary to implement providers in 3rd party node modules. See [lgrammel/modelfusion-example-provider](https://github.com/lgrammel/modelfusion-example-provider) for an example.
197
+
198
+ ## v0.85.0 - 2023-11-26
199
+
200
+ ### Added
201
+
202
+ - `OpenAIChatMessage` function call support.
203
+
204
+ ## v0.84.0 - 2023-11-26
205
+
206
+ ### Added
207
+
208
+ - Support for OpenAI-compatible chat APIs. See [OpenAI Compatible](https://modelfusion.dev/integration/model-provider/openaicompatible) for details.
209
+
210
+ ```ts
211
+ import {
212
+ BaseUrlApiConfiguration,
213
+ openaicompatible,
214
+ generateText,
215
+ } from "modelfusion";
216
+
217
+ const text = await generateText(
218
+ openaicompatible
219
+ .ChatTextGenerator({
220
+ api: new BaseUrlApiConfiguration({
221
+ baseUrl: "https://api.fireworks.ai/inference/v1",
222
+ headers: {
223
+ Authorization: `Bearer ${process.env.FIREWORKS_API_KEY}`,
224
+ },
225
+ }),
226
+ model: "accounts/fireworks/models/mistral-7b",
227
+ })
228
+ .withTextPrompt(),
229
+
230
+ "Write a story about a robot learning to love"
231
+ );
232
+ ```
233
+
234
+ ## v0.83.0 - 2023-11-26
235
+
236
+ ### Added
237
+
238
+ - Introduce `uncheckedSchema()` facade function as an easier way to create unchecked ModelFusion schemas. This aligns the API with `zodSchema()`.
239
+
240
+ ### Changed
241
+
242
+ - **breaking change**: Renamed `InstructionPrompt` interface to `MultiModalInstructionPrompt` to clearly distinguish it from `TextInstructionPrompt`.
243
+ - **breaking change**: Renamed `.withBasicPrompt` methods for image generation models to `.withTextPrompt` to align with text generation models.
244
+
245
+ ## v0.82.0 - 2023-11-25
246
+
247
+ ### Added
248
+
249
+ - Introduce `zodSchema()` facade function as an easier way to create new ModelFusion Zod schemas. This clearly distinguishes it from `ZodSchema` that is also part of the zod library.
250
+
251
+ ## v0.81.0 - 2023-11-25
252
+
253
+ **breaking change**: `generateStructure` and `streamStructure` redesign. The new API does not require function calling and `StructureDefinition` objects any more. This makes it more flexible and it can be used in 3 ways:
254
+
255
+ - with OpenAI function calling:
256
+
257
+ ```ts
258
+ const model = openai
259
+ .ChatTextGenerator({ model: "gpt-3.5-turbo" })
260
+ .asFunctionCallStructureGenerationModel({
261
+ fnName: "...",
262
+ fnDescription: "...",
263
+ });
264
+ ```
265
+
266
+ - with OpenAI JSON format:
267
+
268
+ ```ts
269
+ const model = openai
270
+ .ChatTextGenerator({
271
+ model: "gpt-4-1106-preview",
272
+ temperature: 0,
273
+ maxCompletionTokens: 1024,
274
+ responseFormat: { type: "json_object" },
275
+ })
276
+ .asStructureGenerationModel(
277
+ jsonStructurePrompt((instruction: string, schema) => [
278
+ OpenAIChatMessage.system(
279
+ "JSON schema: \n" +
280
+ JSON.stringify(schema.getJsonSchema()) +
281
+ "\n\n" +
282
+ "Respond only using JSON that matches the above schema."
283
+ ),
284
+ OpenAIChatMessage.user(instruction),
285
+ ])
286
+ );
287
+ ```
288
+
289
+ - with Ollama (and a capable model, e.g., OpenHermes 2.5):
290
+ ```ts
291
+ const model = ollama
292
+ .TextGenerator({
293
+ model: "openhermes2.5-mistral",
294
+ maxCompletionTokens: 1024,
295
+ temperature: 0,
296
+ format: "json",
297
+ raw: true,
298
+ stopSequences: ["\n\n"], // prevent infinite generation
299
+ })
300
+ .withPromptFormat(ChatMLPromptFormat.instruction())
301
+ .asStructureGenerationModel(
302
+ jsonStructurePrompt((instruction: string, schema) => ({
303
+ system:
304
+ "JSON schema: \n" +
305
+ JSON.stringify(schema.getJsonSchema()) +
306
+ "\n\n" +
307
+ "Respond only using JSON that matches the above schema.",
308
+ instruction,
309
+ }))
310
+ );
311
+ ```
312
+
313
+ See [generateStructure](https://modelfusion.dev/guide/function/generate-structure) for details on the new API.
314
+
315
+ ## v0.80.0 - 2023-11-24
316
+
317
+ ### Changed
318
+
319
+ - **breaking change**: Restructured multi-modal instruction prompts and `OpenAIChatMessage.user()`
320
+
321
+ ## v0.79.0 - 2023-11-23
322
+
323
+ ### Added
324
+
325
+ - Multi-tool usage from open source models
326
+
327
+ Use `TextGenerationToolCallsOrGenerateTextModel` and related helper methods `.asToolCallsOrTextGenerationModel()` to create custom prompts & parsers.
328
+
329
+ Examples:
330
+
331
+ - `examples/basic/src/model-provider/ollama/ollama-use-tools-or-generate-text-openhermes-example.ts`
332
+ - `examples/basic/src/model-provider/llamacpp/llamacpp-use-tools-or-generate-text-openhermes-example.ts`
333
+
334
+ Example prompt format:
335
+
336
+ - `examples/basic/src/tool/prompts/open-hermes.ts` for OpenHermes 2.5
337
+
338
+ ## v0.78.0 - 2023-11-23
339
+
340
+ ### Removed
341
+
342
+ - **breaking change**: Removed `FunctionListToolCallPromptFormat`. See `examples/basic/src/model-provider/ollama/ollama-use-tool-mistral-example.ts` for how to implement a `ToolCallPromptFormat` for your tool.
343
+
344
+ ## v0.77.0 - 2023-11-23
345
+
346
+ ### Changed
347
+
348
+ - **breaking change**: Rename `Speech` to `SpeechGenerator` in facades
349
+ - **breaking change**: Rename `Transcription` to `Transcriber` in facades
350
+
351
+ ## v0.76.0 - 2023-11-23
352
+
353
+ ### Added
354
+
355
+ - Anthropic Claude 2.1 support
356
+
357
+ ## v0.75.0 - 2023-11-22
358
+
359
+ Introducing model provider facades:
360
+
361
+ ```ts
362
+ const image = await generateImage(
363
+ openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
364
+ "the wicked witch of the west in the style of early 19th century painting"
365
+ );
366
+ ```
367
+
368
+ ### Added
369
+
370
+ - Model provider facades. You can e.g. use `ollama.TextGenerator(...)` instead of `new OllamaTextGenerationModel(...)`.
371
+
372
+ ### Changed
373
+
374
+ - **breaking change**: Fixed method name `isParallizable` to `isParallelizable` in `EmbeddingModel`.
375
+
376
+ ### Removed
377
+
378
+ - **breaking change**: removed `HuggingFaceImageDescriptionModel`. Image description models will be replaced by multi-modal vision models.
379
+
380
+ ## v0.74.1 - 2023-11-22
381
+
382
+ ### Improved
383
+
384
+ - Increase OpenAI chat streaming resilience.
385
+
386
+ ## v0.74.0 - 2023-11-21
387
+
388
+ Prompt format and tool calling improvements.
389
+
390
+ ### Added
391
+
392
+ - text prompt format. Use simple text prompts, e.g. with `OpenAIChatModel`:
393
+ ```ts
394
+ const textStream = await streamText(
395
+ new OpenAIChatModel({
396
+ model: "gpt-3.5-turbo",
397
+ }).withTextPrompt(),
398
+ "Write a short story about a robot learning to love."
399
+ );
400
+ ```
401
+ - `.withTextPromptFormat` to `LlamaCppTextGenerationModel` for simplified prompt construction:
402
+ ```ts
403
+ const textStream = await streamText(
404
+ new LlamaCppTextGenerationModel({
405
+ // ...
406
+ }).withTextPromptFormat(Llama2PromptFormat.text()),
407
+ "Write a short story about a robot learning to love."
408
+ );
409
+ ```
410
+ - `.asToolCallGenerationModel()` to `OllamaTextGenerationModel` to simplify tool calls.
411
+
412
+ ### Improved
413
+
414
+ - better error reporting when using exponential backoff retries
415
+
416
+ ### Removed
417
+
418
+ - **breaking change**: removed `input` from `InstructionPrompt` (was Alpaca-specific, `AlpacaPromptFormat` still supports it)
419
+
420
+ ## v0.73.1 - 2023-11-19
421
+
422
+ Remove section newlines from Llama 2 prompt format.
423
+
424
+ ## v0.73.0 - 2023-11-19
425
+
426
+ Ollama edge case and error handling improvements.
427
+
428
+ ## v0.72.0 - 2023-11-19
429
+
430
+ **Breaking change**: the tool calling API has been reworked to support multiple parallel tool calls. This required multiple breaking changes (see below). Check out the updated [tools documentation](https://modelfusion.dev/guide/tools/) for details.
431
+
432
+ ### Changed
433
+
434
+ - `Tool` has `parameters` and `returnType` schemas (instead of `inputSchema` and `outputSchema`).
435
+ - `useTool` uses `generateToolCall` under the hood. The return value and error handling has changed.
436
+ - `useToolOrGenerateText` has been renamed to `useToolsOrGenerateText`. It uses `generateToolCallsOrText` under the hood. The return value and error handling has changed. It can invoke several tools in parallel and returns an array of tool results.
437
+ - The `maxRetries` parameter in `guard` has been replaced by a `maxAttempt` parameter.
438
+
439
+ ### Removed
440
+
441
+ - `generateStructureOrText` has been removed.
442
+
443
+ ## v0.71.0 - 2023-11-17
444
+
445
+ ### Added
446
+
447
+ - Experimental generateToolCallsOrText function for generating multiple parallel tool calls using the OpenAI chat/tools API.
448
+
449
+ ## v0.70.0 - 2023-11-16
450
+
451
+ ### Added
452
+
453
+ - ChatML prompt format.
454
+
455
+ ### Changed
456
+
457
+ - **breaking change**: `ChatPrompt` structure and terminology has changed to align more closely with OpenAI and similar chat prompts. This is also in preparation for integrating images and function calls results into chat prompts.
458
+ - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-format) for documentation of the new prompt formats.
459
+
460
+ ## v0.69.0 - 2023-11-15
461
+
462
+ ### Added
463
+
464
+ - Experimental generateToolCall function for generating a single tool call using the OpenAI chat/tools API.
465
+
466
+ ## v0.68.0 - 2023-11-14
467
+
468
+ ### Changed
469
+
470
+ - Refactored JSON parsing to use abstracted schemas. You can use `parseJSON` and `safeParseJSON` to securely parse JSON objects and optionally type-check them using any schema (e.g. a Zod schema).
471
+
472
+ ## v0.67.0 - 2023-11-12
473
+
474
+ ### Added
475
+
476
+ - Ollama 0.1.9 support: `format` (for forcing JSON output) and `raw` settings
477
+ - Improved Ollama settings documentation
478
+
479
+ ## v0.66.0 - 2023-11-12
480
+
481
+ ### Added
482
+
483
+ - Support for fine-tuned OpenAI `gpt-4-0613` models
484
+ - Support for `trimWhitespace` model setting in `streamText` calls
485
+
486
+ ## v0.65.0 - 2023-11-12
487
+
488
+ ### Added
489
+
490
+ - Image support for `OpenAIChatMessage.user`
491
+ - `mapInstructionPromptToBakLLaVA1ForLlamaCppFormat` prompt format
492
+
493
+ ### Changed
494
+
495
+ - **breaking change**: `VisionInstructionPrompt` was replaced by an optional `image` field in `InstructionPrompt`.
496
+
497
+ ## v0.64.0 - 2023-11-11
498
+
499
+ ### Added
500
+
501
+ - Support for OpenAI vision model.
502
+ - Example: `examples/basic/src/model-provider/openai/openai-chat-stream-text-vision-example.ts`
503
+
504
+ ## v0.63.0 - 2023-11-08
505
+
506
+ ### Added
507
+
508
+ - Support for OpenAI chat completion `seed` and `responseFormat` options.
509
+
510
+ ## v0.62.0 - 2023-11-08
511
+
512
+ ### Added
513
+
514
+ - OpenAI speech generation support. Shoutout to [@bjsi](https://github.com/bjsi) for the awesome contribution!
515
+
516
+ ## v0.61.0 - 2023-11-07
517
+
518
+ ### Added
519
+
520
+ - OpenAI `gpt-3.5-turbo-1106`, `gpt-4-1106-preview`, `gpt-4-vision-preview` chat models.
521
+ - OpenAI `Dall-E-3` image model.
522
+
523
+ ### Changed
524
+
525
+ - **breaking change**: `OpenAIImageGenerationModel` requires a `model` parameter.
526
+
527
+ ## v0.60.0 - 2023-11-06
528
+
529
+ ### Added
530
+
531
+ - Support image input for multi-modal Llama.cpp models (e.g. Llava, Bakllava).
532
+
533
+ ### Changed
534
+
535
+ - **breaking change**: Llama.cpp prompt format has changed to support images. Use `.withTextPrompt()` to get a text prompt format.
536
+
537
+ ## v0.59.0 - 2023-11-06
538
+
539
+ ### Added
540
+
541
+ - ElevenLabs `eleven_turbo_v2` support.
542
+
543
+ ## v0.58.0 - 2023-11-05
544
+
545
+ ### Fixed
546
+
547
+ - **breaking change**: Uncaught errors were caused by custom Promises. ModelFusion uses only standard Promises. To get full responses from model functions, you need to use the `{ returnType: "full" }` option instead of calling `.asFullResponse()` on the result.
548
+
549
+ ## v0.57.1 - 2023-11-05
550
+
551
+ ### Improved
552
+
553
+ - ModelFusion server error logging and reporting.
554
+
555
+ ### Fixed
556
+
557
+ - ModelFusion server creates directory for runs automatically when errors are thrown.
558
+
559
+ ## v0.57.0 - 2023-11-04
560
+
561
+ ### Added
562
+
563
+ - Support for [Cohere v3 embeddings](https://txt.cohere.com/introducing-embed-v3/).
564
+
565
+ ## v0.56.0 - 2023-11-04
566
+
567
+ ### Added
568
+
569
+ - [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text embeddings.
570
+
571
+ ## v0.55.1 - 2023-11-04
572
+
573
+ ### Fixed
574
+
575
+ - Llama.cpp embeddings are invoked sequentially to avoid rejection by the server.
576
+
577
+ ## v0.55.0 - 2023-11-04
578
+
579
+ ### Added
580
+
581
+ - [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text generation and text streaming.
582
+
583
+ ## v0.54.0 - 2023-10-29
584
+
585
+ Adding experimental ModelFusion server, flows, and browser utils.
586
+
587
+ ### Added
588
+
589
+ - ModelFusion server (separate export 'modelfusion/server') with a Fastify plugin for running ModelFusion flows on a server.
590
+ - ModelFusion flows.
591
+ - ModelFusion browser utils (separate export 'modelfusion/browser') for dealing with audio data and invoking ModelFusion flows on the server (`invokeFlow`).
592
+
593
+ ### Changed
594
+
595
+ - **breaking change**: `readEventSource` and `readEventSourceStream` are part of 'modelfusion/browser'.
596
+
597
+ ## v0.53.2 - 2023-10-26
598
+
599
+ ### Added
600
+
601
+ - Prompt callback option for `streamStructure`
602
+
603
+ ### Improved
604
+
605
+ - Inline JSDoc comments for the model functions.
606
+
607
+ ## v0.53.1 - 2023-10-25
608
+
609
+ ### Fixed
610
+
611
+ - Abort signals and errors during streaming are caught and forwarded correctly.
612
+
613
+ ## v0.53.0 - 2023-10-23
614
+
615
+ ### Added
616
+
617
+ - `executeFunction` utility function for tracing execution time, parameters, and result of composite functions and non-ModelFusion functions.
618
+
619
+ ## v0.52.0 - 2023-10-23
620
+
621
+ ### Changed
622
+
623
+ - Streaming results and `AsyncQueue` objects can be used by several consumers. Each consumer will receive all values. This means that you can e.g. forward the same text stream to speech generation and the client.
624
+
625
+ ## v0.51.0 - 2023-10-23
626
+
627
+ ElevenLabs improvements.
628
+
629
+ ### Added
630
+
631
+ - ElevenLabs model settings `outputFormat` and `optimizeStreamingLatency`.
632
+
633
+ ### Fixed
634
+
635
+ - Default ElevenLabs model is `eleven_monolingual_v1`.
636
+
637
+ ## v0.50.0 - 2023-10-22
638
+
639
+ ### Added
640
+
641
+ - `parentCallId` event property
642
+ - Tracing for `useTool`, `useToolOrGenerateText`, `upsertIntoVectorIndex`, and `guard`
643
+
644
+ ### Changed
645
+
646
+ - **breaking change**: rename `embedding` event type to `embed`
647
+ - **breaking change**: rename `image-generation` event type to `generate-image`
648
+ - **breaking change**: rename `speech-generation` event type to `generate-speech`
649
+ - **breaking change**: rename `speech-streaming` event type to `stream-speech`
650
+ - **breaking change**: rename `structure-generation` event type to `generate-structure`
651
+ - **breaking change**: rename `structure-or-text-generation` event type to `generate-structure-or-text`
652
+ - **breaking change**: rename `structure-streaming` event type to `stream-structure`
653
+ - **breaking change**: rename `text-generation` event type to `generate-text`
654
+ - **breaking change**: rename `text-streaming` event type to `stream-text`
655
+ - **breaking change**: rename `transcription` event type to `generate-transcription`
656
+
657
+ ## v0.49.0 - 2023-10-21
658
+
659
+ ### Added
660
+
661
+ - Speech synthesis streaming supports string inputs.
662
+ - Observability for speech synthesis streaming.
663
+
664
+ ### Changed
665
+
666
+ - **breaking change**: split `synthesizeSpeech` into `generateSpeech` and `streamSpeech` functions
667
+ - **breaking change**: renamed `speech-synthesis` event to `speech-generation`
668
+ - **breaking change**: renamed `transcribe` to `generateTranscription`
669
+ - **breaking change**: renamed `LmntSpeechSynthesisModel` to `LmntSpeechModel`
670
+ - **breaking change**: renamed `ElevenLabesSpeechSynthesisModel` to `ElevenLabsSpeechModel`
671
+ - **breaking change**: renamed `OpenAITextGenerationModel` to `OpenAICompletionModel`
672
+
673
+ ### Removed
674
+
675
+ - **breaking change**: `describeImage` model function. Use `generateText` instead (with e.g. `HuggingFaceImageDescriptionModel`).
676
+
677
+ ## v0.48.0 - 2023-10-20
678
+
679
+ ### Added
680
+
681
+ - Duplex streaming for speech synthesis.
682
+ - ElevenLabs duplex streaming support.
683
+
684
+ ### Changed
685
+
686
+ - Schema is using data in return type (breaking change for tools).
687
+
688
+ ## v0.47.0 - 2023-10-14
689
+
690
+ ### Added
691
+
692
+ - Prompt formats for image generation. You can use `.withPromptFormat()` or `.withBasicPrompt()` to apply a prompt format to an image generation model.
693
+
694
+ ### Changed
695
+
696
+ - **breaking change**: `generateImage` returns a Buffer with the binary image data instead of a base-64 encoded string. You can call `.asBase64Text()` on the response to get a base64 encoded string.
697
+
698
+ ## v0.46.0 - 2023-10-14
699
+
700
+ ### Added
701
+
702
+ - `.withChatPrompt()` and `.withInstructionPrompt()` shorthand methods.
703
+
704
+ ## v0.45.0 - 2023-10-14
705
+
706
+ ### Changed
707
+
708
+ - Updated Zod to 3.22.4. You need to use Zod 3.22.4 or higher in your project.
709
+
710
+ ## v0.44.0 - 2023-10-13
711
+
712
+ ### Added
713
+
714
+ - Store runs in AsyncLocalStorage for convenience (Node.js only).
715
+
716
+ ## v0.43.0 - 2023-10-12
717
+
718
+ ### Added
719
+
720
+ - Guard function.
721
+
722
+ ## v0.42.0 - 2023-10-11
723
+
724
+ ### Added
725
+
726
+ - Anthropic model support (Claude 2, Claude instant).
727
+
728
+ ## v0.41.0 - 2023-10-05
729
+
730
+ ### Changed
731
+
732
+ **breaking change**: generics simplification to enable dynamic model usage. Models can be used more easily as function parameters.
733
+
734
+ - `output` renamed to `value` in `asFullResponse()`
735
+ - model settings can no longer be configured as a model options parameter. Use `.withSettings()` instead.
736
+
737
+ ## v0.40.0 - 2023-10-04
738
+
739
+ ### Changed
740
+
741
+ **breaking change**: moved Pinecone integration into `@modelfusion/pinecone` module.
742
+
743
+ ## v0.39.0 - 2023-10-03
744
+
745
+ ### Added
746
+
747
+ - `readEventSource` for parsing a server-sent event stream using the JavaScript EventSource.
748
+
749
+ ### Changed
750
+
751
+ **breaking change**: generalization to use Schema instead of Zod.
752
+
753
+ - `MemoryVectorIndex.deserialize` requires a `Schema`, e.g. `new ZodSchema` (from ModelFusion).
754
+ - `readEventSourceStream` requires a `Schema`.
755
+ - `UncheckedJsonSchema[Schema/StructureDefinition]` renamed to `Unchecked[Schema/StructureDefinition]`.
756
+
757
+ ## v0.38.0 - 2023-10-02
758
+
759
+ ### Changed
760
+
761
+ **breaking change**: Generalized embeddings beyond text embedding.
762
+
763
+ - `embedText` renamed to `embed`.
764
+ - `embedTexts` renamed to `embedMany`
765
+ - Removed filtering from `VectorIndexRetriever` query (still available as a setting).
766
+
767
+ ## v0.37.0 - 2023-10-02
768
+
769
+ ### Added
770
+
771
+ - `VectorIndexRetriever` supports a filter option that is passed to the vector index.
772
+ - `MemoryVectorIndex` supports filter functions that are applied to the objects before calculating the embeddings.
773
+
774
+ ## v0.36.0 - 2023-10-02
775
+
776
+ ### Added
777
+
778
+ - `basic-text` logger logs function ids when available.
779
+ - `retrieve` produces events for logging and observability.
780
+
781
+ ## v0.35.2 - 2023-09-27
782
+
783
+ ### Fixed
784
+
785
+ - Support empty stop sequences when calling OpenAI text and chat models.
786
+
787
+ ## v0.35.1 - 2023-09-27
788
+
789
+ ### Fixed
790
+
791
+ - Fixed bugs in `streamStructure` partial JSON parsing.
792
+
793
+ ## v0.35.0 - 2023-09-26
794
+
795
+ ### Added
796
+
797
+ - `streamStructure` for streaming structured responses, e.g. from OpenAI function calls. Thanks [@bjsi](https://github.com/bjsi) for the input!
798
+
799
+ ## v0.34.0 - 2023-09-25
800
+
801
+ ### Added
802
+
803
+ - First version of event source utilities: `AsyncQueue`, `createEventSourceStream`, `readEventSourceStream`.
804
+
805
+ ## v0.33.1 - 2023-09-24
806
+
807
+ ### Fixed
808
+
809
+ - Remove resolution part from type definitions.
810
+
811
+ ## v0.33.0 - 2023-09-19
812
+
813
+ ### Changed
814
+
815
+ **breaking change**: Generalized vector store upsert/retrieve beyond text chunks:
816
+
817
+ - `upsertTextChunks` renamed to `upsertIntoVectorStore`. Syntax has changed.
818
+ - `retrieveTextChunks` renamed to `retrieve`
819
+ - `SimilarTextChunksFromVectorIndexRetriever` renamed to `VectorIndexRetriever`
820
+
821
+ ## v0.32.0 - 2023-09-19
822
+
823
+ ### Added
824
+
825
+ - OpenAI gpt-3.5-turbo-instruct model support.
826
+ - Autocomplete for Stability AI models (thanks [@Danielwinkelmann](https://github.com/Danielwinkelmann)!)
827
+
828
+ ### Changed
829
+
830
+ - Downgrade Zod version to 3.21.4 because of https://github.com/colinhacks/zod/issues/2697
831
+
832
+ ## v0.31.0 - 2023-09-13
833
+
834
+ ### Changed
835
+
836
+ - **breaking change**: Renamed chat format construction functions to follow the pattern `map[Chat|Instruction]PromptTo[FORMAT]Format()`, e.g. `mapInstructionPromptToAlpacaFormat()`, for easy auto-completion.
837
+
838
+ ### Removed
839
+
840
+ - **breaking change**: The prompts for `generateStructure` and `generateStructureOrText` have been simplified. You can remove the `OpenAIChatPrompt.forStructureCurried` (and similar) parts.
841
+
842
+ ## v0.30.0 - 2023-09-10
843
+
844
+ ### Added
845
+
846
+ - You can directly pass JSON schemas into `generateStructure` and `generateStructureOrText` calls without validation using `UncheckedJsonSchemaStructureDefinition`. This is useful when you need more flexibility and don't require type inference. See `examples/basic/src/util/schema/generate-structure-unchecked-json-schema-example.ts`.
847
+
848
+ ### Changed
849
+
850
+ - **BREAKING CHANGE**: renamed `generateJson` and `generateJsonOrText` to `generateStructure` and `generateStructureOrText`.
851
+ - **BREAKING CHANGE**: introduced `ZodSchema` and `ZodStructureDefinition`. These are required for `generateStructure` and `generateStructureOrText` calls and in tools.
852
+ - **BREAKING CHANGE**: renamed the corresponding methods and objects.
853
+
854
+ Why this breaking change?
855
+
856
+ ModelFusion is currently tied to Zod, but there are many other type checking libraries out there, and Zod does not map perfectly to JSON Schema (which is used in OpenAI function calling).
857
+ Enabling you to use JSON Schema directly in ModelFusion is a first step towards decoupling ModelFusion from Zod.
858
+ You can also configure your own schema adapters that e.g. use Ajv or another library.
859
+ Since this change already affected all JSON generation calls and tools, I included other changes that I had planned in the same area (e.g., renaming to generateStructure and making it more consistent).
860
+
861
+ ## v0.29.0 - 2023-09-09
862
+
863
+ ### Added
864
+
865
+ - `describeImage` model function for image captioning and OCR. HuggingFace provider available.
866
+
867
+ ## v0.28.0 - 2023-09-09
868
+
869
+ ### Added
870
+
871
+ - BaseUrlApiConfiguration class for setting up API configurations with custom base URLs and headers.
872
+
873
+ ## v0.27.0 - 2023-09-07
874
+
875
+ ### Added
876
+
877
+ - Support for running OpenAI on Microsoft Azure.
878
+
879
+ ### Changed
880
+
881
+ - **Breaking change**: Introduce API configuration. This affects setting the baseUrl, throttling, and retries.
882
+ - Improved Helicone support via `HeliconeOpenAIApiConfiguration`.
883
+
884
+ ## v0.26.0 - 2023-09-06
885
+
886
+ ### Added
887
+
888
+ - LMNT speech synthesis support.
889
+
890
+ ## v0.25.0 - 2023-09-05
891
+
892
+ ### Changed
893
+
894
+ - Separated cost calculation from Run.
895
+
896
+ ## v0.24.1 - 2023-09-04
897
+
898
+ ### Added
899
+
900
+ - Exposed `logitBias` setting for OpenAI chat and text generation models.
901
+
902
+ ## v0.24.0 - 2023-09-02
903
+
904
+ ### Added
905
+
906
+ - Support for fine-tuned OpenAI models (for the `davinci-002`, `babbage-002`, and `gpt-3.5-turbo` base models).
907
+
908
+ ## v0.23.0 - 2023-08-31
909
+
910
+ ### Added
911
+
912
+ - Function logging support.
913
+ - Usage information for events.
914
+ - Filtering of model settings for events.
915
+
916
+ ## v0.22.0 - 2023-08-28
917
+
918
+ ### Changed
919
+
920
+ - **Breaking change**: Restructured the function call events.
921
+
922
+ ## v0.21.0 - 2023-08-26
923
+
924
+ ### Changed
925
+
926
+ - **Breaking change**: Reworked the function observer system. See [Function observers](https://modelfusion.dev/guide/util/observer) for details on how to use the new system.
927
+
928
+ ## v0.20.0 - 2023-08-24
929
+
930
+ ### Changed
931
+
932
+ - **Breaking change**: Use `.asFullResponse()` to get full responses from model functions (replaces the `fullResponse: true` option).
933
+
934
+ ## v0.19.0 - 2023-08-23
935
+
936
+ ### Added
937
+
938
+ - Support for "babbage-002" and "davinci-002" OpenAI base models.
939
+
940
+ ### Fixed
941
+
942
+ - Choose correct tokenizer for older OpenAI text models.
943
+
944
+ ## v0.18.0 - 2023-08-22
945
+
946
+ ### Added
947
+
948
+ - Support for ElevenLabs speech synthesis parameters.
949
+
950
+ ## v0.17.0 - 2023-08-21
951
+
952
+ ### Added
953
+
954
+ - `generateSpeech` function to generate speech from text.
955
+ - ElevenLabs support.
956
+
957
+ ## v0.15.0 - 2023-08-21
958
+
959
+ ### Changed
960
+
961
+ - Introduced unified `stopSequences` and `maxCompletionTokens` properties for all text generation models. **Breaking change**: `maxCompletionTokens` and `stopSequences` are part of the base TextGenerationModel. Specific names for these properties in models have been replaced by this, e.g. `maxTokens` in OpenAI models is `maxCompletionTokens`.
962
+
963
+ ## v0.14.0 - 2023-08-17
964
+
965
+ ### Changed
966
+
967
+ - **Breaking change**: Renamed prompt mappings (and related code) to prompt format.
968
+ - Improved type inference for WebSearchTool and executeTool.
969
+
970
+ ## v0.12.0 - 2023-08-15
971
+
972
+ ### Added
973
+
974
+ - JsonTextGenerationModel and InstructionWithSchemaPrompt to support generateJson on text generation models.
975
+
976
+ ## v0.11.0 - 2023-08-14
977
+
978
+ ### Changed
979
+
980
+ - WebSearchTool signature updated.
981
+
982
+ ## v0.10.0 - 2023-08-13
983
+
984
+ ### Added
985
+
986
+ - Convenience functions to create OpenAI chat messages from tool calls and results.
987
+
988
+ ## v0.9.0 - 2023-08-13
989
+
990
+ ### Added
991
+
992
+ - `WebSearchTool` definition to support the SerpAPI tool (separate package: `@modelfusion/serpapi-tools`)
993
+
994
+ ## v0.8.0 - 2023-08-12
995
+
996
+ ### Added
997
+
998
+ - `executeTool` function that directly executes a single tool and records execution metadata.
999
+
1000
+ ### Changed
1001
+
1002
+ - Reworked event system and introduced RunFunctionEvent.
1003
+
1004
+ ## v0.7.0 - 2023-08-10
1005
+
1006
+ ### Changed
1007
+
1008
+ - **Breaking change**: Model functions return a simple object by default to make the 95% use case easier. You can use the `fullResponse` option to get a richer response object that includes the original model response and metadata.
1009
+
1010
+ ## v0.6.0 - 2023-08-07
1011
+
1012
+ ### Added
1013
+
1014
+ - `splitTextChunk` function.
1015
+
1016
+ ### Changed
1017
+
1018
+ - **Breaking change**: Restructured text splitter functions.
1019
+
1020
+ ## v0.5.0 - 2023-08-07
1021
+
1022
+ ### Added
1023
+
1024
+ - `splitTextChunks` function.
1025
+ - Chat with PDF demo.
1026
+
1027
+ ### Changed
1028
+
1029
+ - **Breaking change**: Renamed VectorIndexSimilarTextChunkRetriever to SimilarTextChunksFromVectorIndexRetriever.
1030
+ - **Breaking change**: Renamed 'content' property in TextChunk to 'text'.
1031
+
1032
+ ### Removed
1033
+
1034
+ - `VectorIndexTextChunkStore`
1035
+
1036
+ ## v0.4.1 - 2023-08-06
1037
+
1038
+ ### Fixed
1039
+
1040
+ - Type inference bug in `trimChatPrompt`.
1041
+
1042
+ ## v0.4.0 - 2023-08-06
1043
+
1044
+ ### Added
1045
+
1046
+ - HuggingFace text embedding support.
1047
+
1048
+ ## v0.3.0 - 2023-08-05
1049
+
1050
+ ### Added
1051
+
1052
+ - Helicone observability integration.
1053
+
1054
+ ## v0.2.0 - 2023-08-04
1055
+
1056
+ ### Added
1057
+
1058
+ - Instruction prompts can contain optional `input` property.
1059
+ - Alpaca instruction prompt mapping.
1060
+ - Vicuna chat prompt mapping.
1061
+
1062
+ ## v0.1.1 - 2023-08-02
1063
+
1064
+ ### Changed
1065
+
1066
+ - Docs updated to ModelFusion.
1067
+
1068
+ ## v0.1.0 - 2023-08-01
1069
+
1070
+ ### Changed
1071
+
1072
+ - **Breaking Change**: Renamed to `modelfusion` (from `ai-utils.js`).
1073
+
1074
+ ## v0.0.43 - 2023-08-01
1075
+
1076
+ ### Changed
1077
+
1078
+ - **Breaking Change**: model functions return rich objects that include the result, the model response and metadata. This enables you to access the original model response easily when you need it and also use the metadata outside of runs.
1079
+
1080
+ ## v0.0.42 - 2023-07-31
1081
+
1082
+ ### Added
1083
+
1084
+ - `trimChatPrompt()` function to fit chat prompts into the context window and leave enough space for the completion.
1085
+ - `maxCompletionTokens` property on TextGenerationModels.
1086
+
1087
+ ### Changed
1088
+
1089
+ - Renamed `withMaxTokens` to `withMaxCompletionTokens` on TextGenerationModels.
1090
+
1091
+ ### Removed
1092
+
1093
+ - `composeRecentMessagesOpenAIChatPrompt` function (use `trimChatPrompt` instead).
1094
+
1095
+ ## v0.0.41 - 2023-07-30
1096
+
1097
+ ### Added
1098
+
1099
+ - ChatPrompt concept (with chat prompt mappings for text, OpenAI chat, and Llama 2 prompts).
1100
+
1101
+ ### Changed
1102
+
1103
+ - Renamed prompt mappings and changed into functions.
1104
+
1105
+ ## v0.0.40 - 2023-07-30
1106
+
1107
+ ### Added
1108
+
1109
+ - Prompt mapping support for text generation and streaming.
1110
+ - Added instruction prompt concept and mapping.
1111
+ - Option to specify context window size for Llama.cpp text generation models.
1112
+
1113
+ ### Changed
1114
+
1115
+ - Renamed 'maxTokens' to 'contextWindowSize' where applicable.
1116
+ - Restructured how tokenizers are exposed by text generation models.
1117
+
1118
+ ## v0.0.39 - 2023-07-26
1119
+
1120
+ ### Added
1121
+
1122
+ - llama.cpp embedding support.
1123
+
1124
+ ## v0.0.38 - 2023-07-24
1125
+
1126
+ ### Changed
1127
+
1128
+ - `zod` and `zod-to-json-schema` are peer dependencies and no longer included in the package.
1129
+
1130
+ ## v0.0.37 - 2023-07-23
1131
+
1132
+ ### Changed
1133
+
1134
+ - `generateJsonOrText`, `useToolOrGenerateText`, `useTool` return additional information in the response (e.g. the parameters and additional text).
1135
+
1136
+ ## v0.0.36 - 2023-07-23
1137
+
1138
+ ### Changed
1139
+
1140
+ - Renamed `callTool` to `useTool` and `callToolOrGenerateText` to `useToolOrGenerateText`.
1141
+
1142
+ ## v0.0.35 - 2023-07-22
1143
+
1144
+ ### Added
1145
+
1146
+ - `generateJsonOrText`
1147
+ - Tools: `Tool` class, `callTool`, `callToolOrGenerateText`
1148
+
1149
+ ### Changed
1150
+
1151
+ - Restructured "generateJson" arguments.
1152
+
1153
+ ## v0.0.34 - 2023-07-18
1154
+
1155
+ ### Removed
1156
+
1157
+ - `asFunction` model function variants. Use JavaScript lambda functions instead.
1158
+
1159
+ ## v0.0.33 - 2023-07-18
1160
+
1161
+ ### Added
1162
+
1163
+ - OpenAIChatAutoFunctionPrompt to call the OpenAI functions API with multiple functions in 'auto' mode.
1164
+
1165
+ ## v0.0.32 - 2023-07-15
1166
+
1167
+ ### Changed
1168
+
1169
+ - Changed the prompt format of the generateJson function.
1170
+
1171
+ ## v0.0.31 - 2023-07-14
1172
+
1173
+ ### Changed
1174
+
1175
+ - Reworked interaction with vectors stores. Removed VectorDB, renamed VectorStore to VectorIndex, and introduced upsertTextChunks and retrieveTextChunks functions.
1176
+
1177
+ ## v0.0.30 - 2023-07-13
1178
+
1179
+ ### Fixed
1180
+
1181
+ - Bugs related to `performance.now()` not being available.
1182
+
1183
+ ## v0.0.29 - 2023-07-13
1184
+
1185
+ ### Added
1186
+
1187
+ - Llama.cpp tokenization support.
1188
+
1189
+ ### Changed
1190
+
1191
+ - Split Tokenizer API into BasicTokenizer and FullTokenizer.
1192
+ - Introduce countTokens function (replacing Tokenizer.countTokens).
1193
+
1194
+ ## v0.0.28 - 2023-07-12
1195
+
1196
+ ### Added
1197
+
1198
+ - Events for streamText.
1199
+
1200
+ ## v0.0.27 - 2023-07-11
1201
+
1202
+ ### Added
1203
+
1204
+ - TextDeltaEventSource for Client/Server streaming support.
1205
+
1206
+ ### Fixed
1207
+
1208
+ - End-of-stream bug in Llama.cpp text streaming.
1209
+
1210
+ ## v0.0.26 - 2023-07-11
1211
+
1212
+ ### Added
1213
+
1214
+ - Streaming support for Cohere text generation models.
1215
+
1216
+ ## v0.0.25 - 2023-07-10
1217
+
1218
+ ### Added
1219
+
1220
+ - Streaming support for OpenAI text completion models.
1221
+ - OpenAI function streaming support (in low-level API).
1222
+
1223
+ ## v0.0.24 - 2023-07-09
1224
+
1225
+ ### Added
1226
+
1227
+ - Generalized text streaming (async string iterable, useful for command line streaming).
1228
+ - Streaming support for Llama.cpp text generation.
1229
+
1230
+ ## v0.0.23 - 2023-07-08
1231
+
1232
+ ### Added
1233
+
1234
+ - Llama.cpp text generation support.
1235
+
1236
+ ## v0.0.22 - 2023-07-08
1237
+
1238
+ ### Changed
1239
+
1240
+ - Convert all main methods (e.g. `model.generateText(...)`) to a functional API (i.e., `generateText(model, ...)`).
1241
+
1242
+ ## v0.0.21 - 2023-07-07
1243
+
1244
+ ### New
1245
+
1246
+ - JSON generation model.
1247
+
1248
+ ## v0.0.20 - 2023-07-02
1249
+
1250
+ ### New
1251
+
1252
+ - Automatic1111 image generation provider.
1253
+
1254
+ ## v0.0.19 - 2023-06-30
1255
+
1256
+ ### New
1257
+
1258
+ - Cost calculation for OpenAI image generation and transcription models.
1259
+
1260
+ ## v0.0.18 - 2023-06-28
1261
+
1262
+ ### New
1263
+
1264
+ - Cost calculation for OpenAI text generation, chat and embedding models.
1265
+
1266
+ ### Changed
1267
+
1268
+ - Renamed RunContext to Run. Introduced DefaultRun.
1269
+ - Changed events and observers.
1270
+
1271
+ ## v0.0.17 - 2023-06-14
1272
+
1273
+ ### New
1274
+
1275
+ 1. Updated OpenAI models.
1276
+ 1. Low-level support for OpenAI chat functions API (via `OpenAIChatModel.callApi`).
1277
+ 1. TranscriptionModel and OpenAITranscriptionModel (using `whisper`)
1278
+
1279
+ ### Changed
1280
+
1281
+ 1. Single optional parameter for functions/method that contains run, functionId, etc.
1282
+
1283
+ ## v0.0.16 - 2023-06-13
1284
+
1285
+ ### Fixed
1286
+
1287
+ 1. Retry is not attempted when you ran out of OpenAI credits.
1288
+ 1. Vercel edge function support (switched to nanoid for unique IDs).
1289
+
1290
+ ### Changed
1291
+
1292
+ 1. Improved OpenAI chat streaming API.
1293
+ 1. Changed `asFunction` variants from namespaced functions into stand-alone functions.
1294
+
1295
+ ## v0.0.15 - 2023-06-12
1296
+
1297
+ ### Changed
1298
+
1299
+ 1. Documentation update.
1300
+
1301
+ ## v0.0.14 - 2023-06-11
1302
+
1303
+ ### Changed
1304
+
1305
+ 1. Major rework of embedding APIs.
1306
+
1307
+ ## v0.0.13 - 2023-06-10
1308
+
1309
+ ### Changed
1310
+
1311
+ 1. Major rework of text and image generation APIs.
1312
+
1313
+ ## v0.0.12 - 2023-06-06
1314
+
1315
+ ## v0.0.11 - 2023-06-05
1316
+
1317
+ ### Changed
1318
+
1319
+ 1. Various renames.
1320
+
1321
+ ## v0.0.10 - 2023-06-04
1322
+
1323
+ ### New
1324
+
1325
+ 1. Pinecone VectorDB support
1326
+ 1. Cohere tokenization support
1327
+
1328
+ ## v0.0.9 - 2023-06-03
1329
+
1330
+ ### New
1331
+
1332
+ 1. OpenAI DALL-E image generation support
1333
+ 1. `generateImage` function
1334
+ 1. Throttling and retries on model level
1335
+
1336
+ ## v0.0.8 - 2023-06-02
1337
+
1338
+ ### New
1339
+
1340
+ 1. Stability AI image generation support
1341
+ 1. Image generation Next.js example
1342
+
1343
+ ### Changed
1344
+
1345
+ 1. Updated PDF to tweet example with style transfer
1346
+
1347
+ ## v0.0.7 - 2023-06-01
1348
+
1349
+ ### New
1350
+
1351
+ 1. Hugging Face text generation support
1352
+ 1. Memory vector DB
1353
+
1354
+ ## v0.0.6 - 2023-05-31
1355
+
1356
+ ### New
1357
+
1358
+ 1. Cohere embedding API support
1359
+
1360
+ ### Changed
1361
+
1362
+ 1. Restructured retry logic
1363
+ 1. `embed` embeds many texts at once
1364
+
1365
+ ## v0.0.5 - 2023-05-30
1366
+
1367
+ ### New
1368
+
1369
+ 1. Cohere text generation support
1370
+ 1. OpenAI chat streams can be returned as delta async iterables
1371
+ 1. Documentation of integration APIs and models
1372
+
1373
+ ## v0.0.4 - 2023-05-29
1374
+
1375
+ ### New
1376
+
1377
+ 1. OpenAI embedding support
1378
+ 1. Text embedding functions
1379
+ 1. Chat streams can be returned as ReadableStream or AsyncIterable
1380
+ 1. Basic examples under `examples/basic`
1381
+ 1. Initial documentation available at [modelfusion.dev](https://modelfusion.dev)
1382
+
1383
+ ## v0.0.3 - 2023-05-28
1384
+
1385
+ ### New
1386
+
1387
+ 1. Voice recording and transcription Next.js app example.
1388
+ 1. OpenAI transcription support (Whisper).
1389
+
1390
+ ## v0.0.2 - 2023-05-27
1391
+
1392
+ ### New
1393
+
1394
+ 1. BabyAGI Example in TypeScript
1395
+ 1. TikToken for OpenAI: We've added tiktoken to aid in tokenization and token counting, including those for message and prompt overhead tokens in chat.
1396
+ 1. Tokenization-based Recursive Splitter: A new splitter that operates recursively using tokenization.
1397
+ 1. Prompt Management Utility: An enhancement to fit recent chat messages into the context window.
1398
+
1399
+ ## v0.0.1 - 2023-05-26
1400
+
1401
+ ### New
1402
+
1403
+ 1. AI Chat Example using Next.js: An example demonstrating AI chat implementation using Next.js.
1404
+ 1. PDF to Twitter Thread Example: This shows how a PDF can be converted into a Twitter thread.
1405
+ 1. OpenAI Chat Completion Streaming Support: A feature providing real-time response capabilities using OpenAI's chat completion streaming.
1406
+ 1. OpenAI Chat and Text Completion Support: This addition enables the software to handle both chat and text completions from OpenAI.
1407
+ 1. Retry Management: A feature to enhance resilience by managing retry attempts for tasks.
1408
+ 1. Task Progress Reporting and Abort Signals: This allows users to track the progress of tasks and gives the ability to abort tasks when needed.
1409
+ 1. Recursive Character Splitter: A feature to split text into characters recursively for more detailed text analysis.
1410
+ 1. Recursive Text Mapping: This enables recursive mapping of text, beneficial for tasks like summarization or extraction.
1411
+ 1. Split-Map-Filter-Reduce for Text Processing: A process chain developed for sophisticated text handling, allowing operations to split, map, filter, and reduce text data.