modelfusion 0.133.0 → 0.135.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +10854 -24
- package/index.cjs.map +1 -0
- package/index.d.cts +9547 -0
- package/index.d.ts +9547 -9
- package/index.js +10630 -9
- package/index.js.map +1 -0
- package/internal/index.cjs +865 -28
- package/internal/index.cjs.map +1 -0
- package/internal/index.d.cts +675 -0
- package/internal/index.d.ts +675 -8
- package/internal/index.js +820 -7
- package/internal/index.js.map +1 -0
- package/package.json +7 -7
- package/CHANGELOG.md +0 -2272
- package/README.md +0 -697
- package/core/DefaultRun.cjs +0 -72
- package/core/DefaultRun.d.ts +0 -24
- package/core/DefaultRun.js +0 -68
- package/core/ExecuteFunctionEvent.cjs +0 -2
- package/core/ExecuteFunctionEvent.d.ts +0 -7
- package/core/ExecuteFunctionEvent.js +0 -1
- package/core/ExtensionFunctionEvent.cjs +0 -2
- package/core/ExtensionFunctionEvent.d.ts +0 -11
- package/core/ExtensionFunctionEvent.js +0 -1
- package/core/FunctionEvent.cjs +0 -2
- package/core/FunctionEvent.d.ts +0 -85
- package/core/FunctionEvent.js +0 -1
- package/core/FunctionEventSource.cjs +0 -32
- package/core/FunctionEventSource.d.ts +0 -12
- package/core/FunctionEventSource.js +0 -28
- package/core/FunctionObserver.cjs +0 -2
- package/core/FunctionObserver.d.ts +0 -7
- package/core/FunctionObserver.js +0 -1
- package/core/FunctionOptions.cjs +0 -2
- package/core/FunctionOptions.d.ts +0 -49
- package/core/FunctionOptions.js +0 -1
- package/core/LogFormat.cjs +0 -10
- package/core/LogFormat.d.ts +0 -9
- package/core/LogFormat.js +0 -9
- package/core/ModelFusionConfiguration.cjs +0 -21
- package/core/ModelFusionConfiguration.d.ts +0 -6
- package/core/ModelFusionConfiguration.js +0 -14
- package/core/Run.cjs +0 -2
- package/core/Run.d.ts +0 -31
- package/core/Run.js +0 -1
- package/core/Vector.cjs +0 -2
- package/core/Vector.d.ts +0 -5
- package/core/Vector.js +0 -1
- package/core/api/AbortError.cjs +0 -9
- package/core/api/AbortError.d.ts +0 -3
- package/core/api/AbortError.js +0 -5
- package/core/api/AbstractApiConfiguration.cjs +0 -37
- package/core/api/AbstractApiConfiguration.d.ts +0 -17
- package/core/api/AbstractApiConfiguration.js +0 -33
- package/core/api/ApiCallError.cjs +0 -73
- package/core/api/ApiCallError.d.ts +0 -30
- package/core/api/ApiCallError.js +0 -69
- package/core/api/ApiConfiguration.cjs +0 -2
- package/core/api/ApiConfiguration.d.ts +0 -41
- package/core/api/ApiConfiguration.js +0 -1
- package/core/api/ApiFacade.cjs +0 -20
- package/core/api/ApiFacade.d.ts +0 -4
- package/core/api/ApiFacade.js +0 -4
- package/core/api/BaseUrlApiConfiguration.cjs +0 -78
- package/core/api/BaseUrlApiConfiguration.d.ts +0 -37
- package/core/api/BaseUrlApiConfiguration.js +0 -73
- package/core/api/BaseUrlApiConfiguration.test.cjs +0 -11
- package/core/api/BaseUrlApiConfiguration.test.d.ts +0 -1
- package/core/api/BaseUrlApiConfiguration.test.js +0 -9
- package/core/api/CustomHeaderProvider.cjs +0 -2
- package/core/api/CustomHeaderProvider.d.ts +0 -2
- package/core/api/CustomHeaderProvider.js +0 -1
- package/core/api/LoadAPIKeyError.cjs +0 -16
- package/core/api/LoadAPIKeyError.d.ts +0 -9
- package/core/api/LoadAPIKeyError.js +0 -12
- package/core/api/RetryError.cjs +0 -42
- package/core/api/RetryError.d.ts +0 -18
- package/core/api/RetryError.js +0 -38
- package/core/api/RetryFunction.cjs +0 -2
- package/core/api/RetryFunction.d.ts +0 -1
- package/core/api/RetryFunction.js +0 -1
- package/core/api/ThrottleFunction.cjs +0 -2
- package/core/api/ThrottleFunction.d.ts +0 -1
- package/core/api/ThrottleFunction.js +0 -1
- package/core/api/callWithRetryAndThrottle.cjs +0 -7
- package/core/api/callWithRetryAndThrottle.d.ts +0 -7
- package/core/api/callWithRetryAndThrottle.js +0 -3
- package/core/api/index.cjs +0 -42
- package/core/api/index.d.ts +0 -13
- package/core/api/index.js +0 -13
- package/core/api/loadApiKey.cjs +0 -22
- package/core/api/loadApiKey.d.ts +0 -6
- package/core/api/loadApiKey.js +0 -18
- package/core/api/postToApi.cjs +0 -185
- package/core/api/postToApi.d.ts +0 -37
- package/core/api/postToApi.js +0 -175
- package/core/api/retryNever.cjs +0 -8
- package/core/api/retryNever.d.ts +0 -4
- package/core/api/retryNever.js +0 -4
- package/core/api/retryWithExponentialBackoff.cjs +0 -50
- package/core/api/retryWithExponentialBackoff.d.ts +0 -10
- package/core/api/retryWithExponentialBackoff.js +0 -46
- package/core/api/throttleMaxConcurrency.cjs +0 -65
- package/core/api/throttleMaxConcurrency.d.ts +0 -7
- package/core/api/throttleMaxConcurrency.js +0 -61
- package/core/api/throttleOff.cjs +0 -8
- package/core/api/throttleOff.d.ts +0 -5
- package/core/api/throttleOff.js +0 -4
- package/core/cache/Cache.cjs +0 -2
- package/core/cache/Cache.d.ts +0 -12
- package/core/cache/Cache.js +0 -1
- package/core/cache/MemoryCache.cjs +0 -23
- package/core/cache/MemoryCache.d.ts +0 -15
- package/core/cache/MemoryCache.js +0 -19
- package/core/cache/index.cjs +0 -18
- package/core/cache/index.d.ts +0 -2
- package/core/cache/index.js +0 -2
- package/core/executeFunction.cjs +0 -13
- package/core/executeFunction.d.ts +0 -2
- package/core/executeFunction.js +0 -9
- package/core/executeFunctionCall.cjs +0 -86
- package/core/executeFunctionCall.d.ts +0 -10
- package/core/executeFunctionCall.js +0 -82
- package/core/getFunctionCallLogger.cjs +0 -106
- package/core/getFunctionCallLogger.d.ts +0 -3
- package/core/getFunctionCallLogger.js +0 -102
- package/core/getRun.cjs +0 -57
- package/core/getRun.d.ts +0 -9
- package/core/getRun.js +0 -29
- package/core/index.cjs +0 -44
- package/core/index.d.ts +0 -15
- package/core/index.js +0 -15
- package/core/schema/JSONParseError.cjs +0 -37
- package/core/schema/JSONParseError.d.ts +0 -15
- package/core/schema/JSONParseError.js +0 -33
- package/core/schema/JsonSchemaProducer.cjs +0 -2
- package/core/schema/JsonSchemaProducer.d.ts +0 -9
- package/core/schema/JsonSchemaProducer.js +0 -1
- package/core/schema/Schema.cjs +0 -2
- package/core/schema/Schema.d.ts +0 -20
- package/core/schema/Schema.js +0 -1
- package/core/schema/TypeValidationError.cjs +0 -36
- package/core/schema/TypeValidationError.d.ts +0 -15
- package/core/schema/TypeValidationError.js +0 -32
- package/core/schema/UncheckedSchema.cjs +0 -30
- package/core/schema/UncheckedSchema.d.ts +0 -16
- package/core/schema/UncheckedSchema.js +0 -25
- package/core/schema/ZodSchema.cjs +0 -47
- package/core/schema/ZodSchema.d.ts +0 -27
- package/core/schema/ZodSchema.js +0 -42
- package/core/schema/index.cjs +0 -24
- package/core/schema/index.d.ts +0 -8
- package/core/schema/index.js +0 -8
- package/core/schema/parseJSON.cjs +0 -48
- package/core/schema/parseJSON.d.ts +0 -57
- package/core/schema/parseJSON.js +0 -40
- package/core/schema/validateTypes.cjs +0 -65
- package/core/schema/validateTypes.d.ts +0 -34
- package/core/schema/validateTypes.js +0 -60
- package/model-function/AbstractModel.cjs +0 -22
- package/model-function/AbstractModel.d.ts +0 -13
- package/model-function/AbstractModel.js +0 -18
- package/model-function/Delta.cjs +0 -2
- package/model-function/Delta.d.ts +0 -7
- package/model-function/Delta.js +0 -1
- package/model-function/Model.cjs +0 -2
- package/model-function/Model.d.ts +0 -31
- package/model-function/Model.js +0 -1
- package/model-function/ModelCallEvent.cjs +0 -2
- package/model-function/ModelCallEvent.d.ts +0 -57
- package/model-function/ModelCallEvent.js +0 -1
- package/model-function/ModelCallMetadata.cjs +0 -2
- package/model-function/ModelCallMetadata.d.ts +0 -13
- package/model-function/ModelCallMetadata.js +0 -1
- package/model-function/ModelInformation.cjs +0 -2
- package/model-function/ModelInformation.d.ts +0 -4
- package/model-function/ModelInformation.js +0 -1
- package/model-function/PromptTemplate.cjs +0 -2
- package/model-function/PromptTemplate.d.ts +0 -9
- package/model-function/PromptTemplate.js +0 -1
- package/model-function/classify/Classifier.cjs +0 -2
- package/model-function/classify/Classifier.d.ts +0 -10
- package/model-function/classify/Classifier.js +0 -1
- package/model-function/classify/ClassifyEvent.cjs +0 -2
- package/model-function/classify/ClassifyEvent.d.ts +0 -20
- package/model-function/classify/ClassifyEvent.js +0 -1
- package/model-function/classify/EmbeddingSimilarityClassifier.cjs +0 -97
- package/model-function/classify/EmbeddingSimilarityClassifier.d.ts +0 -40
- package/model-function/classify/EmbeddingSimilarityClassifier.js +0 -93
- package/model-function/classify/classify.cjs +0 -27
- package/model-function/classify/classify.d.ts +0 -17
- package/model-function/classify/classify.js +0 -23
- package/model-function/classify/index.cjs +0 -20
- package/model-function/classify/index.d.ts +0 -4
- package/model-function/classify/index.js +0 -4
- package/model-function/embed/EmbeddingEvent.cjs +0 -2
- package/model-function/embed/EmbeddingEvent.d.ts +0 -21
- package/model-function/embed/EmbeddingEvent.js +0 -1
- package/model-function/embed/EmbeddingModel.cjs +0 -2
- package/model-function/embed/EmbeddingModel.d.ts +0 -23
- package/model-function/embed/EmbeddingModel.js +0 -1
- package/model-function/embed/embed.cjs +0 -77
- package/model-function/embed/embed.d.ts +0 -67
- package/model-function/embed/embed.js +0 -72
- package/model-function/executeStandardCall.cjs +0 -111
- package/model-function/executeStandardCall.d.ts +0 -19
- package/model-function/executeStandardCall.js +0 -107
- package/model-function/executeStreamCall.cjs +0 -180
- package/model-function/executeStreamCall.d.ts +0 -20
- package/model-function/executeStreamCall.js +0 -176
- package/model-function/generate-image/ImageGenerationEvent.cjs +0 -2
- package/model-function/generate-image/ImageGenerationEvent.d.ts +0 -18
- package/model-function/generate-image/ImageGenerationEvent.js +0 -1
- package/model-function/generate-image/ImageGenerationModel.cjs +0 -2
- package/model-function/generate-image/ImageGenerationModel.d.ts +0 -22
- package/model-function/generate-image/ImageGenerationModel.js +0 -1
- package/model-function/generate-image/PromptTemplateImageGenerationModel.cjs +0 -44
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +0 -20
- package/model-function/generate-image/PromptTemplateImageGenerationModel.js +0 -40
- package/model-function/generate-image/generateImage.cjs +0 -33
- package/model-function/generate-image/generateImage.d.ts +0 -43
- package/model-function/generate-image/generateImage.js +0 -29
- package/model-function/generate-object/ObjectFromTextGenerationModel.cjs +0 -69
- package/model-function/generate-object/ObjectFromTextGenerationModel.d.ts +0 -24
- package/model-function/generate-object/ObjectFromTextGenerationModel.js +0 -65
- package/model-function/generate-object/ObjectFromTextPromptTemplate.cjs +0 -2
- package/model-function/generate-object/ObjectFromTextPromptTemplate.d.ts +0 -30
- package/model-function/generate-object/ObjectFromTextPromptTemplate.js +0 -1
- package/model-function/generate-object/ObjectFromTextStreamingModel.cjs +0 -48
- package/model-function/generate-object/ObjectFromTextStreamingModel.d.ts +0 -19
- package/model-function/generate-object/ObjectFromTextStreamingModel.js +0 -44
- package/model-function/generate-object/ObjectGenerationEvent.cjs +0 -2
- package/model-function/generate-object/ObjectGenerationEvent.d.ts +0 -23
- package/model-function/generate-object/ObjectGenerationEvent.js +0 -1
- package/model-function/generate-object/ObjectGenerationModel.cjs +0 -2
- package/model-function/generate-object/ObjectGenerationModel.d.ts +0 -24
- package/model-function/generate-object/ObjectGenerationModel.js +0 -1
- package/model-function/generate-object/ObjectParseError.cjs +0 -36
- package/model-function/generate-object/ObjectParseError.d.ts +0 -15
- package/model-function/generate-object/ObjectParseError.js +0 -32
- package/model-function/generate-object/ObjectStream.cjs +0 -57
- package/model-function/generate-object/ObjectStream.d.ts +0 -32
- package/model-function/generate-object/ObjectStream.js +0 -52
- package/model-function/generate-object/ObjectStreamingEvent.cjs +0 -2
- package/model-function/generate-object/ObjectStreamingEvent.d.ts +0 -7
- package/model-function/generate-object/ObjectStreamingEvent.js +0 -1
- package/model-function/generate-object/ObjectValidationError.cjs +0 -44
- package/model-function/generate-object/ObjectValidationError.d.ts +0 -18
- package/model-function/generate-object/ObjectValidationError.js +0 -40
- package/model-function/generate-object/generateObject.cjs +0 -45
- package/model-function/generate-object/generateObject.d.ts +0 -56
- package/model-function/generate-object/generateObject.js +0 -41
- package/model-function/generate-object/index.cjs +0 -28
- package/model-function/generate-object/index.d.ts +0 -12
- package/model-function/generate-object/index.js +0 -12
- package/model-function/generate-object/jsonObjectPrompt.cjs +0 -51
- package/model-function/generate-object/jsonObjectPrompt.d.ts +0 -15
- package/model-function/generate-object/jsonObjectPrompt.js +0 -48
- package/model-function/generate-object/streamObject.cjs +0 -80
- package/model-function/generate-object/streamObject.d.ts +0 -57
- package/model-function/generate-object/streamObject.js +0 -76
- package/model-function/generate-speech/SpeechGenerationEvent.cjs +0 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +0 -26
- package/model-function/generate-speech/SpeechGenerationEvent.js +0 -1
- package/model-function/generate-speech/SpeechGenerationModel.cjs +0 -2
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +0 -14
- package/model-function/generate-speech/SpeechGenerationModel.js +0 -1
- package/model-function/generate-speech/generateSpeech.cjs +0 -27
- package/model-function/generate-speech/generateSpeech.d.ts +0 -34
- package/model-function/generate-speech/generateSpeech.js +0 -23
- package/model-function/generate-speech/index.cjs +0 -20
- package/model-function/generate-speech/index.d.ts +0 -4
- package/model-function/generate-speech/index.js +0 -4
- package/model-function/generate-speech/streamSpeech.cjs +0 -33
- package/model-function/generate-speech/streamSpeech.d.ts +0 -40
- package/model-function/generate-speech/streamSpeech.js +0 -29
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -24
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -40
- package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -20
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -84
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -47
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -80
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -36
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -19
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -32
- package/model-function/generate-text/TextGenerationEvent.cjs +0 -2
- package/model-function/generate-text/TextGenerationEvent.d.ts +0 -29
- package/model-function/generate-text/TextGenerationEvent.js +0 -1
- package/model-function/generate-text/TextGenerationModel.cjs +0 -9
- package/model-function/generate-text/TextGenerationModel.d.ts +0 -126
- package/model-function/generate-text/TextGenerationModel.js +0 -6
- package/model-function/generate-text/TextGenerationPromptTemplate.cjs +0 -2
- package/model-function/generate-text/TextGenerationPromptTemplate.d.ts +0 -11
- package/model-function/generate-text/TextGenerationPromptTemplate.js +0 -1
- package/model-function/generate-text/TextGenerationResult.cjs +0 -2
- package/model-function/generate-text/TextGenerationResult.d.ts +0 -11
- package/model-function/generate-text/TextGenerationResult.js +0 -1
- package/model-function/generate-text/generateText.cjs +0 -82
- package/model-function/generate-text/generateText.d.ts +0 -41
- package/model-function/generate-text/generateText.js +0 -78
- package/model-function/generate-text/index.cjs +0 -26
- package/model-function/generate-text/index.d.ts +0 -10
- package/model-function/generate-text/index.js +0 -10
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +0 -90
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +0 -51
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +0 -84
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.cjs +0 -31
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.js +0 -29
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +0 -96
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +0 -35
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +0 -90
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -44
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +0 -58
- package/model-function/generate-text/prompt-template/ChatPrompt.js +0 -41
- package/model-function/generate-text/prompt-template/ContentPart.cjs +0 -11
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +0 -31
- package/model-function/generate-text/prompt-template/ContentPart.js +0 -7
- package/model-function/generate-text/prompt-template/InstructionPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +0 -32
- package/model-function/generate-text/prompt-template/InstructionPrompt.js +0 -1
- package/model-function/generate-text/prompt-template/InvalidPromptError.cjs +0 -28
- package/model-function/generate-text/prompt-template/InvalidPromptError.d.ts +0 -13
- package/model-function/generate-text/prompt-template/InvalidPromptError.js +0 -24
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +0 -135
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +0 -55
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +0 -128
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +0 -150
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +0 -62
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +0 -143
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +0 -86
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +0 -23
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +0 -80
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +0 -2
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +0 -8
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +0 -1
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.cjs +0 -78
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.d.ts +0 -35
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.js +0 -72
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +0 -69
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +0 -23
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +0 -63
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +0 -86
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +0 -25
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +0 -80
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +0 -60
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.d.ts +0 -1
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +0 -58
- package/model-function/generate-text/prompt-template/index.cjs +0 -43
- package/model-function/generate-text/prompt-template/index.d.ts +0 -14
- package/model-function/generate-text/prompt-template/index.js +0 -14
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -46
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +0 -17
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -42
- package/model-function/generate-text/streamText.cjs +0 -54
- package/model-function/generate-text/streamText.d.ts +0 -42
- package/model-function/generate-text/streamText.js +0 -50
- package/model-function/generate-transcription/TranscriptionEvent.cjs +0 -2
- package/model-function/generate-transcription/TranscriptionEvent.d.ts +0 -18
- package/model-function/generate-transcription/TranscriptionEvent.js +0 -1
- package/model-function/generate-transcription/TranscriptionModel.cjs +0 -2
- package/model-function/generate-transcription/TranscriptionModel.d.ts +0 -14
- package/model-function/generate-transcription/TranscriptionModel.js +0 -1
- package/model-function/generate-transcription/generateTranscription.cjs +0 -22
- package/model-function/generate-transcription/generateTranscription.d.ts +0 -41
- package/model-function/generate-transcription/generateTranscription.js +0 -18
- package/model-function/index.cjs +0 -38
- package/model-function/index.d.ts +0 -22
- package/model-function/index.js +0 -22
- package/model-function/tokenize-text/Tokenizer.cjs +0 -2
- package/model-function/tokenize-text/Tokenizer.d.ts +0 -43
- package/model-function/tokenize-text/Tokenizer.js +0 -1
- package/model-function/tokenize-text/countTokens.cjs +0 -10
- package/model-function/tokenize-text/countTokens.d.ts +0 -5
- package/model-function/tokenize-text/countTokens.js +0 -6
- package/model-provider/automatic1111/Automatic1111ApiConfiguration.cjs +0 -22
- package/model-provider/automatic1111/Automatic1111ApiConfiguration.d.ts +0 -8
- package/model-provider/automatic1111/Automatic1111ApiConfiguration.js +0 -18
- package/model-provider/automatic1111/Automatic1111Error.cjs +0 -16
- package/model-provider/automatic1111/Automatic1111Error.d.ts +0 -22
- package/model-provider/automatic1111/Automatic1111Error.js +0 -13
- package/model-provider/automatic1111/Automatic1111Facade.cjs +0 -24
- package/model-provider/automatic1111/Automatic1111Facade.d.ts +0 -16
- package/model-provider/automatic1111/Automatic1111Facade.js +0 -19
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +0 -101
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +0 -68
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +0 -97
- package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.cjs +0 -12
- package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +0 -9
- package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.js +0 -8
- package/model-provider/automatic1111/index.cjs +0 -33
- package/model-provider/automatic1111/index.d.ts +0 -5
- package/model-provider/automatic1111/index.js +0 -4
- package/model-provider/cohere/CohereApiConfiguration.cjs +0 -30
- package/model-provider/cohere/CohereApiConfiguration.d.ts +0 -10
- package/model-provider/cohere/CohereApiConfiguration.js +0 -26
- package/model-provider/cohere/CohereError.cjs +0 -13
- package/model-provider/cohere/CohereError.d.ts +0 -11
- package/model-provider/cohere/CohereError.js +0 -10
- package/model-provider/cohere/CohereFacade.cjs +0 -80
- package/model-provider/cohere/CohereFacade.d.ts +0 -68
- package/model-provider/cohere/CohereFacade.js +0 -73
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +0 -170
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +0 -131
- package/model-provider/cohere/CohereTextEmbeddingModel.js +0 -166
- package/model-provider/cohere/CohereTextGenerationModel.cjs +0 -244
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +0 -379
- package/model-provider/cohere/CohereTextGenerationModel.js +0 -240
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +0 -36
- package/model-provider/cohere/CohereTextGenerationModel.test.d.ts +0 -1
- package/model-provider/cohere/CohereTextGenerationModel.test.js +0 -34
- package/model-provider/cohere/CohereTokenizer.cjs +0 -116
- package/model-provider/cohere/CohereTokenizer.d.ts +0 -113
- package/model-provider/cohere/CohereTokenizer.js +0 -112
- package/model-provider/cohere/index.cjs +0 -34
- package/model-provider/cohere/index.d.ts +0 -6
- package/model-provider/cohere/index.js +0 -5
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +0 -33
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.d.ts +0 -11
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +0 -29
- package/model-provider/elevenlabs/ElevenLabsFacade.cjs +0 -27
- package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +0 -21
- package/model-provider/elevenlabs/ElevenLabsFacade.js +0 -22
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +0 -218
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +0 -43
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +0 -214
- package/model-provider/elevenlabs/index.cjs +0 -32
- package/model-provider/elevenlabs/index.d.ts +0 -3
- package/model-provider/elevenlabs/index.js +0 -3
- package/model-provider/huggingface/HuggingFaceApiConfiguration.cjs +0 -30
- package/model-provider/huggingface/HuggingFaceApiConfiguration.d.ts +0 -10
- package/model-provider/huggingface/HuggingFaceApiConfiguration.js +0 -26
- package/model-provider/huggingface/HuggingFaceError.cjs +0 -13
- package/model-provider/huggingface/HuggingFaceError.d.ts +0 -11
- package/model-provider/huggingface/HuggingFaceError.js +0 -10
- package/model-provider/huggingface/HuggingFaceFacade.cjs +0 -64
- package/model-provider/huggingface/HuggingFaceFacade.d.ts +0 -55
- package/model-provider/huggingface/HuggingFaceFacade.js +0 -58
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +0 -131
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +0 -56
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +0 -127
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +0 -144
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +0 -84
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +0 -140
- package/model-provider/huggingface/index.cjs +0 -33
- package/model-provider/huggingface/index.d.ts +0 -5
- package/model-provider/huggingface/index.js +0 -4
- package/model-provider/index.cjs +0 -28
- package/model-provider/index.d.ts +0 -12
- package/model-provider/index.js +0 -12
- package/model-provider/llamacpp/LlamaCppApiConfiguration.cjs +0 -22
- package/model-provider/llamacpp/LlamaCppApiConfiguration.d.ts +0 -8
- package/model-provider/llamacpp/LlamaCppApiConfiguration.js +0 -18
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +0 -119
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +0 -15
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +0 -113
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +0 -326
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +0 -957
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +0 -322
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +0 -40
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +0 -1
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +0 -38
- package/model-provider/llamacpp/LlamaCppError.cjs +0 -13
- package/model-provider/llamacpp/LlamaCppError.d.ts +0 -11
- package/model-provider/llamacpp/LlamaCppError.js +0 -10
- package/model-provider/llamacpp/LlamaCppFacade.cjs +0 -55
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +0 -19
- package/model-provider/llamacpp/LlamaCppFacade.js +0 -25
- package/model-provider/llamacpp/LlamaCppGrammars.cjs +0 -86
- package/model-provider/llamacpp/LlamaCppGrammars.d.ts +0 -19
- package/model-provider/llamacpp/LlamaCppGrammars.js +0 -82
- package/model-provider/llamacpp/LlamaCppPrompt.cjs +0 -93
- package/model-provider/llamacpp/LlamaCppPrompt.d.ts +0 -47
- package/model-provider/llamacpp/LlamaCppPrompt.js +0 -65
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +0 -96
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +0 -39
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +0 -92
- package/model-provider/llamacpp/LlamaCppTokenizer.cjs +0 -64
- package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +0 -32
- package/model-provider/llamacpp/LlamaCppTokenizer.js +0 -60
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.cjs +0 -113
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.d.ts +0 -7
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.js +0 -109
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.cjs +0 -150
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.d.ts +0 -1
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.js +0 -148
- package/model-provider/llamacpp/index.cjs +0 -34
- package/model-provider/llamacpp/index.d.ts +0 -6
- package/model-provider/llamacpp/index.js +0 -5
- package/model-provider/lmnt/LmntApiConfiguration.cjs +0 -30
- package/model-provider/lmnt/LmntApiConfiguration.d.ts +0 -10
- package/model-provider/lmnt/LmntApiConfiguration.js +0 -26
- package/model-provider/lmnt/LmntFacade.cjs +0 -24
- package/model-provider/lmnt/LmntFacade.d.ts +0 -18
- package/model-provider/lmnt/LmntFacade.js +0 -19
- package/model-provider/lmnt/LmntSpeechModel.cjs +0 -103
- package/model-provider/lmnt/LmntSpeechModel.d.ts +0 -73
- package/model-provider/lmnt/LmntSpeechModel.js +0 -99
- package/model-provider/lmnt/index.cjs +0 -32
- package/model-provider/lmnt/index.d.ts +0 -3
- package/model-provider/lmnt/index.js +0 -3
- package/model-provider/mistral/MistralApiConfiguration.cjs +0 -30
- package/model-provider/mistral/MistralApiConfiguration.d.ts +0 -10
- package/model-provider/mistral/MistralApiConfiguration.js +0 -26
- package/model-provider/mistral/MistralChatModel.cjs +0 -203
- package/model-provider/mistral/MistralChatModel.d.ts +0 -357
- package/model-provider/mistral/MistralChatModel.js +0 -199
- package/model-provider/mistral/MistralChatModel.test.cjs +0 -58
- package/model-provider/mistral/MistralChatModel.test.d.ts +0 -1
- package/model-provider/mistral/MistralChatModel.test.js +0 -56
- package/model-provider/mistral/MistralChatPromptTemplate.cjs +0 -72
- package/model-provider/mistral/MistralChatPromptTemplate.d.ts +0 -16
- package/model-provider/mistral/MistralChatPromptTemplate.js +0 -66
- package/model-provider/mistral/MistralError.cjs +0 -17
- package/model-provider/mistral/MistralError.d.ts +0 -25
- package/model-provider/mistral/MistralError.js +0 -14
- package/model-provider/mistral/MistralFacade.cjs +0 -22
- package/model-provider/mistral/MistralFacade.d.ts +0 -14
- package/model-provider/mistral/MistralFacade.js +0 -16
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +0 -106
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +0 -106
- package/model-provider/mistral/MistralTextEmbeddingModel.js +0 -102
- package/model-provider/mistral/index.cjs +0 -33
- package/model-provider/mistral/index.d.ts +0 -5
- package/model-provider/mistral/index.js +0 -4
- package/model-provider/ollama/OllamaApiConfiguration.cjs +0 -22
- package/model-provider/ollama/OllamaApiConfiguration.d.ts +0 -8
- package/model-provider/ollama/OllamaApiConfiguration.js +0 -18
- package/model-provider/ollama/OllamaChatModel.cjs +0 -290
- package/model-provider/ollama/OllamaChatModel.d.ts +0 -285
- package/model-provider/ollama/OllamaChatModel.js +0 -286
- package/model-provider/ollama/OllamaChatModel.test.cjs +0 -32
- package/model-provider/ollama/OllamaChatModel.test.d.ts +0 -1
- package/model-provider/ollama/OllamaChatModel.test.js +0 -30
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +0 -103
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +0 -20
- package/model-provider/ollama/OllamaChatPromptTemplate.js +0 -96
- package/model-provider/ollama/OllamaCompletionModel.cjs +0 -296
- package/model-provider/ollama/OllamaCompletionModel.d.ts +0 -302
- package/model-provider/ollama/OllamaCompletionModel.js +0 -292
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +0 -136
- package/model-provider/ollama/OllamaCompletionModel.test.d.ts +0 -1
- package/model-provider/ollama/OllamaCompletionModel.test.js +0 -134
- package/model-provider/ollama/OllamaCompletionPrompt.cjs +0 -91
- package/model-provider/ollama/OllamaCompletionPrompt.d.ts +0 -45
- package/model-provider/ollama/OllamaCompletionPrompt.js +0 -63
- package/model-provider/ollama/OllamaError.cjs +0 -13
- package/model-provider/ollama/OllamaError.d.ts +0 -13
- package/model-provider/ollama/OllamaError.js +0 -10
- package/model-provider/ollama/OllamaFacade.cjs +0 -51
- package/model-provider/ollama/OllamaFacade.d.ts +0 -15
- package/model-provider/ollama/OllamaFacade.js +0 -21
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +0 -82
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -37
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +0 -78
- package/model-provider/ollama/OllamaTextGenerationSettings.cjs +0 -2
- package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +0 -87
- package/model-provider/ollama/OllamaTextGenerationSettings.js +0 -1
- package/model-provider/ollama/index.cjs +0 -35
- package/model-provider/ollama/index.d.ts +0 -7
- package/model-provider/ollama/index.js +0 -6
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +0 -302
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +0 -805
- package/model-provider/openai/AbstractOpenAIChatModel.js +0 -298
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +0 -180
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +0 -230
- package/model-provider/openai/AbstractOpenAICompletionModel.js +0 -176
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs +0 -83
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts +0 -92
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js +0 -79
- package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +0 -58
- package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +0 -27
- package/model-provider/openai/AzureOpenAIApiConfiguration.js +0 -54
- package/model-provider/openai/OpenAIApiConfiguration.cjs +0 -30
- package/model-provider/openai/OpenAIApiConfiguration.d.ts +0 -10
- package/model-provider/openai/OpenAIApiConfiguration.js +0 -26
- package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.cjs +0 -169
- package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.d.ts +0 -201
- package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.js +0 -162
- package/model-provider/openai/OpenAIChatMessage.cjs +0 -79
- package/model-provider/openai/OpenAIChatMessage.d.ts +0 -84
- package/model-provider/openai/OpenAIChatMessage.js +0 -76
- package/model-provider/openai/OpenAIChatModel.cjs +0 -274
- package/model-provider/openai/OpenAIChatModel.d.ts +0 -166
- package/model-provider/openai/OpenAIChatModel.js +0 -267
- package/model-provider/openai/OpenAIChatModel.test.cjs +0 -101
- package/model-provider/openai/OpenAIChatModel.test.d.ts +0 -1
- package/model-provider/openai/OpenAIChatModel.test.js +0 -99
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +0 -114
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/OpenAIChatPromptTemplate.js +0 -107
- package/model-provider/openai/OpenAICompletionModel.cjs +0 -126
- package/model-provider/openai/OpenAICompletionModel.d.ts +0 -65
- package/model-provider/openai/OpenAICompletionModel.js +0 -119
- package/model-provider/openai/OpenAICompletionModel.test.cjs +0 -59
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +0 -1
- package/model-provider/openai/OpenAICompletionModel.test.js +0 -57
- package/model-provider/openai/OpenAIError.cjs +0 -22
- package/model-provider/openai/OpenAIError.d.ts +0 -36
- package/model-provider/openai/OpenAIError.js +0 -19
- package/model-provider/openai/OpenAIFacade.cjs +0 -173
- package/model-provider/openai/OpenAIFacade.d.ts +0 -146
- package/model-provider/openai/OpenAIFacade.js +0 -160
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +0 -170
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +0 -132
- package/model-provider/openai/OpenAIImageGenerationModel.js +0 -165
- package/model-provider/openai/OpenAISpeechModel.cjs +0 -93
- package/model-provider/openai/OpenAISpeechModel.d.ts +0 -51
- package/model-provider/openai/OpenAISpeechModel.js +0 -88
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +0 -97
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +0 -54
- package/model-provider/openai/OpenAITextEmbeddingModel.js +0 -91
- package/model-provider/openai/OpenAITranscriptionModel.cjs +0 -171
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +0 -232
- package/model-provider/openai/OpenAITranscriptionModel.js +0 -166
- package/model-provider/openai/TikTokenTokenizer.cjs +0 -85
- package/model-provider/openai/TikTokenTokenizer.d.ts +0 -35
- package/model-provider/openai/TikTokenTokenizer.js +0 -78
- package/model-provider/openai/countOpenAIChatMessageTokens.cjs +0 -47
- package/model-provider/openai/countOpenAIChatMessageTokens.d.ts +0 -20
- package/model-provider/openai/countOpenAIChatMessageTokens.js +0 -42
- package/model-provider/openai/index.cjs +0 -44
- package/model-provider/openai/index.d.ts +0 -16
- package/model-provider/openai/index.js +0 -15
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +0 -39
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +0 -15
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +0 -35
- package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.cjs +0 -2
- package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.d.ts +0 -5
- package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.js +0 -1
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -98
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +0 -37
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -94
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +0 -84
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +0 -34
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +0 -80
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +0 -118
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +0 -104
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +0 -109
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +0 -27
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +0 -17
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +0 -23
- package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +0 -39
- package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +0 -15
- package/model-provider/openai-compatible/PerplexityApiConfiguration.js +0 -35
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +0 -39
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +0 -15
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +0 -35
- package/model-provider/openai-compatible/index.cjs +0 -37
- package/model-provider/openai-compatible/index.d.ts +0 -8
- package/model-provider/openai-compatible/index.js +0 -8
- package/model-provider/stability/StabilityApiConfiguration.cjs +0 -30
- package/model-provider/stability/StabilityApiConfiguration.d.ts +0 -10
- package/model-provider/stability/StabilityApiConfiguration.js +0 -26
- package/model-provider/stability/StabilityError.cjs +0 -13
- package/model-provider/stability/StabilityError.d.ts +0 -13
- package/model-provider/stability/StabilityError.js +0 -10
- package/model-provider/stability/StabilityFacade.cjs +0 -40
- package/model-provider/stability/StabilityFacade.d.ts +0 -34
- package/model-provider/stability/StabilityFacade.js +0 -35
- package/model-provider/stability/StabilityImageGenerationModel.cjs +0 -123
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +0 -111
- package/model-provider/stability/StabilityImageGenerationModel.js +0 -119
- package/model-provider/stability/StabilityImageGenerationPrompt.cjs +0 -12
- package/model-provider/stability/StabilityImageGenerationPrompt.d.ts +0 -9
- package/model-provider/stability/StabilityImageGenerationPrompt.js +0 -8
- package/model-provider/stability/index.cjs +0 -33
- package/model-provider/stability/index.d.ts +0 -5
- package/model-provider/stability/index.js +0 -4
- package/model-provider/whispercpp/WhisperCppApiConfiguration.cjs +0 -22
- package/model-provider/whispercpp/WhisperCppApiConfiguration.d.ts +0 -8
- package/model-provider/whispercpp/WhisperCppApiConfiguration.js +0 -18
- package/model-provider/whispercpp/WhisperCppFacade.cjs +0 -17
- package/model-provider/whispercpp/WhisperCppFacade.d.ts +0 -9
- package/model-provider/whispercpp/WhisperCppFacade.js +0 -12
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +0 -125
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +0 -31
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +0 -121
- package/model-provider/whispercpp/index.cjs +0 -32
- package/model-provider/whispercpp/index.d.ts +0 -3
- package/model-provider/whispercpp/index.js +0 -3
- package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +0 -30
- package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +0 -14
- package/observability/helicone/HeliconeOpenAIApiConfiguration.js +0 -26
- package/observability/index.cjs +0 -17
- package/observability/index.d.ts +0 -1
- package/observability/index.js +0 -1
- package/retriever/RetrieveEvent.cjs +0 -2
- package/retriever/RetrieveEvent.d.ts +0 -10
- package/retriever/RetrieveEvent.js +0 -1
- package/retriever/Retriever.cjs +0 -2
- package/retriever/Retriever.d.ts +0 -4
- package/retriever/Retriever.js +0 -1
- package/retriever/index.cjs +0 -18
- package/retriever/index.d.ts +0 -2
- package/retriever/index.js +0 -2
- package/retriever/retrieve.cjs +0 -15
- package/retriever/retrieve.d.ts +0 -3
- package/retriever/retrieve.js +0 -11
- package/test/JsonTestServer.cjs +0 -33
- package/test/JsonTestServer.d.ts +0 -7
- package/test/JsonTestServer.js +0 -29
- package/test/StreamingTestServer.cjs +0 -55
- package/test/StreamingTestServer.d.ts +0 -7
- package/test/StreamingTestServer.js +0 -51
- package/test/arrayFromAsync.cjs +0 -13
- package/test/arrayFromAsync.d.ts +0 -1
- package/test/arrayFromAsync.js +0 -9
- package/text-chunk/TextChunk.cjs +0 -2
- package/text-chunk/TextChunk.d.ts +0 -3
- package/text-chunk/TextChunk.js +0 -1
- package/text-chunk/index.cjs +0 -21
- package/text-chunk/index.d.ts +0 -5
- package/text-chunk/index.js +0 -5
- package/text-chunk/split/SplitFunction.cjs +0 -2
- package/text-chunk/split/SplitFunction.d.ts +0 -4
- package/text-chunk/split/SplitFunction.js +0 -1
- package/text-chunk/split/splitOnSeparator.cjs +0 -10
- package/text-chunk/split/splitOnSeparator.d.ts +0 -7
- package/text-chunk/split/splitOnSeparator.js +0 -6
- package/text-chunk/split/splitRecursively.cjs +0 -41
- package/text-chunk/split/splitRecursively.d.ts +0 -18
- package/text-chunk/split/splitRecursively.js +0 -36
- package/text-chunk/split/splitTextChunks.cjs +0 -16
- package/text-chunk/split/splitTextChunks.d.ts +0 -4
- package/text-chunk/split/splitTextChunks.js +0 -11
- package/tool/NoSuchToolDefinitionError.cjs +0 -41
- package/tool/NoSuchToolDefinitionError.d.ts +0 -17
- package/tool/NoSuchToolDefinitionError.js +0 -37
- package/tool/Tool.cjs +0 -64
- package/tool/Tool.d.ts +0 -39
- package/tool/Tool.js +0 -60
- package/tool/ToolCall.cjs +0 -2
- package/tool/ToolCall.d.ts +0 -15
- package/tool/ToolCall.js +0 -1
- package/tool/ToolCallArgumentsValidationError.cjs +0 -49
- package/tool/ToolCallArgumentsValidationError.d.ts +0 -23
- package/tool/ToolCallArgumentsValidationError.js +0 -45
- package/tool/ToolCallError.cjs +0 -34
- package/tool/ToolCallError.d.ts +0 -17
- package/tool/ToolCallError.js +0 -30
- package/tool/ToolCallGenerationError.cjs +0 -35
- package/tool/ToolCallGenerationError.d.ts +0 -15
- package/tool/ToolCallGenerationError.js +0 -31
- package/tool/ToolCallResult.cjs +0 -2
- package/tool/ToolCallResult.d.ts +0 -13
- package/tool/ToolCallResult.js +0 -1
- package/tool/ToolDefinition.cjs +0 -2
- package/tool/ToolDefinition.d.ts +0 -7
- package/tool/ToolDefinition.js +0 -1
- package/tool/ToolExecutionError.cjs +0 -42
- package/tool/ToolExecutionError.d.ts +0 -19
- package/tool/ToolExecutionError.js +0 -38
- package/tool/WebSearchTool.cjs +0 -56
- package/tool/WebSearchTool.d.ts +0 -54
- package/tool/WebSearchTool.js +0 -52
- package/tool/execute-tool/ExecuteToolEvent.cjs +0 -2
- package/tool/execute-tool/ExecuteToolEvent.d.ts +0 -11
- package/tool/execute-tool/ExecuteToolEvent.js +0 -1
- package/tool/execute-tool/executeTool.cjs +0 -102
- package/tool/execute-tool/executeTool.d.ts +0 -30
- package/tool/execute-tool/executeTool.js +0 -98
- package/tool/execute-tool/index.cjs +0 -18
- package/tool/execute-tool/index.d.ts +0 -2
- package/tool/execute-tool/index.js +0 -2
- package/tool/execute-tool/safeExecuteToolCall.cjs +0 -34
- package/tool/execute-tool/safeExecuteToolCall.d.ts +0 -5
- package/tool/execute-tool/safeExecuteToolCall.js +0 -30
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +0 -61
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +0 -35
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +0 -57
- package/tool/generate-tool-call/ToolCallGenerationEvent.cjs +0 -2
- package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +0 -23
- package/tool/generate-tool-call/ToolCallGenerationEvent.js +0 -1
- package/tool/generate-tool-call/ToolCallGenerationModel.cjs +0 -2
- package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +0 -19
- package/tool/generate-tool-call/ToolCallGenerationModel.js +0 -1
- package/tool/generate-tool-call/ToolCallParseError.cjs +0 -44
- package/tool/generate-tool-call/ToolCallParseError.d.ts +0 -18
- package/tool/generate-tool-call/ToolCallParseError.js +0 -40
- package/tool/generate-tool-call/generateToolCall.cjs +0 -65
- package/tool/generate-tool-call/generateToolCall.d.ts +0 -21
- package/tool/generate-tool-call/generateToolCall.js +0 -61
- package/tool/generate-tool-call/index.cjs +0 -22
- package/tool/generate-tool-call/index.d.ts +0 -6
- package/tool/generate-tool-call/index.js +0 -6
- package/tool/generate-tool-call/jsonToolCallPrompt.cjs +0 -30
- package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +0 -5
- package/tool/generate-tool-call/jsonToolCallPrompt.js +0 -27
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +0 -62
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +0 -31
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +0 -58
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.cjs +0 -2
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +0 -23
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.js +0 -1
- package/tool/generate-tool-calls/ToolCallsGenerationModel.cjs +0 -2
- package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +0 -21
- package/tool/generate-tool-calls/ToolCallsGenerationModel.js +0 -1
- package/tool/generate-tool-calls/ToolCallsParseError.cjs +0 -36
- package/tool/generate-tool-calls/ToolCallsParseError.d.ts +0 -15
- package/tool/generate-tool-calls/ToolCallsParseError.js +0 -32
- package/tool/generate-tool-calls/ToolCallsPromptTemplate.cjs +0 -2
- package/tool/generate-tool-calls/ToolCallsPromptTemplate.d.ts +0 -12
- package/tool/generate-tool-calls/ToolCallsPromptTemplate.js +0 -1
- package/tool/generate-tool-calls/generateToolCalls.cjs +0 -63
- package/tool/generate-tool-calls/generateToolCalls.d.ts +0 -39
- package/tool/generate-tool-calls/generateToolCalls.js +0 -59
- package/tool/generate-tool-calls/index.cjs +0 -22
- package/tool/generate-tool-calls/index.d.ts +0 -6
- package/tool/generate-tool-calls/index.js +0 -6
- package/tool/index.cjs +0 -31
- package/tool/index.d.ts +0 -15
- package/tool/index.js +0 -15
- package/tool/run-tool/RunToolEvent.cjs +0 -2
- package/tool/run-tool/RunToolEvent.d.ts +0 -7
- package/tool/run-tool/RunToolEvent.js +0 -1
- package/tool/run-tool/index.cjs +0 -18
- package/tool/run-tool/index.d.ts +0 -2
- package/tool/run-tool/index.js +0 -2
- package/tool/run-tool/runTool.cjs +0 -30
- package/tool/run-tool/runTool.d.ts +0 -20
- package/tool/run-tool/runTool.js +0 -26
- package/tool/run-tools/RunToolsEvent.cjs +0 -2
- package/tool/run-tools/RunToolsEvent.d.ts +0 -7
- package/tool/run-tools/RunToolsEvent.js +0 -1
- package/tool/run-tools/index.cjs +0 -18
- package/tool/run-tools/index.d.ts +0 -2
- package/tool/run-tools/index.js +0 -2
- package/tool/run-tools/runTools.cjs +0 -54
- package/tool/run-tools/runTools.d.ts +0 -22
- package/tool/run-tools/runTools.js +0 -50
- package/util/AsyncQueue.cjs +0 -121
- package/util/AsyncQueue.d.ts +0 -51
- package/util/AsyncQueue.js +0 -117
- package/util/AsyncQueue.test.cjs +0 -137
- package/util/AsyncQueue.test.d.ts +0 -1
- package/util/AsyncQueue.test.js +0 -135
- package/util/DurationMeasurement.cjs +0 -48
- package/util/DurationMeasurement.d.ts +0 -6
- package/util/DurationMeasurement.js +0 -44
- package/util/ErrorHandler.cjs +0 -2
- package/util/ErrorHandler.d.ts +0 -1
- package/util/ErrorHandler.js +0 -1
- package/util/SafeResult.cjs +0 -2
- package/util/SafeResult.d.ts +0 -8
- package/util/SafeResult.js +0 -1
- package/util/SimpleWebSocket.cjs +0 -48
- package/util/SimpleWebSocket.d.ts +0 -12
- package/util/SimpleWebSocket.js +0 -21
- package/util/audio/AudioMimeType.cjs +0 -2
- package/util/audio/AudioMimeType.d.ts +0 -1
- package/util/audio/AudioMimeType.js +0 -1
- package/util/audio/getAudioFileExtension.cjs +0 -29
- package/util/audio/getAudioFileExtension.d.ts +0 -1
- package/util/audio/getAudioFileExtension.js +0 -25
- package/util/audio/index.cjs +0 -18
- package/util/audio/index.d.ts +0 -2
- package/util/audio/index.js +0 -2
- package/util/cosineSimilarity.cjs +0 -26
- package/util/cosineSimilarity.d.ts +0 -11
- package/util/cosineSimilarity.js +0 -22
- package/util/delay.cjs +0 -7
- package/util/delay.d.ts +0 -1
- package/util/delay.js +0 -3
- package/util/detectRuntime.cjs +0 -21
- package/util/detectRuntime.d.ts +0 -1
- package/util/detectRuntime.js +0 -17
- package/util/fixJson.cjs +0 -334
- package/util/fixJson.d.ts +0 -1
- package/util/fixJson.js +0 -330
- package/util/fixJson.test.cjs +0 -188
- package/util/fixJson.test.d.ts +0 -1
- package/util/fixJson.test.js +0 -183
- package/util/format/DataContent.cjs +0 -27
- package/util/format/DataContent.d.ts +0 -7
- package/util/format/DataContent.js +0 -22
- package/util/format/UInt8Utils.cjs +0 -40
- package/util/format/UInt8Utils.d.ts +0 -2
- package/util/format/UInt8Utils.js +0 -35
- package/util/format/index.cjs +0 -17
- package/util/format/index.d.ts +0 -1
- package/util/format/index.js +0 -1
- package/util/getErrorMessage.cjs +0 -16
- package/util/getErrorMessage.d.ts +0 -1
- package/util/getErrorMessage.js +0 -12
- package/util/index.cjs +0 -22
- package/util/index.d.ts +0 -6
- package/util/index.js +0 -6
- package/util/isDeepEqualData.cjs +0 -53
- package/util/isDeepEqualData.d.ts +0 -8
- package/util/isDeepEqualData.js +0 -49
- package/util/isDeepEqualData.test.cjs +0 -107
- package/util/isDeepEqualData.test.d.ts +0 -1
- package/util/isDeepEqualData.test.js +0 -102
- package/util/never.cjs +0 -6
- package/util/never.d.ts +0 -1
- package/util/never.js +0 -2
- package/util/parsePartialJson.cjs +0 -29
- package/util/parsePartialJson.d.ts +0 -1
- package/util/parsePartialJson.js +0 -22
- package/util/runSafe.cjs +0 -15
- package/util/runSafe.d.ts +0 -2
- package/util/runSafe.js +0 -11
- package/util/runSafe.test.cjs +0 -58
- package/util/runSafe.test.d.ts +0 -1
- package/util/runSafe.test.js +0 -56
- package/util/streaming/EventSourceParserStream.cjs +0 -34
- package/util/streaming/EventSourceParserStream.d.ts +0 -15
- package/util/streaming/EventSourceParserStream.js +0 -30
- package/util/streaming/convertReadableStreamToAsyncIterable.cjs +0 -19
- package/util/streaming/convertReadableStreamToAsyncIterable.d.ts +0 -1
- package/util/streaming/convertReadableStreamToAsyncIterable.js +0 -15
- package/util/streaming/createEventSourceResponseHandler.cjs +0 -9
- package/util/streaming/createEventSourceResponseHandler.d.ts +0 -4
- package/util/streaming/createEventSourceResponseHandler.js +0 -5
- package/util/streaming/createEventSourceStream.cjs +0 -19
- package/util/streaming/createEventSourceStream.d.ts +0 -1
- package/util/streaming/createEventSourceStream.js +0 -15
- package/util/streaming/createJsonStreamResponseHandler.cjs +0 -9
- package/util/streaming/createJsonStreamResponseHandler.d.ts +0 -4
- package/util/streaming/createJsonStreamResponseHandler.js +0 -5
- package/util/streaming/index.cjs +0 -17
- package/util/streaming/index.d.ts +0 -1
- package/util/streaming/index.js +0 -1
- package/util/streaming/parseEventSourceStream.cjs +0 -12
- package/util/streaming/parseEventSourceStream.d.ts +0 -4
- package/util/streaming/parseEventSourceStream.js +0 -8
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +0 -52
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +0 -6
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +0 -48
- package/util/streaming/parseJsonStream.cjs +0 -35
- package/util/streaming/parseJsonStream.d.ts +0 -7
- package/util/streaming/parseJsonStream.js +0 -31
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +0 -21
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +0 -6
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +0 -17
- package/vector-index/UpsertIntoVectorIndexEvent.cjs +0 -2
- package/vector-index/UpsertIntoVectorIndexEvent.d.ts +0 -9
- package/vector-index/UpsertIntoVectorIndexEvent.js +0 -1
- package/vector-index/VectorIndex.cjs +0 -2
- package/vector-index/VectorIndex.d.ts +0 -19
- package/vector-index/VectorIndex.js +0 -1
- package/vector-index/VectorIndexRetriever.cjs +0 -54
- package/vector-index/VectorIndexRetriever.d.ts +0 -20
- package/vector-index/VectorIndexRetriever.js +0 -50
- package/vector-index/index.cjs +0 -21
- package/vector-index/index.d.ts +0 -5
- package/vector-index/index.js +0 -5
- package/vector-index/memory/MemoryVectorIndex.cjs +0 -69
- package/vector-index/memory/MemoryVectorIndex.d.ts +0 -32
- package/vector-index/memory/MemoryVectorIndex.js +0 -65
- package/vector-index/upsertIntoVectorIndex.cjs +0 -28
- package/vector-index/upsertIntoVectorIndex.d.ts +0 -11
- package/vector-index/upsertIntoVectorIndex.js +0 -24
package/CHANGELOG.md
DELETED
@@ -1,2272 +0,0 @@
# Changelog

## v0.133.0 - 2024-01-26

### Added

- Support for OpenAI embedding custom dimensions.

### Changed

- **breaking change**: renamed `embeddingDimensions` setting to `dimensions`
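
  A minimal sketch of the renamed setting, combined with the `text-embedding-3` models from v0.132.0 below (the `dimensions` value is only for illustration):

  ```ts
  import { embed, openai } from "modelfusion";

  const embedding = await embed({
    model: openai.TextEmbedder({
      model: "text-embedding-3-large",
      dimensions: 256, // illustrative value; formerly the `embeddingDimensions` setting
    }),
    value: "At first, Nox didn't know what to do with the pup.",
  });
  ```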

## v0.132.0 - 2024-01-25

### Added

- Support for OpenAI `text-embedding-3-small` and `text-embedding-3-large` embedding models.
- Support for OpenAI `gpt-4-turbo-preview`, `gpt-4-0125-preview`, and `gpt-3.5-turbo-0125` chat models.

## v0.131.1 - 2024-01-25

### Fixed

- Add `type-fest` as dependency to fix type inference errors.

## v0.131.0 - 2024-01-23

### Added

- `ObjectStreamResponse` and `ObjectStreamFromResponse` serialization functions for using server-generated object streams in web applications.

  Server example:

  ```ts
  export async function POST(req: Request) {
    const { myArgs } = await req.json();

    const objectStream = await streamObject({
      // ...
    });

    // serialize the object stream to a response:
    return new ObjectStreamResponse(objectStream);
  }
  ```

  Client example:

  ```ts
  const response = await fetch("/api/stream-object-openai", {
    method: "POST",
    body: JSON.stringify({ myArgs }),
  });

  // deserialize (result object is simpler than the full response)
  const stream = ObjectStreamFromResponse({
    schema: itinerarySchema,
    response,
  });

  for await (const { partialObject } of stream) {
    // do something, e.g. setting a React state
  }
  ```

### Changed

- **breaking change**: rename `generateStructure` to `generateObject` and `streamStructure` to `streamObject`. Related names have been changed accordingly.
- **breaking change**: the `streamObject` result stream contains additional data. You need to use `stream.partialObject` or destructuring to access it:

  ```ts
  const objectStream = await streamObject({
    // ...
  });

  for await (const { partialObject } of objectStream) {
    console.clear();
    console.log(partialObject);
  }
  ```

- **breaking change**: the result from successful `Schema` validations is stored in the `value` property (before: `data`).
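
  A sketch of the new result shape (`schema` and `data` are placeholders, and the `validate` method name is an assumption):

  ```ts
  const result = schema.validate(data);

  if (result.success) {
    console.log(result.value); // before: result.data
  }
  ```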

## v0.130.1 - 2024-01-22

### Fixed

- Duplex speech streaming works in Vercel Edge Functions.

## v0.130.0 - 2024-01-21

### Changed

- **breaking change**: updated `generateTranscription` interface. The function now takes a `mimeType` and `audioData` (base64-encoded string, `Uint8Array`, `Buffer` or `ArrayBuffer`). Example:

  ```ts
  import { generateTranscription, openai } from "modelfusion";
  import fs from "node:fs";

  const transcription = await generateTranscription({
    model: openai.Transcriber({ model: "whisper-1" }),
    mimeType: "audio/mp3",
    audioData: await fs.promises.readFile("data/test.mp3"),
  });
  ```

- Images in instruction and chat prompts can be `Buffer` or `ArrayBuffer` instances (in addition to base64-encoded strings and `Uint8Array` instances).

## v0.129.0 - 2024-01-20

### Changed

- **breaking change**: Usage of Node `async_hooks` has been renamed from `node:async_hooks` to `async_hooks` for easier Webpack configuration. To exclude `async_hooks` from client-side bundling, you can use the following config for Next.js (`next.config.mjs` or `next.config.js`):

  ```js
  /**
   * @type {import('next').NextConfig}
   */
  const nextConfig = {
    webpack: (config, { isServer }) => {
      if (isServer) {
        return config;
      }

      config.resolve = config.resolve ?? {};
      config.resolve.fallback = config.resolve.fallback ?? {};

      // async hooks is not available in the browser:
      config.resolve.fallback.async_hooks = false;

      return config;
    },
  };
  ```

## v0.128.0 - 2024-01-20

### Changed

- **breaking change**: ModelFusion uses `Uint8Array` instead of `Buffer` for better cross-platform compatibility (see also ["Goodbye, Node.js Buffer"](https://sindresorhus.com/blog/goodbye-nodejs-buffer)). This can lead to breaking changes in your code if you use `Buffer`-specific methods.
- **breaking change**: Image content in multi-modal instruction and chat inputs (e.g. for GPT Vision) is passed in the `image` property (instead of `base64Image`) and supports both base64 strings and `Uint8Array` inputs:

  ```ts
  const image = fs.readFileSync(path.join("data", "example-image.png"));

  const textStream = await streamText({
    model: openai.ChatTextGenerator({
      model: "gpt-4-vision-preview",
      maxGenerationTokens: 1000,
    }),

    prompt: [
      openai.ChatMessage.user([
        { type: "text", text: "Describe the image in detail:\n\n" },
        { type: "image", image, mimeType: "image/png" },
      ]),
    ],
  });
  ```

- OpenAI-compatible providers with predefined API configurations have a customized provider name that shows up in the events.

## v0.127.0 - 2024-01-15

### Changed

- **breaking change**: `streamStructure` returns an async iterable over deep partial objects. If you need to get the fully validated final result, you can use the `fullResponse: true` option and await the `structurePromise` value. Example:

  ```ts
  const { structureStream, structurePromise } = await streamStructure({
    model: ollama
      .ChatTextGenerator({
        model: "openhermes2.5-mistral",
        maxGenerationTokens: 1024,
        temperature: 0,
      })
      .asStructureGenerationModel(jsonStructurePrompt.text()),

    schema: zodSchema(
      z.object({
        characters: z.array(
          z.object({
            name: z.string(),
            class: z
              .string()
              .describe("Character class, e.g. warrior, mage, or thief."),
            description: z.string(),
          })
        ),
      })
    ),

    prompt:
      "Generate 3 character descriptions for a fantasy role playing game.",

    fullResponse: true,
  });

  for await (const partialStructure of structureStream) {
    console.clear();
    console.log(partialStructure);
  }

  const structure = await structurePromise;

  console.clear();
  console.log("FINAL STRUCTURE");
  console.log(structure);
  ```

- **breaking change**: Renamed `text` value in `streamText` with `fullResponse: true` to `textPromise`.
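
  A minimal sketch of the rename under the v0.122.0 parameter-object API (model choice and prompt are illustrative):

  ```ts
  const { textStream, textPromise } = await streamText({
    model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
    prompt: "Write a short story about a robot learning to love:",
    fullResponse: true,
  });

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
  }

  console.log(await textPromise); // full text, resolved after streaming finishes
  ```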

### Fixed

- Ollama streaming.
- Ollama structure generation and streaming.

## v0.126.0 - 2024-01-15

### Changed

- **breaking change**: rename `useTool` to `runTool` and `useTools` to `runTools` to avoid confusion with React hooks.
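
  A sketch of the renamed call (the tool and model are placeholders; the signature is assumed to be unchanged apart from the rename):

  ```ts
  import { runTool } from "modelfusion";

  const result = await runTool({
    model: toolCallModel, // placeholder: a model that supports tool calls
    tool: calculator, // placeholder: a previously defined Tool
    prompt: "What is 7 * 6?",
  });
  ```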

## v0.125.0 - 2024-01-14

### Added

- Perplexity AI chat completion support. Example:

  ```ts
  import { openaicompatible, streamText } from "modelfusion";

  const textStream = await streamText({
    model: openaicompatible
      .ChatTextGenerator({
        api: openaicompatible.PerplexityApi(),
        provider: "openaicompatible-perplexity",
        model: "pplx-70b-online", // online model with access to web search
        maxGenerationTokens: 500,
      })
      .withTextPrompt(),

    prompt: "What is RAG in AI?",
  });
  ```

## v0.124.0 - 2024-01-13

### Added

- [Embedding-support for OpenAI-compatible providers](https://modelfusion.dev/integration/model-provider/openaicompatible/#embed-text). You can for example use the Together AI embedding endpoint:

  ```ts
  import { embed, openaicompatible } from "modelfusion";

  const embedding = await embed({
    model: openaicompatible.TextEmbedder({
      api: openaicompatible.TogetherAIApi(),
      provider: "openaicompatible-togetherai",
      model: "togethercomputer/m2-bert-80M-8k-retrieval",
    }),
    value: "At first, Nox didn't know what to do with the pup.",
  });
  ```

## v0.123.0 - 2024-01-13

### Added

- `classify` model function ([docs](https://modelfusion.dev/guide/function/classify)) for classifying values. The `SemanticClassifier` has been renamed to `EmbeddingSimilarityClassifier` and can be used in conjunction with `classify`:

  ```ts
  import { classify, EmbeddingSimilarityClassifier, openai } from "modelfusion";

  const classifier = new EmbeddingSimilarityClassifier({
    embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
    similarityThreshold: 0.82,
    clusters: [
      {
        name: "politics" as const,
        values: [
          "they will save the country!",
          // ...
        ],
      },
      {
        name: "chitchat" as const,
        values: [
          "how's the weather today?",
          // ...
        ],
      },
    ],
  });

  // strongly typed result:
  const result = await classify({
    model: classifier,
    value: "don't you love politics?",
  });
  ```

## v0.122.0 - 2024-01-13

### Changed

- **breaking change**: Switch from positional parameters to named parameters (parameter object) for all model and tool functions. The parameter object is the first and only parameter of the function. Additional options (previously the last parameter) are now part of the parameter object. Example:

  ```ts
  // old:
  const text = await generateText(
    openai
      .ChatTextGenerator({
        model: "gpt-3.5-turbo",
        maxGenerationTokens: 1000,
      })
      .withTextPrompt(),

    "Write a short story about a robot learning to love",

    {
      functionId: "example-function",
    }
  );

  // new:
  const text = await generateText({
    model: openai
      .ChatTextGenerator({
        model: "gpt-3.5-turbo",
        maxGenerationTokens: 1000,
      })
      .withTextPrompt(),

    prompt: "Write a short story about a robot learning to love",

    functionId: "example-function",
  });
  ```

  This change was made to make the API more flexible and to allow for future extensions.

## v0.121.2 - 2024-01-11

### Fixed

- Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@Necmttn](https://github.com/Necmttn) for the bugfix!

## v0.121.1 - 2024-01-10

### Fixed

- Ollama response schema for repeated calls with Ollama 0.1.19 chat models. Thanks [@jakedetels](https://github.com/jakedetels) for the bug report!

## v0.121.0 - 2024-01-09

### Added

- Synthia prompt template

### Changed

- **breaking change**: Renamed `parentCallId` function parameter to `callId` to enable options pass-through.
- Better output filtering for `detailed-object` log format (e.g. via `modelfusion.setLogFormat("detailed-object")`)

## v0.120.0 - 2024-01-09

### Added

- `OllamaCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `ollama.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.

  ```ts
  const model = ollama
    .CompletionTextGenerator({
      model: "mistral",
      promptTemplate: ollama.prompt.Mistral,
      raw: true, // required when using custom prompt template
      maxGenerationTokens: 120,
    })
    .withTextPrompt();
  ```

### Removed

- **breaking change**: removed `.withTextPromptTemplate` on `OllamaCompletionModel`.

## v0.119.1 - 2024-01-08

### Fixed

- Incorrect export. Thanks [@mloenow](https://github.com/mloenow) for the fix!

## v0.119.0 - 2024-01-07

### Added

- Schema-specific GBNF grammar generator for `LlamaCppCompletionModel`. When using `jsonStructurePrompt`, it automatically uses a GBNF grammar for the JSON schema that you provide. Example:

  ```ts
  const structure = await generateStructure(
    llamacpp
      .CompletionTextGenerator({
        // run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
        promptTemplate: llamacpp.prompt.ChatML,
        maxGenerationTokens: 1024,
        temperature: 0,
      })
      // automatically restrict the output to your schema using GBNF:
      .asStructureGenerationModel(jsonStructurePrompt.text()),

    zodSchema(
      z.array(
        z.object({
          name: z.string(),
          class: z
            .string()
            .describe("Character class, e.g. warrior, mage, or thief."),
          description: z.string(),
        })
      )
    ),

    "Generate 3 character descriptions for a fantasy role playing game. "
  );
  ```

## v0.118.0 - 2024-01-07

### Added

- `LlamaCppCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `llamacpp.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.

  ```ts
  const model = llamacpp
    .CompletionTextGenerator({
      // run https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF with llama.cpp
      promptTemplate: llamacpp.prompt.ChatML,
      contextWindowSize: 4096,
      maxGenerationTokens: 512,
    })
    .withChatPrompt();
  ```

### Changed

- **breaking change**: renamed `response` to `rawResponse` when using `fullResponse: true` setting.
- **breaking change**: renamed `llamacpp.TextGenerator` to `llamacpp.CompletionTextGenerator`.

### Removed

- **breaking change**: removed `.withTextPromptTemplate` on `LlamaCppCompletionModel`.

## v0.117.0 - 2024-01-06

### Added

- Predefined Llama.cpp GBNF grammars (a usage sketch follows this entry):

  - `llamacpp.grammar.json`: Restricts the output to JSON.
  - `llamacpp.grammar.jsonArray`: Restricts the output to a JSON array.
  - `llamacpp.grammar.list`: Restricts the output to a newline-separated list where each line starts with `- `.

- Llama.cpp structure generation support:

  ```ts
  const structure = await generateStructure(
    llamacpp
      .TextGenerator({
        // run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
        maxGenerationTokens: 1024,
        temperature: 0,
      })
      .withTextPromptTemplate(ChatMLPrompt.instruction()) // needed for jsonStructurePrompt.text()
      .asStructureGenerationModel(jsonStructurePrompt.text()), // automatically restrict the output to JSON

    zodSchema(
      z.object({
        characters: z.array(
          z.object({
            name: z.string(),
            class: z
              .string()
              .describe("Character class, e.g. warrior, mage, or thief."),
            description: z.string(),
          })
        ),
      })
    ),

    "Generate 3 character descriptions for a fantasy role playing game. "
  );
  ```
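
A usage sketch for the predefined grammars above, assuming they can be passed into the existing `grammar` setting just like a hand-written GBNF string (the prompt is illustrative):

```ts
import { generateText, llamacpp } from "modelfusion";

const json = await generateText(
  llamacpp.TextGenerator({
    maxGenerationTokens: 512,
    temperature: 0,
    grammar: llamacpp.grammar.json, // restrict the output to JSON
  }),
  "Describe a fantasy character as a JSON object:\n\n"
);
```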

## v0.116.0 - 2024-01-05

### Added

- Semantic classifier. An easy way to determine the class of a text using embeddings. Example:

  ```ts
  import { SemanticClassifier, openai } from "modelfusion";

  const classifier = new SemanticClassifier({
    embeddingModel: openai.TextEmbedder({
      model: "text-embedding-ada-002",
    }),
    similarityThreshold: 0.82,
    clusters: [
      {
        name: "politics" as const,
        values: [
          "isn't politics the best thing ever",
          "why don't you tell me about your political opinions",
          "don't you just love the president",
          "don't you just hate the president",
          "they're going to destroy this country!",
          "they will save the country!",
        ],
      },
      {
        name: "chitchat" as const,
        values: [
          "how's the weather today?",
          "how are things going?",
          "lovely weather today",
          "the weather is horrendous",
          "let's go to the chippy",
        ],
      },
    ],
  });

  console.log(await classifier.classify("don't you love politics?")); // politics
  console.log(await classifier.classify("how's the weather today?")); // chitchat
  console.log(
    await classifier.classify("I'm interested in learning about llama 2")
  ); // null
  ```

## v0.115.0 - 2024-01-05

### Removed

- Anthropic support. Anthropic has a strong stance against open-source models and against non-US AI. I will not support them by providing a ModelFusion integration.

## v0.114.1 - 2024-01-05

### Fixed

- Together AI text generation and text streaming using OpenAI-compatible chat models.

## v0.114.0 - 2024-01-05

### Added

- Custom call header support for APIs. You can pass a `customCallHeaders` function into API configurations to add custom headers. The function is called with `functionType`, `functionId`, `run`, and `callId` parameters. Example for Helicone:

  ```ts
  const text = await generateText(
    openai
      .ChatTextGenerator({
        api: new HeliconeOpenAIApiConfiguration({
          customCallHeaders: ({ functionId, callId }) => ({
            "Helicone-Property-FunctionId": functionId,
            "Helicone-Property-CallId": callId,
          }),
        }),
        model: "gpt-3.5-turbo",
        temperature: 0.7,
        maxGenerationTokens: 500,
      })
      .withTextPrompt(),

    "Write a short story about a robot learning to love",

    { functionId: "example-function" }
  );
  ```

- Rudimentary caching support for `generateText`. You can use a `MemoryCache` to store the response of a `generateText` call. Example:

  ```ts
  import { MemoryCache, generateText, ollama } from "modelfusion";

  const model = ollama
    .ChatTextGenerator({ model: "llama2:chat", maxGenerationTokens: 100 })
    .withTextPrompt();

  const cache = new MemoryCache();

  const text1 = await generateText(
    model,
    "Write a short story about a robot learning to love:",
    { cache }
  );

  console.log(text1);

  // 2nd call will use cached response:
  const text2 = await generateText(
    model,
    "Write a short story about a robot learning to love:", // same text
    { cache }
  );

  console.log(text2);
  ```

- `validateTypes` and `safeValidateTypes` helpers that perform type checking of an object against a `Schema` (e.g., a `zodSchema`).
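
  A sketch of how these helpers might be called (the object-style signature is an assumption):

  ```ts
  import { safeValidateTypes, validateTypes, zodSchema } from "modelfusion";
  import { z } from "zod";

  const schema = zodSchema(z.object({ name: z.string() }));

  // throws when the value does not match the schema:
  const character = validateTypes({ value: { name: "Nox" }, schema });

  // returns a result object instead of throwing:
  const result = safeValidateTypes({ value: { name: 42 }, schema });
  ```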

## v0.113.0 - 2024-01-03

[Structure generation](https://modelfusion.dev/guide/function/generate-structure) improvements.

### Added

- `.asStructureGenerationModel(...)` function to `OpenAIChatModel` and `OllamaChatModel` to create structure generation models from chat models.
- `jsonStructurePrompt` helper function to create structure generation models.

### Example

```ts
import {
  generateStructure,
  jsonStructurePrompt,
  ollama,
  zodSchema,
} from "modelfusion";

const structure = await generateStructure(
  ollama
    .ChatTextGenerator({
      model: "openhermes2.5-mistral",
      maxGenerationTokens: 1024,
      temperature: 0,
    })
    .asStructureGenerationModel(jsonStructurePrompt.text()),

  zodSchema(
    z.object({
      characters: z.array(
        z.object({
          name: z.string(),
          class: z
            .string()
            .describe("Character class, e.g. warrior, mage, or thief."),
          description: z.string(),
        })
      ),
    })
  ),

  "Generate 3 character descriptions for a fantasy role playing game. "
);
```

## v0.112.0 - 2024-01-02

### Changed

- **breaking change**: renamed `useToolsOrGenerateText` to `useTools`
- **breaking change**: renamed `generateToolCallsOrText` to `generateToolCalls`

### Removed

- Restriction on tool names. OpenAI tool calls do not have such a restriction.

## v0.111.0 - 2024-01-01

Reworked API configuration support.

### Added

- All providers now have an `Api` function that you can call to create custom API configurations. The base URL setup is more flexible and allows you to override parts of the base URL selectively.
- `api` namespace with retry and throttle configurations
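
  A sketch of a custom configuration combining both. The provider `Api` function is named in this entry; the `retry`/`throttle` parameters and the `retryWithExponentialBackoff` helper are assumptions (`throttleOff` is named in the Changed section below):

  ```ts
  import { api, openai } from "modelfusion";

  const customApi = openai.Api({
    retry: api.retryWithExponentialBackoff({ maxTries: 8 }), // assumed helper
    throttle: api.throttleOff(),
  });
  ```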

### Changed

- Updated Cohere models.
- Updated LMNT API calls to LMNT `v1` API.
- **breaking change**: Renamed `throttleUnlimitedConcurrency` to `throttleOff`.

## v0.110.0 - 2023-12-30

### Changed

- **breaking change**: renamed `modelfusion/extension` to `modelfusion/internal`. This requires updating `modelfusion-experimental` (if used) to `v0.3.0`

### Removed

- Deprecated OpenAI completion models that will be deactivated on January 4, 2024.

## v0.109.0 - 2023-12-30

### Added

- [Open AI compatible completion model](https://modelfusion.dev/integration/model-provider/openaicompatible/). It e.g. works with Fireworks AI.
- Together AI API configuration (for Open AI compatible chat models):

  ```ts
  import {
    TogetherAIApiConfiguration,
    openaicompatible,
    streamText,
  } from "modelfusion";

  const textStream = await streamText(
    openaicompatible
      .ChatTextGenerator({
        api: new TogetherAIApiConfiguration(),
        model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
      })
      .withTextPrompt(),

    "Write a story about a robot learning to love"
  );
  ```

- Updated Llama.cpp model settings. GBNF grammars can be passed into the `grammar` setting:

  ```ts
  const text = await generateText(
    llamacpp
      .TextGenerator({
        maxGenerationTokens: 512,
        temperature: 0,
        // simple list grammar:
        grammar: `root ::= ("- " item)+
  item ::= [^\\n]+ "\\n"`,
      })
      .withTextPromptTemplate(MistralInstructPrompt.text()),

    "List 5 ingredients for a lasagna:\n\n"
  );
  ```

## v0.107.0 - 2023-12-29

### Added

- Mistral instruct prompt template

### Changed

- **breaking change**: Renamed `LlamaCppTextGenerationModel` to `LlamaCppCompletionModel`.

### Fixed

- Updated `LlamaCppCompletionModel` to the latest llama.cpp version.
- Fixed formatting of system prompt for chats in the Llama 2 prompt template.

## v0.106.0 - 2023-12-28

Experimental features that are unlikely to become stable before v1.0 have been moved to a separate `modelfusion-experimental` package.

### Removed

- Cost calculation
- `guard` function
- Browser and server features (incl. flow)
- `summarizeRecursively` function

## v0.105.0 - 2023-12-26

### Added

- Tool call support for chat prompts. Assistant messages can contain tool calls, and tool messages can contain tool call results. Tool calls can be used to implement e.g. agents:

  ```ts
  const chat: ChatPrompt = {
    system: "You are ...",
    messages: [ChatMessage.user({ text: instruction })],
  };

  while (true) {
    const { text, toolResults } = await useToolsOrGenerateText(
      openai
        .ChatTextGenerator({ model: "gpt-4-1106-preview" })
        .withChatPrompt(),
      tools, // array of tools
      chat
    );

    // add the assistant and tool messages to the chat:
    chat.messages.push(
      ChatMessage.assistant({ text, toolResults }),
      ChatMessage.tool({ toolResults })
    );

    if (toolResults == null) {
      return; // no more actions, break loop
    }

    // ... (handle tool results)
  }
  ```

- `streamText` returns a `text` promise when invoked with `fullResponse: true`. After the streaming has finished, the promise resolves with the full text.

  ```ts
  const { text, textStream } = await streamText(
    openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
    "Write a short story about a robot learning to love:",
    { fullResponse: true }
  );

  // ... (handle streaming)

  console.log(await text); // full text
  ```

## v0.104.0 - 2023-12-24

### Changed

- **breaking change**: Unified text and multimodal prompt templates. `[Text/MultiModal]InstructionPrompt` is now `InstructionPrompt`, and `[Text/MultiModalChatPrompt]` is now `ChatPrompt`.
- More flexible chat prompts: The chat prompt validation is now chat-template specific and validated at runtime. E.g. the Llama2 prompt template only supports turns of user and assistant messages, whereas other formats are more flexible.

## v0.103.0 - 2023-12-23

### Added

- `finishReason` support for `generateText`.

  The finish reason can be `stop` (the model stopped because it generated a stop sequence), `length` (the model stopped because it generated the maximum number of tokens), `content-filter` (the model stopped because the content filter detected a violation), `tool-calls` (the model stopped because it triggered a tool call), `error` (the model stopped because of an error), `other` (the model stopped for another reason), or `unknown` (the model stop reason is not known or the model does not support finish reasons).

  You can extract it from the full response when using `fullResponse: true`:

  ```ts
  const { text, finishReason } = await generateText(
    openai
      .ChatTextGenerator({ model: "gpt-3.5-turbo", maxGenerationTokens: 200 })
      .withTextPrompt(),
    "Write a short story about a robot learning to love:",
    { fullResponse: true }
  );
  ```

## v0.102.0 - 2023-12-22

### Added

- You can specify `numberOfGenerations` on image generation models and create multiple images by using the `fullResponse: true` option. Example:

  ```ts
  // generate 2 images:
  const { images } = await generateImage(
    openai.ImageGenerator({
      model: "dall-e-3",
      numberOfGenerations: 2,
      size: "1024x1024",
    }),
    "the wicked witch of the west in the style of early 19th century painting",
    { fullResponse: true }
  );
  ```

- **breaking change**: Image generation models use a generalized `numberOfGenerations` parameter (instead of model-specific parameters) to specify the number of generations.

## v0.101.0 - 2023-12-22

### Changed

- Automatic1111 Stable Diffusion Web UI configuration has separate configuration of host, port, and path.

### Fixed

- Automatic1111 Stable Diffusion Web UI uses negative prompt and seed.

## v0.100.0 - 2023-12-17

### Added

- `ollama.ChatTextGenerator` model that calls the Ollama chat API.
- Ollama chat messages and prompts are exposed through `ollama.ChatMessage` and `ollama.ChatPrompt`
- OpenAI chat messages and prompts are exposed through `openai.ChatMessage` and `openai.ChatPrompt`
- Mistral chat messages and prompts are exposed through `mistral.ChatMessage` and `mistral.ChatPrompt`
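
A minimal sketch of the new Ollama chat model, using `.withTextPrompt()` as in the v0.114.0 caching example above (model name and prompt are illustrative):

```ts
import { ollama, streamText } from "modelfusion";

const textStream = await streamText(
  ollama
    .ChatTextGenerator({ model: "llama2:chat", maxGenerationTokens: 100 })
    .withTextPrompt(),
  "Why is the sky blue?"
);
```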

### Changed

- **breaking change**: renamed `ollama.TextGenerator` to `ollama.CompletionTextGenerator`
- **breaking change**: renamed `mistral.TextGenerator` to `mistral.ChatTextGenerator`

## v0.99.0 - 2023-12-16

### Added

- You can specify `numberOfGenerations` on text generation models and access multiple generations by using the `fullResponse: true` option. Example:

  ```ts
  // generate 2 texts:
  const { texts } = await generateText(
    openai.CompletionTextGenerator({
      model: "gpt-3.5-turbo-instruct",
      numberOfGenerations: 2,
      maxGenerationTokens: 1000,
    }),
    "Write a short story about a robot learning to love:\n\n",
    { fullResponse: true }
  );
  ```

- **breaking change**: Text generation models use a generalized `numberOfGenerations` parameter (instead of model-specific parameters) to specify the number of generations.

### Changed

- **breaking change**: Renamed `maxCompletionTokens` text generation model setting to `maxGenerationTokens`.

## v0.98.0 - 2023-12-16

### Changed

- **breaking change**: The `responseType` option was changed into the `fullResponse` option and uses a boolean value to make discovery easy. The response values from the full response have been renamed for clarity. For base64 image generation, you can use the `imageBase64` value from the full response:

  ```ts
  const { imageBase64 } = await generateImage(model, prompt, {
    fullResponse: true,
  });
  ```

### Improved

- Better docs for the OpenAI chat settings. Thanks [@bearjaws](https://github.com/bearjaws) for the contribution!

### Fixed

- Streaming OpenAI chat text generation when setting `n:2` or higher returns only the stream from the first choice.

## v0.97.0 - 2023-12-14

### Added

- **breaking change**: Ollama image (vision) support. This changes the Ollama prompt format. You can add `.withTextPrompt()` to existing Ollama text generators to get a text prompt like before.

  Vision example:

  ```ts
  import { ollama, streamText } from "modelfusion";

  const textStream = await streamText(
    ollama.TextGenerator({
      model: "bakllava",
      maxCompletionTokens: 1024,
      temperature: 0,
    }),
    {
      prompt: "Describe the image in detail",
      images: [image], // base-64 encoded png or jpeg
    }
  );
  ```

### Changed

- **breaking change**: Switch Ollama settings to camelCase to align with the rest of the library.

## v0.96.0 - 2023-12-14

### Added

- [Mistral platform support](https://modelfusion.dev/integration/model-provider/mistral)

## v0.95.0 - 2023-12-10

### Added

- `cachePrompt` parameter for llama.cpp models. Thanks [@djwhitt](https://github.com/djwhitt) for the contribution!

## v0.94.0 - 2023-12-10

### Added

- Prompt template for neural-chat models.

## v0.93.0 - 2023-12-10

### Added

- Optional response prefix for instruction prompts to guide the LLM response.

### Changed

- **breaking change**: Renamed prompt format to prompt template to align with the commonly used language (e.g. from model cards).

## v0.92.1 - 2023-12-10

### Changed

- Improved Ollama error handling.

## v0.92.0 - 2023-12-09

### Changed

- **breaking change**: setting global function observers and global logging has changed.
  You can call methods on a `modelfusion` import:

  ```ts
  import { modelfusion } from "modelfusion";

  modelfusion.setLogFormat("basic-text");
  ```

- Cleaned output when using `detailed-object` log format.

## v0.91.0 - 2023-12-09

### Added

- `Whisper.cpp` [transcription (speech-to-text) model](https://modelfusion.dev/integration/model-provider/whispercpp) support.

  ```ts
  import { generateTranscription, whispercpp } from "modelfusion";

  const data = await fs.promises.readFile("data/test.wav");

  const transcription = await generateTranscription(whispercpp.Transcriber(), {
    type: "wav",
    data,
  });
  ```

### Improved

- Better error reporting.

## v0.90.0 - 2023-12-03

### Added

- Temperature and language settings to OpenAI transcription model.

## v0.89.0 - 2023-11-30

### Added

- `maxValuesPerCall` setting for `OpenAITextEmbeddingModel` to enable different configurations, e.g. for Azure. Thanks [@nanotronic](https://github.com/nanotronic) for the contribution!

## v0.88.0 - 2023-11-28

### Added

- Multi-modal chat prompts. Supported by OpenAI vision chat models and by BakLLaVA prompt format.

### Changed

- **breaking change**: renamed `ChatPrompt` to `TextChatPrompt` to distinguish it from multi-modal chat prompts.

## v0.87.0 - 2023-11-27

### Added

- **experimental**: `modelfusion/extension` export with functions and classes that are necessary to implement providers in 3rd party node modules. See [lgrammel/modelfusion-example-provider](https://github.com/lgrammel/modelfusion-example-provider) for an example.

## v0.85.0 - 2023-11-26

### Added

- `OpenAIChatMessage` function call support.

## v0.84.0 - 2023-11-26

### Added

- Support for OpenAI-compatible chat APIs. See [OpenAI Compatible](https://modelfusion.dev/integration/model-provider/openaicompatible) for details.

  ```ts
  import {
    BaseUrlApiConfiguration,
    openaicompatible,
    generateText,
  } from "modelfusion";

  const text = await generateText(
    openaicompatible
      .ChatTextGenerator({
        api: new BaseUrlApiConfiguration({
          baseUrl: "https://api.fireworks.ai/inference/v1",
          headers: {
            Authorization: `Bearer ${process.env.FIREWORKS_API_KEY}`,
          },
        }),
        model: "accounts/fireworks/models/mistral-7b",
      })
      .withTextPrompt(),

    "Write a story about a robot learning to love"
  );
  ```

## v0.83.0 - 2023-11-26

### Added

- Introduce `uncheckedSchema()` facade function as an easier way to create unchecked ModelFusion schemas. This aligns the API with `zodSchema()`.

### Changed

- **breaking change**: Renamed `InstructionPrompt` interface to `MultiModalInstructionPrompt` to clearly distinguish it from `TextInstructionPrompt`.
- **breaking change**: Renamed `.withBasicPrompt` methods for image generation models to `.withTextPrompt` to align with text generation models.

## v0.82.0 - 2023-11-25

### Added

- Introduce `zodSchema()` facade function as an easier way to create new ModelFusion Zod schemas. This clearly distinguishes it from `ZodSchema` that is also part of the zod library.

## v0.81.0 - 2023-11-25

**breaking change**: `generateStructure` and `streamStructure` redesign. The new API does not require function calling and `StructureDefinition` objects any more. This makes it more flexible and it can be used in 3 ways:

- with OpenAI function calling:

  ```ts
  const model = openai
    .ChatTextGenerator({ model: "gpt-3.5-turbo" })
    .asFunctionCallStructureGenerationModel({
      fnName: "...",
      fnDescription: "...",
    });
  ```

- with OpenAI JSON format:

  ```ts
  const model = openai
    .ChatTextGenerator({
      model: "gpt-4-1106-preview",
      temperature: 0,
      maxCompletionTokens: 1024,
      responseFormat: { type: "json_object" },
    })
    .asStructureGenerationModel(
      jsonStructurePrompt((instruction: string, schema) => [
        OpenAIChatMessage.system(
          "JSON schema: \n" +
            JSON.stringify(schema.getJsonSchema()) +
            "\n\n" +
            "Respond only using JSON that matches the above schema."
        ),
        OpenAIChatMessage.user(instruction),
      ])
    );
  ```

- with Ollama (and a capable model, e.g., OpenHermes 2.5):

  ```ts
  const model = ollama
    .TextGenerator({
      model: "openhermes2.5-mistral",
      maxCompletionTokens: 1024,
      temperature: 0,
      format: "json",
      raw: true,
      stopSequences: ["\n\n"], // prevent infinite generation
    })
    .withPromptFormat(ChatMLPromptFormat.instruction())
    .asStructureGenerationModel(
      jsonStructurePrompt((instruction: string, schema) => ({
        system:
          "JSON schema: \n" +
          JSON.stringify(schema.getJsonSchema()) +
          "\n\n" +
          "Respond only using JSON that matches the above schema.",
        instruction,
      }))
    );
  ```

See [generateStructure](https://modelfusion.dev/guide/function/generate-structure) for details on the new API.

## v0.80.0 - 2023-11-24

### Changed

- **breaking change**: Restructured multi-modal instruction prompts and `OpenAIChatMessage.user()`

## v0.79.0 - 2023-11-23

### Added

- Multi-tool usage from open source models

  Use `TextGenerationToolCallsOrGenerateTextModel` and the related helper method `.asToolCallsOrTextGenerationModel()` to create custom prompts & parsers.

  Examples:

  - `examples/basic/src/model-provider/ollama/ollama-use-tools-or-generate-text-openhermes-example.ts`
  - `examples/basic/src/model-provider/llamacpp/llamacpp-use-tools-or-generate-text-openhermes-example.ts`

  Example prompt format:

  - `examples/basic/src/tool/prompts/open-hermes.ts` for OpenHermes 2.5

## v0.78.0 - 2023-11-23

### Removed

- **breaking change**: Removed `FunctionListToolCallPromptFormat`. See `examples/basic/src/model-provide/ollama/ollama-use-tool-mistral-example.ts` for how to implement a `ToolCallPromptFormat` for your tool.

## v0.77.0 - 2023-11-23

### Changed

- **breaking change**: Rename `Speech` to `SpeechGenerator` in facades
- **breaking change**: Rename `Transcription` to `Transcriber` in facades

## v0.76.0 - 2023-11-23

### Added

- Anthropic Claude 2.1 support

## v0.75.0 - 2023-11-22

Introducing model provider facades:

```ts
const image = await generateImage(
  openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
  "the wicked witch of the west in the style of early 19th century painting"
);
```

### Added

- Model provider facades. You can e.g. use `ollama.TextGenerator(...)` instead of `new OllamaTextGenerationModel(...)`.

### Changed

- **breaking change**: Fixed method name `isParallizable` to `isParallelizable` in `EmbeddingModel`.

### Removed

- **breaking change**: removed `HuggingFaceImageDescriptionModel`. Image description models will be replaced by multi-modal vision models.

## v0.74.1 - 2023-11-22

### Improved

- Increase OpenAI chat streaming resilience.

## v0.74.0 - 2023-11-21

Prompt format and tool calling improvements.

### Added

- Text prompt format. Use simple text prompts, e.g. with `OpenAIChatModel`:

  ```ts
  const textStream = await streamText(
    new OpenAIChatModel({
      model: "gpt-3.5-turbo",
    }).withTextPrompt(),
    "Write a short story about a robot learning to love."
  );
  ```

- `.withTextPromptFormat` to `LlamaCppTextGenerationModel` for simplified prompt construction:

  ```ts
  const textStream = await streamText(
    new LlamaCppTextGenerationModel({
      // ...
    }).withTextPromptFormat(Llama2PromptFormat.text()),
    "Write a short story about a robot learning to love."
  );
  ```

- `.asToolCallGenerationModel()` to `OllamaTextGenerationModel` to simplify tool calls.

### Improved

- Better error reporting when using exponential backoff retries.

### Removed

- **breaking change**: removed `input` from `InstructionPrompt` (was Alpaca-specific, `AlpacaPromptFormat` still supports it)

## v0.73.1 - 2023-11-19

Remove section newlines from Llama 2 prompt format.

## v0.73.0 - 2023-11-19

Ollama edge case and error handling improvements.

## v0.72.0 - 2023-11-19

**Breaking change**: the tool calling API has been reworked to support multiple parallel tool calls. This required multiple breaking changes (see below). Check out the updated [tools documentation](https://modelfusion.dev/guide/tools/) for details.

### Changed

- `Tool` has `parameters` and `returnType` schemas (instead of `inputSchema` and `outputSchema`).
- `useTool` uses `generateToolCall` under the hood. The return value and error handling has changed.
- `useToolOrGenerateText` has been renamed to `useToolsOrGenerateText`. It uses `generateToolCallsOrText` under the hood. The return value and error handling has changed. It can invoke several tools in parallel and returns an array of tool results.
- The `maxRetries` parameter in `guard` has been replaced by a `maxAttempt` parameter.
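
  A sketch of the reworked `Tool` shape: the `parameters` field is named in this entry, while the other constructor fields are assumptions (`new ZodSchema` matches the pre-v0.82 schema style):

  ```ts
  import { Tool, ZodSchema } from "modelfusion";
  import { z } from "zod";

  const calculator = new Tool({
    name: "calculator", // assumed field
    description: "Execute a calculation", // assumed field
    parameters: new ZodSchema(z.object({ a: z.number(), b: z.number() })),
    execute: async ({ a, b }) => a + b, // assumed field
  });
  ```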

### Removed

- `generateStructureOrText` has been removed.

## v0.71.0 - 2023-11-17

### Added

- Experimental `generateToolCallsOrText` function for generating multiple parallel tool calls using the OpenAI chat/tools API.

## v0.70.0 - 2023-11-16

### Added

- ChatML prompt format.

### Changed

- **breaking change**: `ChatPrompt` structure and terminology has changed to align more closely with OpenAI and similar chat prompts. This is also in preparation for integrating images and function call results into chat prompts.
- **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-styles) for documentation of the new prompt formats.

## v0.69.0 - 2023-11-15

### Added

- Experimental `generateToolCall` function for generating a single tool call using the OpenAI chat/tools API.

## v0.68.0 - 2023-11-14

### Changed

- Refactored JSON parsing to use abstracted schemas. You can use `parseJSON` and `safeParseJSON` to securely parse JSON objects and optionally type-check them using any schema (e.g. a Zod schema).
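
  A sketch of how these helpers might be called (the object-style signature is an assumption; `new ZodSchema` matches the pre-v0.82 schema style):

  ```ts
  import { parseJSON, safeParseJSON, ZodSchema } from "modelfusion";
  import { z } from "zod";

  const schema = new ZodSchema(z.object({ name: z.string() }));

  // throws on invalid JSON or a schema mismatch:
  const character = parseJSON({ text: '{ "name": "Nox" }', schema });

  // returns a success/error result object instead of throwing:
  const result = safeParseJSON({ text: '{ "name": 42 }', schema });
  ```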

## v0.67.0 - 2023-11-12

### Added

- Ollama 0.1.9 support: `format` (for forcing JSON output) and `raw` settings
- Improved Ollama settings documentation

## v0.66.0 - 2023-11-12

### Added

- Support for fine-tuned OpenAI `gpt-4-0613` models
- Support for `trimWhitespace` model setting in `streamText` calls

## v0.65.0 - 2023-11-12

### Added

- Image support for `OpenAIChatMessage.user`
- `mapInstructionPromptToBakLLaVA1ForLlamaCppFormat` prompt format

### Changed

- **breaking change**: `VisionInstructionPrompt` was replaced by an optional `image` field in `InstructionPrompt`.

## v0.64.0 - 2023-11-11

### Added

- Support for the OpenAI vision model.
  - Example: `examples/basic/src/model-provider/openai/openai-chat-stream-text-vision-example.ts`

## v0.63.0 - 2023-11-08

### Added

- Support for OpenAI chat completion `seed` and `responseFormat` options.

## v0.62.0 - 2023-11-08

### Added

- OpenAI speech generation support. Shoutout to [@bjsi](https://github.com/bjsi) for the awesome contribution!

## v0.61.0 - 2023-11-07

### Added

- OpenAI `gpt-3.5-turbo-1106`, `gpt-4-1106-preview`, `gpt-4-vision-preview` chat models.
- OpenAI DALL-E 3 (`dall-e-3`) image model.

### Changed

- **breaking change**: `OpenAIImageGenerationModel` requires a `model` parameter.

## v0.60.0 - 2023-11-06

### Added

- Support image input for multi-modal Llama.cpp models (e.g. Llava, Bakllava).

### Changed

- **breaking change**: Llama.cpp prompt format has changed to support images. Use `.withTextPrompt()` to get a text prompt format.

## v0.59.0 - 2023-11-06

### Added

- ElevenLabs `eleven_turbo_v2` support.

## v0.58 - 2023-11-05

### Fixed

- **breaking change**: Uncaught errors were caused by custom Promises. ModelFusion now uses only standard Promises. To get full responses from model functions, you need to use the `{ returnType: "full" }` option instead of calling `.asFullResponse()` on the result.

## v0.57.1 - 2023-11-05

### Improved

- ModelFusion server error logging and reporting.

### Fixed

- ModelFusion server creates the directory for runs automatically when errors are thrown.

## v0.57.0 - 2023-11-04

### Added

- Support for [Cohere v3 embeddings](https://txt.cohere.com/introducing-embed-v3/).

## v0.56.0 - 2023-11-04

### Added

- [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text embeddings.

## v0.55.1 - 2023-11-04

### Fixed

- Llama.cpp embeddings are invoked sequentially to avoid rejection by the server.

## v0.55.0 - 2023-11-04

### Added

- [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text generation and text streaming.

## v0.54.0 - 2023-10-29

Adding experimental ModelFusion server, flows, and browser utils.

### Added

- ModelFusion server (separate export 'modelfusion/server') with a Fastify plugin for running ModelFusion flows on a server.
- ModelFusion flows.
- ModelFusion browser utils (separate export 'modelfusion/browser') for dealing with audio data and invoking ModelFusion flows on the server (`invokeFlow`).

### Changed

- **breaking change**: `readEventSource` and `readEventSourceStream` are part of 'modelfusion/browser'.

## v0.53.2 - 2023-10-26

### Added

- Prompt callback option for `streamStructure`

### Improved

- Inline JSDoc comments for the model functions.

## v0.53.1 - 2023-10-25

### Fixed

- Abort signals and errors during streaming are caught and forwarded correctly.

## v0.53.0 - 2023-10-23

### Added

- `executeFunction` utility function for tracing execution time, parameters, and result of composite functions and non-ModelFusion functions.
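
  A sketch of how it might be used (the exact signature is an assumption; the wrapped function and options are illustrative):

  ```ts
  import { executeFunction } from "modelfusion";

  const result = await executeFunction(
    async (input: string) => {
      // ... any composite or non-ModelFusion logic:
      return input.toUpperCase();
    },
    "hello",
    { functionId: "uppercase" } // assumed options shape
  );
  ```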
|
1475
|
-
|
1476
|
-
## v0.52.0 - 2023-10-23
|
1477
|
-
|
1478
|
-
### Changed
|
1479
|
-
|
1480
|
-
- Streaming results and `AsyncQueue` objects can be used by several consumers. Each consumer will receive all values. This means that you can e.g. forward the same text stream to speech generation and the client.
|
1481
|
-
|
1482
|
-
## v0.51.0 - 2023-10-23
|
1483
|
-
|
1484
|
-
ElevenLabs improvements.
|
1485
|
-
|
1486
|
-
### Added
|
1487
|
-
|
1488
|
-
- ElevenLabs model settings `outputFormat` and `optimizeStreamingLatency`.
|
1489
|
-
|
1490
|
-
### Fixed
|
1491
|
-
|
1492
|
-
- Default ElevenLabs model is `eleven_monolingual_v1`.
|
1493
|
-
|
1494
|
-
## v0.50.0 - 2023-10-22
|
1495
|
-
|
1496
|
-
### Added
|
1497
|
-
|
1498
|
-
- `parentCallId` event property
|
1499
|
-
- Tracing for `useTool`, `useToolOrGenerateText`, `upsertIntoVectorIndex`, and `guard`
|
1500
|
-
|
1501
|
-
### Changed
|
1502
|
-
|
1503
|
-
- **breaking change**: rename `embedding` event type to `embed`
|
1504
|
-
- **breaking change**: rename `image-generation` event type to `generate-image`
|
1505
|
-
- **breaking change**: rename `speech-generation` event type to `generate-speech`
|
1506
|
-
- **breaking change**: rename `speech-streaming` event type to `stream-speech`
|
1507
|
-
- **breaking change**: rename `structure-generation` event type to `generate-structure`
|
1508
|
-
- **breaking change**: rename `structure-or-text-generation` event type to `generate-structure-or-text`
|
1509
|
-
- **breaking change**: rename `structure-streaming` event type to `stream-structure`
|
1510
|
-
- **breaking change**: rename `text-generation` event type to `generate-text`
|
1511
|
-
- **breaking change**: rename `text-streaming` event type to `stream-text`
|
1512
|
-
- **breaking change**: rename `transcription` event type to `generate-transcription`
|
1513
|
-
|
1514
|
-

## v0.49.0 - 2023-10-21

### Added

- Speech synthesis streaming supports string inputs.
- Observability for speech synthesis streaming.

### Changed

- **breaking change**: split `synthesizeSpeech` into `generateSpeech` and `streamSpeech` functions (see the sketch after this list)
- **breaking change**: renamed `speech-synthesis` event to `speech-generation`
- **breaking change**: renamed `transcribe` to `generateTranscription`
- **breaking change**: renamed `LmntSpeechSynthesisModel` to `LmntSpeechModel`
- **breaking change**: renamed `ElevenLabsSpeechSynthesisModel` to `ElevenLabsSpeechModel`
- **breaking change**: renamed `OpenAITextGenerationModel` to `OpenAICompletionModel`
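
A sketch of the split, assuming the renamed `ElevenLabsSpeechModel` with a `voice` option and that `streamSpeech` accepts an async iterable of text parts (per the duplex streaming added in v0.48.0):

```ts
import { ElevenLabsSpeechModel, generateSpeech, streamSpeech } from "modelfusion";

// Constructor options are assumptions; check the ElevenLabs integration docs.
const speechModel = new ElevenLabsSpeechModel({ voice: "<voice-id>" });

// One-shot synthesis: resolves to the full audio data.
const speech = await generateSpeech(speechModel, "Hello, world!");

// Duplex streaming synthesis: consumes a text stream, yields audio chunks.
async function* textParts() {
  yield "Hello, ";
  yield "world!";
}
const speechStream = await streamSpeech(speechModel, textParts());
for await (const audioChunk of speechStream) {
  // forward audioChunk to the client or an audio player
}
```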

### Removed

- **breaking change**: `describeImage` model function. Use `generateText` instead (with e.g. `HuggingFaceImageDescriptionModel`).

## v0.48.0 - 2023-10-20

### Added

- Duplex streaming for speech synthesis.
- ElevenLabs duplex streaming support.

### Changed

- Schema now uses the parsed data in its return type (breaking change for tools).

## v0.47.0 - 2023-10-14

### Added

- Prompt formats for image generation. You can use `.withPromptFormat()` or `.withBasicPrompt()` to apply a prompt format to an image generation model.

### Changed

- **breaking change**: `generateImage` returns a Buffer with the binary image data instead of a base-64 encoded string. You can call `.asBase64Text()` on the response to get a base64 encoded string (see the sketch below).
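
A sketch of the new return type; the plain-string prompt assumes a model wrapped with `.withBasicPrompt()`, and the model itself is a placeholder for any image generation model:

```ts
import fs from "node:fs";
import { generateImage } from "modelfusion";

// Placeholder for any image generation model instance
// (e.g. Stability AI or OpenAI DALL-E), wrapped with .withBasicPrompt().
declare const imageGenerationModel: any;

// Since v0.47.0, generateImage resolves to a Buffer with the binary image:
const image = await generateImage(imageGenerationModel, "a painting of a lighthouse");

// The Buffer can be written straight to disk; no base64 decoding needed.
fs.writeFileSync("lighthouse.png", image);
```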

## v0.46.0 - 2023-10-14

### Added

- `.withChatPrompt()` and `.withInstructionPrompt()` shorthand methods.

## v0.45.0 - 2023-10-14

### Changed

- Updated Zod to 3.22.4. You need to use Zod 3.22.4 or higher in your project.

## v0.44.0 - 2023-10-13

### Added

- Store runs in AsyncLocalStorage for convenience (Node.js only).

## v0.43.0 - 2023-10-12

### Added

- Guard function.

## v0.42.0 - 2023-10-11

### Added

- Anthropic model support (Claude 2, Claude instant).

## v0.41.0 - 2023-10-05

### Changed

**breaking change**: generics simplification to enable dynamic model usage. Models can be used more easily as function parameters.

- `output` renamed to `value` in `asFullResponse()`
- model settings can no longer be configured as a model options parameter. Use `.withSettings()` instead.

## v0.40.0 - 2023-10-04

### Changed

**breaking change**: moved Pinecone integration into `@modelfusion/pinecone` module.

## v0.39.0 - 2023-10-03

### Added

- `readEventSource` for parsing a server-sent event stream using the JavaScript EventSource.

### Changed

**breaking change**: generalization to use Schema instead of Zod.

- `MemoryVectorIndex.deserialize` requires a `Schema`, e.g. `new ZodSchema` (from ModelFusion).
- `readEventSourceStream` requires a `Schema` (see the sketch after this list).
- `UncheckedJsonSchema[Schema/StructureDefinition]` renamed to `Unchecked[Schema/StructureDefinition]`.
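
A sketch of wrapping a Zod schema in `new ZodSchema(...)` so it satisfies the generalized `Schema` interface; the option names on `readEventSourceStream` and the shape of the yielded events are assumptions:

```ts
import { z } from "zod";
import { ZodSchema, readEventSourceStream } from "modelfusion";

// Wrap a Zod schema so it satisfies ModelFusion's Schema interface:
const eventSchema = new ZodSchema(z.object({ textDelta: z.string() }));

declare const response: Response; // e.g. from fetch()

// Option names assumed; readEventSourceStream now takes a Schema
// instead of a raw Zod schema.
const events = readEventSourceStream({
  stream: response.body!,
  schema: eventSchema,
});

for await (const event of events) {
  process.stdout.write(event.textDelta);
}
```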

## v0.38.0 - 2023-10-02

### Changed

**breaking change**: Generalized embeddings beyond text embedding. A usage sketch follows the list.

- `embedText` renamed to `embed`.
- `embedTexts` renamed to `embedMany`
- Removed filtering from `VectorIndexRetriever` query (still available as a setting).
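
A sketch of the renamed functions, assuming the era's `embed(model, value)` / `embedMany(model, values)` call shape with an OpenAI embedding model:

```ts
import { OpenAITextEmbeddingModel, embed, embedMany } from "modelfusion";

const embeddingModel = new OpenAITextEmbeddingModel({
  model: "text-embedding-ada-002",
});

// Single value (previously embedText):
const embedding = await embed(embeddingModel, "sunny beach");

// Many values at once (previously embedTexts):
const embeddings = await embedMany(embeddingModel, [
  "sunny beach",
  "rainy forest",
]);
```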

## v0.37.0 - 2023-10-02

### Added

- `VectorIndexRetriever` supports a filter option that is passed to the vector index.
- `MemoryVectorIndex` supports filter functions that are applied to the objects before calculating the embeddings.

## v0.36.0 - 2023-10-02

### Added

- `basic-text` logger logs function ids when available.
- `retrieve` produces events for logging and observability.

## v0.35.2 - 2023-09-27

### Fixed

- Support empty stop sequences when calling OpenAI text and chat models.

## v0.35.1 - 2023-09-27

### Fixed

- Fixed bugs in `streamStructure` partial JSON parsing.

## v0.35.0 - 2023-09-26

### Added

- `streamStructure` for streaming structured responses, e.g. from OpenAI function calls. Thanks [@bjsi](https://github.com/bjsi) for the input!

## v0.34.0 - 2023-09-25

### Added

- First version of event source utilities: `AsyncQueue`, `createEventSourceStream`, `readEventSourceStream`.

## v0.33.1 - 2023-09-24

### Fixed

- Remove resolution part from type definitions.

## v0.33.0 - 2023-09-19

### Changed

**breaking change**: Generalized vector store upsert/retrieve beyond text chunks:

- `upsertTextChunks` renamed to `upsertIntoVectorIndex`. Syntax has changed.
- `retrieveTextChunks` renamed to `retrieve`
- `SimilarTextChunksFromVectorIndexRetriever` renamed to `VectorIndexRetriever`

## v0.32.0 - 2023-09-19

### Added

- OpenAI gpt-3.5-turbo-instruct model support.
- Autocomplete for Stability AI models (thanks [@Danielwinkelmann](https://github.com/Danielwinkelmann)!)

### Changed

- Downgrade Zod version to 3.21.4 because of https://github.com/colinhacks/zod/issues/2697

## v0.31.0 - 2023-09-13

### Changed

- **breaking change**: Renamed chat format construction functions to follow the pattern `map[Chat|Instruction]PromptTo[FORMAT]Format()`, e.g. `mapInstructionPromptToAlpacaFormat()`, for easy auto-completion.

### Removed

- **breaking change**: The prompts for `generateStructure` and `generateStructureOrText` have been simplified. You can remove the `OpenAIChatPrompt.forStructureCurried` (and similar) parts.

## v0.30.0 - 2023-09-10

### Added

- You can directly pass JSON schemas into `generateStructure` and `generateStructureOrText` calls without validation using `UncheckedJsonSchemaStructureDefinition`. This is useful when you need more flexibility and don't require type inference. See `examples/basic/src/util/schema/generate-structure-unchecked-json-schema-example.ts`.

### Changed

- **BREAKING CHANGE**: renamed `generateJson` and `generateJsonOrText` to `generateStructure` and `generateStructureOrText`.
- **BREAKING CHANGE**: introduced `ZodSchema` and `ZodStructureDefinition`. These are required for `generateStructure` and `generateStructureOrText` calls and in tools.
- **BREAKING CHANGE**: renamed the corresponding methods and objects.

Why this breaking change?

ModelFusion is currently tied to Zod, but there are many other type checking libraries out there, and Zod does not map perfectly to JSON Schema (which is used in OpenAI function calling).
Enabling you to use JSON Schema directly in ModelFusion is a first step towards decoupling ModelFusion from Zod.
You can also configure your own schema adapters that e.g. use Ajv or another library.
Since this change already affected all JSON generation calls and tools, I included other changes that I had planned in the same area (e.g., renaming to generateStructure and making it more consistent).

## v0.29.0 - 2023-09-09

### Added

- `describeImage` model function for image captioning and OCR. HuggingFace provider available.

## v0.28.0 - 2023-09-09

### Added

- BaseUrlApiConfiguration class for setting up API configurations with custom base URLs and headers (see the sketch below).
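
A sketch of pointing a provider at a proxy or self-hosted endpoint; the proxy URL and environment variable are hypothetical, and the option names (`baseUrl`, `headers`) should be verified against the docs:

```ts
import { BaseUrlApiConfiguration } from "modelfusion";

const api = new BaseUrlApiConfiguration({
  baseUrl: "https://my-proxy.example.com/openai", // hypothetical endpoint
  headers: {
    Authorization: `Bearer ${process.env.PROXY_API_KEY}`, // hypothetical env var
  },
});

// The configuration is then passed to a model, e.g.:
// new OpenAIChatModel({ api, model: "gpt-3.5-turbo" });
```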

## v0.27.0 - 2023-09-07

### Added

- Support for running OpenAI on Microsoft Azure.

### Changed

- **Breaking change**: Introduce API configuration. This affects setting the baseUrl, throttling, and retries.
- Improved Helicone support via `HeliconeOpenAIApiConfiguration`.

## v0.26.0 - 2023-09-06

### Added

- LMNT speech synthesis support.

## v0.25.0 - 2023-09-05

### Changed

- Separated cost calculation from Run.

## v0.24.1 - 2023-09-04

### Added

- Exposed `logitBias` setting for OpenAI chat and text generation models.

## v0.24.0 - 2023-09-02

### Added

- Support for fine-tuned OpenAI models (for the `davinci-002`, `babbage-002`, and `gpt-3.5-turbo` base models).

## v0.23.0 - 2023-08-31

### Added

- Function logging support.
- Usage information for events.
- Filtering of model settings for events.

## v0.22.0 - 2023-08-28

### Changed

- **Breaking change**: Restructured the function call events.

## v0.21.0 - 2023-08-26

### Changed

- **Breaking change**: Reworked the function observer system. See [Function observers](https://modelfusion.dev/guide/util/observer) for details on how to use the new system.

## v0.20.0 - 2023-08-24

### Changed

- **Breaking change**: Use `.asFullResponse()` to get full responses from model functions (replaces the `fullResponse: true` option).

## v0.19.0 - 2023-08-23

### Added

- Support for "babbage-002" and "davinci-002" OpenAI base models.

### Fixed

- Choose correct tokenizer for older OpenAI text models.

## v0.18.0 - 2023-08-22

### Added

- Support for ElevenLabs speech synthesis parameters.

## v0.17.0 - 2023-08-21

### Added

- `generateSpeech` function to generate speech from text.
- ElevenLabs support.

## v0.15.0 - 2023-08-21

### Changed

- Introduced unified `stopSequences` and `maxCompletionTokens` properties for all text generation models. **Breaking change**: `maxCompletionTokens` and `stopSequences` are part of the base TextGenerationModel. Specific names for these properties in models have been replaced by this, e.g. `maxTokens` in OpenAI models is `maxCompletionTokens` (see the sketch below).
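
A sketch of the unified settings, assuming this era's `OpenAITextGenerationModel` class (renamed to `OpenAICompletionModel` in v0.49.0) and the `.withSettings()` method:

```ts
import { OpenAITextGenerationModel } from "modelfusion";

// The unified settings work the same across text generation models:
const model = new OpenAITextGenerationModel({
  model: "text-davinci-003",
  maxCompletionTokens: 256, // was `maxTokens` on OpenAI models
  stopSequences: ["\n\n"],
});

// Settings can also be adjusted later without mutating the original model:
const shorterModel = model.withSettings({ maxCompletionTokens: 64 });
```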

## v0.14.0 - 2023-08-17

### Changed

- **Breaking change**: Renamed prompt mappings (and related code) to prompt format.
- Improved type inference for WebSearchTool and executeTool.

## v0.12.0 - 2023-08-15

### Added

- JsonTextGenerationModel and InstructionWithSchemaPrompt to support generateJson on text generation models.

## v0.11.0 - 2023-08-14

### Changed

- WebSearchTool signature updated.

## v0.10.0 - 2023-08-13

### Added

- Convenience functions to create OpenAI chat messages from tool calls and results.

## v0.9.0 - 2023-08-13

### Added

- `WebSearchTool` definition to support the SerpAPI tool (separate package: `@modelfusion/serpapi-tools`)

## v0.8.0 - 2023-08-12

### Added

- `executeTool` function that directly executes a single tool and records execution metadata.

### Changed

- Reworked event system and introduced RunFunctionEvent.

## v0.7.0 - 2023-08-10

### Changed

- **Breaking change**: Model functions return a simple object by default to make the 95% use case easier. You can use the `fullResponse` option to get a richer response object that includes the original model response and metadata (see the sketch below).
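
A sketch of the two result shapes; the model is a placeholder, and the exact property names on the rich result are era-specific and not confirmed here:

```ts
import { generateText } from "modelfusion";

declare const model: any; // placeholder for any text generation model

// Default: resolves to just the generated text.
const text = await generateText(model, "Write a tagline for a bakery:");

// Opt in to the rich result when you need the raw model response and
// call metadata (property names on the result are era-specific):
const rich = await generateText(model, "Write a tagline for a bakery:", {
  fullResponse: true,
});
```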

## v0.6.0 - 2023-08-07

### Added

- `splitTextChunk` function.

### Changed

- **Breaking change**: Restructured text splitter functions.

## v0.5.0 - 2023-08-07

### Added

- `splitTextChunks` function.
- Chat with PDF demo.

### Changed

- **Breaking change**: Renamed VectorIndexSimilarTextChunkRetriever to SimilarTextChunksFromVectorIndexRetriever.
- **Breaking change**: Renamed 'content' property in TextChunk to 'text'.

### Removed

- `VectorIndexTextChunkStore`

## v0.4.1 - 2023-08-06

### Fixed

- Type inference bug in `trimChatPrompt`.

## v0.4.0 - 2023-08-06

### Added

- HuggingFace text embedding support.

## v0.3.0 - 2023-08-05

### Added

- Helicone observability integration.

## v0.2.0 - 2023-08-04

### Added

- Instruction prompts can contain optional `input` property.
- Alpaca instruction prompt mapping.
- Vicuna chat prompt mapping.

## v0.1.1 - 2023-08-02

### Changed

- Docs updated to ModelFusion.

## v0.1.0 - 2023-08-01

### Changed

- **Breaking Change**: Renamed to `modelfusion` (from `ai-utils.js`).

## v0.0.43 - 2023-08-01

### Changed

- **Breaking Change**: model functions return rich objects that include the result, the model response and metadata. This enables you to access the original model response easily when you need it and also use the metadata outside of runs.

## v0.0.42 - 2023-07-31

### Added

- `trimChatPrompt()` function to fit chat prompts into the context window and leave enough space for the completion (see the sketch after this list).
- `maxCompletionTokens` property on TextGenerationModels.
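
A sketch of `trimChatPrompt`; the object-style signature is an assumption, and the model and prompt are placeholders:

```ts
import { trimChatPrompt } from "modelfusion";

declare const model: any; // placeholder for a chat model with a context window
declare const chatPrompt: any; // placeholder for a ChatPrompt value

// Keeps the system message and as many recent messages as fit into the
// model's context window, reserving space for the completion tokens.
const trimmedPrompt = await trimChatPrompt({
  prompt: chatPrompt,
  model,
});
```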

### Changed

- Renamed `withMaxTokens` to `withMaxCompletionTokens` on TextGenerationModels.

### Removed

- `composeRecentMessagesOpenAIChatPrompt` function (use `trimChatPrompt` instead).

## v0.0.41 - 2023-07-30

### Added

- ChatPrompt concept (with chat prompt mappings for text, OpenAI chat, and Llama 2 prompts).

### Changed

- Renamed prompt mappings and changed into functions.

## v0.0.40 - 2023-07-30

### Added

- Prompt mapping support for text generation and streaming.
- Added instruction prompt concept and mapping.
- Option to specify context window size for Llama.cpp text generation models.

### Changed

- Renamed 'maxTokens' to 'contextWindowSize' where applicable.
- Restructured how tokenizers are exposed by text generation models.

## v0.0.39 - 2023-07-26

### Added

- llama.cpp embedding support.

## v0.0.38 - 2023-07-24

### Changed

- `zod` and `zod-to-json-schema` are peer dependencies and no longer included in the package.

## v0.0.37 - 2023-07-23

### Changed

- `generateJsonOrText`, `useToolOrGenerateText`, `useTool` return additional information in the response (e.g. the parameters and additional text).

## v0.0.36 - 2023-07-23

### Changed

- Renamed `callTool` to `useTool` and `callToolOrGenerateText` to `useToolOrGenerateText`.

## v0.0.35 - 2023-07-22

### Added

- `generateJsonOrText`
- Tools: `Tool` class, `callTool`, `callToolOrGenerateText`

### Changed

- Restructured "generateJson" arguments.

## v0.0.34 - 2023-07-18

### Removed

- `asFunction` model function variants. Use JavaScript lambda functions instead.

## v0.0.33 - 2023-07-18

### Added

- OpenAIChatAutoFunctionPrompt to call the OpenAI functions API with multiple functions in 'auto' mode.

## v0.0.32 - 2023-07-15

### Changed

- Changed the prompt format of the generateJson function.

## v0.0.31 - 2023-07-14

### Changed

- Reworked interaction with vectors stores. Removed VectorDB, renamed VectorStore to VectorIndex, and introduced upsertTextChunks and retrieveTextChunks functions.

## v0.0.30 - 2023-07-13

### Fixed

- Bugs related to `performance` not being available.

## v0.0.29 - 2023-07-13

### Added

- Llama.cpp tokenization support.

### Changed

- Split Tokenizer API into BasicTokenizer and FullTokenizer.
- Introduce countTokens function (replacing Tokenizer.countTokens); see the sketch below.
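
A sketch of the standalone `countTokens` function; the tokenizer is a placeholder for a tokenizer instance obtained from a model (the exact accessor is era-specific):

```ts
import { countTokens } from "modelfusion";

declare const tokenizer: any; // placeholder for a (Basic|Full)Tokenizer instance

// countTokens replaces the former Tokenizer.countTokens method:
const tokenCount = await countTokens(tokenizer, "Hello, world!");
console.log(tokenCount);
```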

## v0.0.28 - 2023-07-12

### Added

- Events for streamText.

## v0.0.27 - 2023-07-11

### Added

- TextDeltaEventSource for Client/Server streaming support.

### Fixed

- End-of-stream bug in Llama.cpp text streaming.

## v0.0.26 - 2023-07-11

### Added

- Streaming support for Cohere text generation models.

## v0.0.25 - 2023-07-10

### Added

- Streaming support for OpenAI text completion models.
- OpenAI function streaming support (in low-level API).

## v0.0.24 - 2023-07-09

### Added

- Generalized text streaming (async string iterable, useful for command line streaming).
- Streaming support for Llama.cpp text generation.

## v0.0.23 - 2023-07-08

### Added

- Llama.cpp text generation support.

## v0.0.22 - 2023-07-08

### Changed

- Convert all main methods (e.g. `model.generateText(...)`) to a functional API (i.e., `generateText(model, ...)`); see the sketch below.
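
A sketch of the change, taken directly from the forms named in the entry; the model is a placeholder:

```ts
import { generateText } from "modelfusion";

declare const model: any; // placeholder for any text generation model

// Before v0.0.22 (method-style API):
//   const text = await model.generateText("Hello, world!");

// Since v0.0.22 (functional API), the model is the first argument:
const text = await generateText(model, "Hello, world!");
```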

## v0.0.21 - 2023-07-07

### New

- JSON generation model.

## v0.0.20 - 2023-07-02

### New

- Automatic1111 image generation provider.

## v0.0.19 - 2023-06-30

### New

- Cost calculation for OpenAI image generation and transcription models.

## v0.0.18 - 2023-06-28

### New

- Cost calculation for OpenAI text generation, chat and embedding models.

### Changed

- Renamed RunContext to Run. Introduced DefaultRun.
- Changed events and observers.

## v0.0.17 - 2023-06-14

### New

1. Updated OpenAI models.
1. Low-level support for OpenAI chat functions API (via `OpenAIChatModel.callApi`).
1. TranscriptionModel and OpenAITranscriptionModel (using `whisper`)

### Changed

1. Single optional parameter for functions/method that contains run, functionId, etc.

## v0.0.16 - 2023-06-13

### Fixed

1. Retry is not attempted when you have run out of OpenAI credits.
1. Vercel edge function support (switched to nanoid for unique IDs).

### Changed

1. Improved OpenAI chat streaming API.
1. Changed `asFunction` variants from namespaced functions into stand-alone functions.

## v0.0.15 - 2023-06-12

### Changed

1. Documentation update.

## v0.0.14 - 2023-06-11

### Changed

1. Major rework of embedding APIs.

## v0.0.13 - 2023-06-10

### Changed

1. Major rework of text and image generation APIs.

## v0.0.12 - 2023-06-06

## v0.0.11 - 2023-06-05

### Changed

1. Various renames.

## v0.0.10 - 2023-06-04

### New

1. Pinecone VectorDB support
1. Cohere tokenization support

## v0.0.9 - 2023-06-03

### New

1. OpenAI DALL-E image generation support
1. `generateImage` function
1. Throttling and retries on model level

## v0.0.8 - 2023-06-02

### New

1. Stability AI image generation support
1. Image generation Next.js example

### Changed

1. Updated PDF to tweet example with style transfer

## v0.0.7 - 2023-06-01

### New

1. Hugging Face text generation support
1. Memory vector DB

## v0.0.6 - 2023-05-31

### New

1. Cohere embedding API support

### Changes

1. Restructured retry logic
1. `embed` embeds many texts at once

## v0.0.5 - 2023-05-30

### New

1. Cohere text generation support
1. OpenAI chat streams can be returned as delta async iterables
1. Documentation of integration APIs and models

## v0.0.4 - 2023-05-29

### New

1. OpenAI embedding support
1. Text embedding functions
1. Chat streams can be returned as ReadableStream or AsyncIterable
1. Basic examples under `examples/basic`
1. Initial documentation available at [modelfusion.dev](https://modelfusion.dev)

## v0.0.3 - 2023-05-28

### New

1. Voice recording and transcription Next.js app example.
1. OpenAI transcription support (Whisper).

## v0.0.2 - 2023-05-27

### New

1. BabyAGI Example in TypeScript
1. TikToken for OpenAI: We've added tiktoken to aid in tokenization and token counting, including those for message and prompt overhead tokens in chat.
1. Tokenization-based Recursive Splitter: A new splitter that operates recursively using tokenization.
1. Prompt Management Utility: An enhancement to fit recent chat messages into the context window.

## v0.0.1 - 2023-05-26

### New

1. AI Chat Example using Next.js: An example demonstrating AI chat implementation using Next.js.
1. PDF to Twitter Thread Example: This shows how a PDF can be converted into a Twitter thread.
1. OpenAI Chat Completion Streaming Support: A feature providing real-time response capabilities using OpenAI's chat completion streaming.
1. OpenAI Chat and Text Completion Support: This addition enables the software to handle both chat and text completions from OpenAI.
1. Retry Management: A feature to enhance resilience by managing retry attempts for tasks.
1. Task Progress Reporting and Abort Signals: This allows users to track the progress of tasks and gives the ability to abort tasks when needed.
1. Recursive Character Splitter: A feature to split text into characters recursively for more detailed text analysis.
1. Recursive Text Mapping: This enables recursive mapping of text, beneficial for tasks like summarization or extraction.
1. Split-Map-Filter-Reduce for Text Processing: A process chain developed for sophisticated text handling, allowing operations to split, map, filter, and reduce text data (a self-contained sketch follows below).
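
To illustrate the split-map-filter-reduce idea without relying on the original helper names (which have long since been reworked), here is a self-contained sketch; all function names are illustrative, not library API:

```ts
// Split: break a long text into chunks of at most maxChars characters.
function split(text: string, maxChars: number): string[] {
  const chunks: string[] = [];
  for (let i = 0; i < text.length; i += maxChars) {
    chunks.push(text.slice(i, i + maxChars));
  }
  return chunks;
}

// Map: apply an async transformation (e.g. a summarization call) per chunk.
async function mapChunks(
  chunks: string[],
  transform: (chunk: string) => Promise<string>
): Promise<string[]> {
  return Promise.all(chunks.map(transform));
}

// Filter + Reduce: drop empty results and join the rest into one text.
async function splitMapFilterReduce(
  text: string,
  transform: (chunk: string) => Promise<string>
): Promise<string> {
  const mapped = await mapChunks(split(text, 1000), transform);
  return mapped.filter((chunk) => chunk.trim().length > 0).join("\n");
}
```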