modelfusion 0.133.0 → 0.135.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (972)
  1. package/index.cjs +10854 -24
  2. package/index.cjs.map +1 -0
  3. package/index.d.cts +9547 -0
  4. package/index.d.ts +9547 -9
  5. package/index.js +10630 -9
  6. package/index.js.map +1 -0
  7. package/internal/index.cjs +865 -28
  8. package/internal/index.cjs.map +1 -0
  9. package/internal/index.d.cts +675 -0
  10. package/internal/index.d.ts +675 -8
  11. package/internal/index.js +820 -7
  12. package/internal/index.js.map +1 -0
  13. package/package.json +7 -7
  14. package/CHANGELOG.md +0 -2272
  15. package/README.md +0 -697
  16. package/core/DefaultRun.cjs +0 -72
  17. package/core/DefaultRun.d.ts +0 -24
  18. package/core/DefaultRun.js +0 -68
  19. package/core/ExecuteFunctionEvent.cjs +0 -2
  20. package/core/ExecuteFunctionEvent.d.ts +0 -7
  21. package/core/ExecuteFunctionEvent.js +0 -1
  22. package/core/ExtensionFunctionEvent.cjs +0 -2
  23. package/core/ExtensionFunctionEvent.d.ts +0 -11
  24. package/core/ExtensionFunctionEvent.js +0 -1
  25. package/core/FunctionEvent.cjs +0 -2
  26. package/core/FunctionEvent.d.ts +0 -85
  27. package/core/FunctionEvent.js +0 -1
  28. package/core/FunctionEventSource.cjs +0 -32
  29. package/core/FunctionEventSource.d.ts +0 -12
  30. package/core/FunctionEventSource.js +0 -28
  31. package/core/FunctionObserver.cjs +0 -2
  32. package/core/FunctionObserver.d.ts +0 -7
  33. package/core/FunctionObserver.js +0 -1
  34. package/core/FunctionOptions.cjs +0 -2
  35. package/core/FunctionOptions.d.ts +0 -49
  36. package/core/FunctionOptions.js +0 -1
  37. package/core/LogFormat.cjs +0 -10
  38. package/core/LogFormat.d.ts +0 -9
  39. package/core/LogFormat.js +0 -9
  40. package/core/ModelFusionConfiguration.cjs +0 -21
  41. package/core/ModelFusionConfiguration.d.ts +0 -6
  42. package/core/ModelFusionConfiguration.js +0 -14
  43. package/core/Run.cjs +0 -2
  44. package/core/Run.d.ts +0 -31
  45. package/core/Run.js +0 -1
  46. package/core/Vector.cjs +0 -2
  47. package/core/Vector.d.ts +0 -5
  48. package/core/Vector.js +0 -1
  49. package/core/api/AbortError.cjs +0 -9
  50. package/core/api/AbortError.d.ts +0 -3
  51. package/core/api/AbortError.js +0 -5
  52. package/core/api/AbstractApiConfiguration.cjs +0 -37
  53. package/core/api/AbstractApiConfiguration.d.ts +0 -17
  54. package/core/api/AbstractApiConfiguration.js +0 -33
  55. package/core/api/ApiCallError.cjs +0 -73
  56. package/core/api/ApiCallError.d.ts +0 -30
  57. package/core/api/ApiCallError.js +0 -69
  58. package/core/api/ApiConfiguration.cjs +0 -2
  59. package/core/api/ApiConfiguration.d.ts +0 -41
  60. package/core/api/ApiConfiguration.js +0 -1
  61. package/core/api/ApiFacade.cjs +0 -20
  62. package/core/api/ApiFacade.d.ts +0 -4
  63. package/core/api/ApiFacade.js +0 -4
  64. package/core/api/BaseUrlApiConfiguration.cjs +0 -78
  65. package/core/api/BaseUrlApiConfiguration.d.ts +0 -37
  66. package/core/api/BaseUrlApiConfiguration.js +0 -73
  67. package/core/api/BaseUrlApiConfiguration.test.cjs +0 -11
  68. package/core/api/BaseUrlApiConfiguration.test.d.ts +0 -1
  69. package/core/api/BaseUrlApiConfiguration.test.js +0 -9
  70. package/core/api/CustomHeaderProvider.cjs +0 -2
  71. package/core/api/CustomHeaderProvider.d.ts +0 -2
  72. package/core/api/CustomHeaderProvider.js +0 -1
  73. package/core/api/LoadAPIKeyError.cjs +0 -16
  74. package/core/api/LoadAPIKeyError.d.ts +0 -9
  75. package/core/api/LoadAPIKeyError.js +0 -12
  76. package/core/api/RetryError.cjs +0 -42
  77. package/core/api/RetryError.d.ts +0 -18
  78. package/core/api/RetryError.js +0 -38
  79. package/core/api/RetryFunction.cjs +0 -2
  80. package/core/api/RetryFunction.d.ts +0 -1
  81. package/core/api/RetryFunction.js +0 -1
  82. package/core/api/ThrottleFunction.cjs +0 -2
  83. package/core/api/ThrottleFunction.d.ts +0 -1
  84. package/core/api/ThrottleFunction.js +0 -1
  85. package/core/api/callWithRetryAndThrottle.cjs +0 -7
  86. package/core/api/callWithRetryAndThrottle.d.ts +0 -7
  87. package/core/api/callWithRetryAndThrottle.js +0 -3
  88. package/core/api/index.cjs +0 -42
  89. package/core/api/index.d.ts +0 -13
  90. package/core/api/index.js +0 -13
  91. package/core/api/loadApiKey.cjs +0 -22
  92. package/core/api/loadApiKey.d.ts +0 -6
  93. package/core/api/loadApiKey.js +0 -18
  94. package/core/api/postToApi.cjs +0 -185
  95. package/core/api/postToApi.d.ts +0 -37
  96. package/core/api/postToApi.js +0 -175
  97. package/core/api/retryNever.cjs +0 -8
  98. package/core/api/retryNever.d.ts +0 -4
  99. package/core/api/retryNever.js +0 -4
  100. package/core/api/retryWithExponentialBackoff.cjs +0 -50
  101. package/core/api/retryWithExponentialBackoff.d.ts +0 -10
  102. package/core/api/retryWithExponentialBackoff.js +0 -46
  103. package/core/api/throttleMaxConcurrency.cjs +0 -65
  104. package/core/api/throttleMaxConcurrency.d.ts +0 -7
  105. package/core/api/throttleMaxConcurrency.js +0 -61
  106. package/core/api/throttleOff.cjs +0 -8
  107. package/core/api/throttleOff.d.ts +0 -5
  108. package/core/api/throttleOff.js +0 -4
  109. package/core/cache/Cache.cjs +0 -2
  110. package/core/cache/Cache.d.ts +0 -12
  111. package/core/cache/Cache.js +0 -1
  112. package/core/cache/MemoryCache.cjs +0 -23
  113. package/core/cache/MemoryCache.d.ts +0 -15
  114. package/core/cache/MemoryCache.js +0 -19
  115. package/core/cache/index.cjs +0 -18
  116. package/core/cache/index.d.ts +0 -2
  117. package/core/cache/index.js +0 -2
  118. package/core/executeFunction.cjs +0 -13
  119. package/core/executeFunction.d.ts +0 -2
  120. package/core/executeFunction.js +0 -9
  121. package/core/executeFunctionCall.cjs +0 -86
  122. package/core/executeFunctionCall.d.ts +0 -10
  123. package/core/executeFunctionCall.js +0 -82
  124. package/core/getFunctionCallLogger.cjs +0 -106
  125. package/core/getFunctionCallLogger.d.ts +0 -3
  126. package/core/getFunctionCallLogger.js +0 -102
  127. package/core/getRun.cjs +0 -57
  128. package/core/getRun.d.ts +0 -9
  129. package/core/getRun.js +0 -29
  130. package/core/index.cjs +0 -44
  131. package/core/index.d.ts +0 -15
  132. package/core/index.js +0 -15
  133. package/core/schema/JSONParseError.cjs +0 -37
  134. package/core/schema/JSONParseError.d.ts +0 -15
  135. package/core/schema/JSONParseError.js +0 -33
  136. package/core/schema/JsonSchemaProducer.cjs +0 -2
  137. package/core/schema/JsonSchemaProducer.d.ts +0 -9
  138. package/core/schema/JsonSchemaProducer.js +0 -1
  139. package/core/schema/Schema.cjs +0 -2
  140. package/core/schema/Schema.d.ts +0 -20
  141. package/core/schema/Schema.js +0 -1
  142. package/core/schema/TypeValidationError.cjs +0 -36
  143. package/core/schema/TypeValidationError.d.ts +0 -15
  144. package/core/schema/TypeValidationError.js +0 -32
  145. package/core/schema/UncheckedSchema.cjs +0 -30
  146. package/core/schema/UncheckedSchema.d.ts +0 -16
  147. package/core/schema/UncheckedSchema.js +0 -25
  148. package/core/schema/ZodSchema.cjs +0 -47
  149. package/core/schema/ZodSchema.d.ts +0 -27
  150. package/core/schema/ZodSchema.js +0 -42
  151. package/core/schema/index.cjs +0 -24
  152. package/core/schema/index.d.ts +0 -8
  153. package/core/schema/index.js +0 -8
  154. package/core/schema/parseJSON.cjs +0 -48
  155. package/core/schema/parseJSON.d.ts +0 -57
  156. package/core/schema/parseJSON.js +0 -40
  157. package/core/schema/validateTypes.cjs +0 -65
  158. package/core/schema/validateTypes.d.ts +0 -34
  159. package/core/schema/validateTypes.js +0 -60
  160. package/model-function/AbstractModel.cjs +0 -22
  161. package/model-function/AbstractModel.d.ts +0 -13
  162. package/model-function/AbstractModel.js +0 -18
  163. package/model-function/Delta.cjs +0 -2
  164. package/model-function/Delta.d.ts +0 -7
  165. package/model-function/Delta.js +0 -1
  166. package/model-function/Model.cjs +0 -2
  167. package/model-function/Model.d.ts +0 -31
  168. package/model-function/Model.js +0 -1
  169. package/model-function/ModelCallEvent.cjs +0 -2
  170. package/model-function/ModelCallEvent.d.ts +0 -57
  171. package/model-function/ModelCallEvent.js +0 -1
  172. package/model-function/ModelCallMetadata.cjs +0 -2
  173. package/model-function/ModelCallMetadata.d.ts +0 -13
  174. package/model-function/ModelCallMetadata.js +0 -1
  175. package/model-function/ModelInformation.cjs +0 -2
  176. package/model-function/ModelInformation.d.ts +0 -4
  177. package/model-function/ModelInformation.js +0 -1
  178. package/model-function/PromptTemplate.cjs +0 -2
  179. package/model-function/PromptTemplate.d.ts +0 -9
  180. package/model-function/PromptTemplate.js +0 -1
  181. package/model-function/classify/Classifier.cjs +0 -2
  182. package/model-function/classify/Classifier.d.ts +0 -10
  183. package/model-function/classify/Classifier.js +0 -1
  184. package/model-function/classify/ClassifyEvent.cjs +0 -2
  185. package/model-function/classify/ClassifyEvent.d.ts +0 -20
  186. package/model-function/classify/ClassifyEvent.js +0 -1
  187. package/model-function/classify/EmbeddingSimilarityClassifier.cjs +0 -97
  188. package/model-function/classify/EmbeddingSimilarityClassifier.d.ts +0 -40
  189. package/model-function/classify/EmbeddingSimilarityClassifier.js +0 -93
  190. package/model-function/classify/classify.cjs +0 -27
  191. package/model-function/classify/classify.d.ts +0 -17
  192. package/model-function/classify/classify.js +0 -23
  193. package/model-function/classify/index.cjs +0 -20
  194. package/model-function/classify/index.d.ts +0 -4
  195. package/model-function/classify/index.js +0 -4
  196. package/model-function/embed/EmbeddingEvent.cjs +0 -2
  197. package/model-function/embed/EmbeddingEvent.d.ts +0 -21
  198. package/model-function/embed/EmbeddingEvent.js +0 -1
  199. package/model-function/embed/EmbeddingModel.cjs +0 -2
  200. package/model-function/embed/EmbeddingModel.d.ts +0 -23
  201. package/model-function/embed/EmbeddingModel.js +0 -1
  202. package/model-function/embed/embed.cjs +0 -77
  203. package/model-function/embed/embed.d.ts +0 -67
  204. package/model-function/embed/embed.js +0 -72
  205. package/model-function/executeStandardCall.cjs +0 -111
  206. package/model-function/executeStandardCall.d.ts +0 -19
  207. package/model-function/executeStandardCall.js +0 -107
  208. package/model-function/executeStreamCall.cjs +0 -180
  209. package/model-function/executeStreamCall.d.ts +0 -20
  210. package/model-function/executeStreamCall.js +0 -176
  211. package/model-function/generate-image/ImageGenerationEvent.cjs +0 -2
  212. package/model-function/generate-image/ImageGenerationEvent.d.ts +0 -18
  213. package/model-function/generate-image/ImageGenerationEvent.js +0 -1
  214. package/model-function/generate-image/ImageGenerationModel.cjs +0 -2
  215. package/model-function/generate-image/ImageGenerationModel.d.ts +0 -22
  216. package/model-function/generate-image/ImageGenerationModel.js +0 -1
  217. package/model-function/generate-image/PromptTemplateImageGenerationModel.cjs +0 -44
  218. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +0 -20
  219. package/model-function/generate-image/PromptTemplateImageGenerationModel.js +0 -40
  220. package/model-function/generate-image/generateImage.cjs +0 -33
  221. package/model-function/generate-image/generateImage.d.ts +0 -43
  222. package/model-function/generate-image/generateImage.js +0 -29
  223. package/model-function/generate-object/ObjectFromTextGenerationModel.cjs +0 -69
  224. package/model-function/generate-object/ObjectFromTextGenerationModel.d.ts +0 -24
  225. package/model-function/generate-object/ObjectFromTextGenerationModel.js +0 -65
  226. package/model-function/generate-object/ObjectFromTextPromptTemplate.cjs +0 -2
  227. package/model-function/generate-object/ObjectFromTextPromptTemplate.d.ts +0 -30
  228. package/model-function/generate-object/ObjectFromTextPromptTemplate.js +0 -1
  229. package/model-function/generate-object/ObjectFromTextStreamingModel.cjs +0 -48
  230. package/model-function/generate-object/ObjectFromTextStreamingModel.d.ts +0 -19
  231. package/model-function/generate-object/ObjectFromTextStreamingModel.js +0 -44
  232. package/model-function/generate-object/ObjectGenerationEvent.cjs +0 -2
  233. package/model-function/generate-object/ObjectGenerationEvent.d.ts +0 -23
  234. package/model-function/generate-object/ObjectGenerationEvent.js +0 -1
  235. package/model-function/generate-object/ObjectGenerationModel.cjs +0 -2
  236. package/model-function/generate-object/ObjectGenerationModel.d.ts +0 -24
  237. package/model-function/generate-object/ObjectGenerationModel.js +0 -1
  238. package/model-function/generate-object/ObjectParseError.cjs +0 -36
  239. package/model-function/generate-object/ObjectParseError.d.ts +0 -15
  240. package/model-function/generate-object/ObjectParseError.js +0 -32
  241. package/model-function/generate-object/ObjectStream.cjs +0 -57
  242. package/model-function/generate-object/ObjectStream.d.ts +0 -32
  243. package/model-function/generate-object/ObjectStream.js +0 -52
  244. package/model-function/generate-object/ObjectStreamingEvent.cjs +0 -2
  245. package/model-function/generate-object/ObjectStreamingEvent.d.ts +0 -7
  246. package/model-function/generate-object/ObjectStreamingEvent.js +0 -1
  247. package/model-function/generate-object/ObjectValidationError.cjs +0 -44
  248. package/model-function/generate-object/ObjectValidationError.d.ts +0 -18
  249. package/model-function/generate-object/ObjectValidationError.js +0 -40
  250. package/model-function/generate-object/generateObject.cjs +0 -45
  251. package/model-function/generate-object/generateObject.d.ts +0 -56
  252. package/model-function/generate-object/generateObject.js +0 -41
  253. package/model-function/generate-object/index.cjs +0 -28
  254. package/model-function/generate-object/index.d.ts +0 -12
  255. package/model-function/generate-object/index.js +0 -12
  256. package/model-function/generate-object/jsonObjectPrompt.cjs +0 -51
  257. package/model-function/generate-object/jsonObjectPrompt.d.ts +0 -15
  258. package/model-function/generate-object/jsonObjectPrompt.js +0 -48
  259. package/model-function/generate-object/streamObject.cjs +0 -80
  260. package/model-function/generate-object/streamObject.d.ts +0 -57
  261. package/model-function/generate-object/streamObject.js +0 -76
  262. package/model-function/generate-speech/SpeechGenerationEvent.cjs +0 -2
  263. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +0 -26
  264. package/model-function/generate-speech/SpeechGenerationEvent.js +0 -1
  265. package/model-function/generate-speech/SpeechGenerationModel.cjs +0 -2
  266. package/model-function/generate-speech/SpeechGenerationModel.d.ts +0 -14
  267. package/model-function/generate-speech/SpeechGenerationModel.js +0 -1
  268. package/model-function/generate-speech/generateSpeech.cjs +0 -27
  269. package/model-function/generate-speech/generateSpeech.d.ts +0 -34
  270. package/model-function/generate-speech/generateSpeech.js +0 -23
  271. package/model-function/generate-speech/index.cjs +0 -20
  272. package/model-function/generate-speech/index.d.ts +0 -4
  273. package/model-function/generate-speech/index.js +0 -4
  274. package/model-function/generate-speech/streamSpeech.cjs +0 -33
  275. package/model-function/generate-speech/streamSpeech.d.ts +0 -40
  276. package/model-function/generate-speech/streamSpeech.js +0 -29
  277. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -24
  278. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -40
  279. package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -20
  280. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -84
  281. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -47
  282. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -80
  283. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -36
  284. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -19
  285. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -32
  286. package/model-function/generate-text/TextGenerationEvent.cjs +0 -2
  287. package/model-function/generate-text/TextGenerationEvent.d.ts +0 -29
  288. package/model-function/generate-text/TextGenerationEvent.js +0 -1
  289. package/model-function/generate-text/TextGenerationModel.cjs +0 -9
  290. package/model-function/generate-text/TextGenerationModel.d.ts +0 -126
  291. package/model-function/generate-text/TextGenerationModel.js +0 -6
  292. package/model-function/generate-text/TextGenerationPromptTemplate.cjs +0 -2
  293. package/model-function/generate-text/TextGenerationPromptTemplate.d.ts +0 -11
  294. package/model-function/generate-text/TextGenerationPromptTemplate.js +0 -1
  295. package/model-function/generate-text/TextGenerationResult.cjs +0 -2
  296. package/model-function/generate-text/TextGenerationResult.d.ts +0 -11
  297. package/model-function/generate-text/TextGenerationResult.js +0 -1
  298. package/model-function/generate-text/generateText.cjs +0 -82
  299. package/model-function/generate-text/generateText.d.ts +0 -41
  300. package/model-function/generate-text/generateText.js +0 -78
  301. package/model-function/generate-text/index.cjs +0 -26
  302. package/model-function/generate-text/index.d.ts +0 -10
  303. package/model-function/generate-text/index.js +0 -10
  304. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +0 -90
  305. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +0 -51
  306. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +0 -84
  307. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.cjs +0 -31
  308. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.d.ts +0 -1
  309. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.js +0 -29
  310. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +0 -96
  311. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +0 -35
  312. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +0 -90
  313. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +0 -60
  314. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.d.ts +0 -1
  315. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +0 -58
  316. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -44
  317. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +0 -58
  318. package/model-function/generate-text/prompt-template/ChatPrompt.js +0 -41
  319. package/model-function/generate-text/prompt-template/ContentPart.cjs +0 -11
  320. package/model-function/generate-text/prompt-template/ContentPart.d.ts +0 -31
  321. package/model-function/generate-text/prompt-template/ContentPart.js +0 -7
  322. package/model-function/generate-text/prompt-template/InstructionPrompt.cjs +0 -2
  323. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +0 -32
  324. package/model-function/generate-text/prompt-template/InstructionPrompt.js +0 -1
  325. package/model-function/generate-text/prompt-template/InvalidPromptError.cjs +0 -28
  326. package/model-function/generate-text/prompt-template/InvalidPromptError.d.ts +0 -13
  327. package/model-function/generate-text/prompt-template/InvalidPromptError.js +0 -24
  328. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +0 -135
  329. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +0 -55
  330. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +0 -128
  331. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +0 -60
  332. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.d.ts +0 -1
  333. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +0 -58
  334. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +0 -150
  335. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +0 -62
  336. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +0 -143
  337. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +0 -60
  338. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts +0 -1
  339. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +0 -58
  340. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +0 -86
  341. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +0 -23
  342. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +0 -80
  343. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +0 -60
  344. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.d.ts +0 -1
  345. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +0 -58
  346. package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +0 -2
  347. package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +0 -8
  348. package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +0 -1
  349. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.cjs +0 -78
  350. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.d.ts +0 -35
  351. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.js +0 -72
  352. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.cjs +0 -60
  353. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.d.ts +0 -1
  354. package/model-function/generate-text/prompt-template/SynthiaPromptTemplate.test.js +0 -58
  355. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +0 -69
  356. package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +0 -23
  357. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +0 -63
  358. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +0 -60
  359. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.d.ts +0 -1
  360. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +0 -58
  361. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +0 -86
  362. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +0 -25
  363. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +0 -80
  364. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +0 -60
  365. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.d.ts +0 -1
  366. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +0 -58
  367. package/model-function/generate-text/prompt-template/index.cjs +0 -43
  368. package/model-function/generate-text/prompt-template/index.d.ts +0 -14
  369. package/model-function/generate-text/prompt-template/index.js +0 -14
  370. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -46
  371. package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +0 -17
  372. package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -42
  373. package/model-function/generate-text/streamText.cjs +0 -54
  374. package/model-function/generate-text/streamText.d.ts +0 -42
  375. package/model-function/generate-text/streamText.js +0 -50
  376. package/model-function/generate-transcription/TranscriptionEvent.cjs +0 -2
  377. package/model-function/generate-transcription/TranscriptionEvent.d.ts +0 -18
  378. package/model-function/generate-transcription/TranscriptionEvent.js +0 -1
  379. package/model-function/generate-transcription/TranscriptionModel.cjs +0 -2
  380. package/model-function/generate-transcription/TranscriptionModel.d.ts +0 -14
  381. package/model-function/generate-transcription/TranscriptionModel.js +0 -1
  382. package/model-function/generate-transcription/generateTranscription.cjs +0 -22
  383. package/model-function/generate-transcription/generateTranscription.d.ts +0 -41
  384. package/model-function/generate-transcription/generateTranscription.js +0 -18
  385. package/model-function/index.cjs +0 -38
  386. package/model-function/index.d.ts +0 -22
  387. package/model-function/index.js +0 -22
  388. package/model-function/tokenize-text/Tokenizer.cjs +0 -2
  389. package/model-function/tokenize-text/Tokenizer.d.ts +0 -43
  390. package/model-function/tokenize-text/Tokenizer.js +0 -1
  391. package/model-function/tokenize-text/countTokens.cjs +0 -10
  392. package/model-function/tokenize-text/countTokens.d.ts +0 -5
  393. package/model-function/tokenize-text/countTokens.js +0 -6
  394. package/model-provider/automatic1111/Automatic1111ApiConfiguration.cjs +0 -22
  395. package/model-provider/automatic1111/Automatic1111ApiConfiguration.d.ts +0 -8
  396. package/model-provider/automatic1111/Automatic1111ApiConfiguration.js +0 -18
  397. package/model-provider/automatic1111/Automatic1111Error.cjs +0 -16
  398. package/model-provider/automatic1111/Automatic1111Error.d.ts +0 -22
  399. package/model-provider/automatic1111/Automatic1111Error.js +0 -13
  400. package/model-provider/automatic1111/Automatic1111Facade.cjs +0 -24
  401. package/model-provider/automatic1111/Automatic1111Facade.d.ts +0 -16
  402. package/model-provider/automatic1111/Automatic1111Facade.js +0 -19
  403. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +0 -101
  404. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +0 -68
  405. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +0 -97
  406. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.cjs +0 -12
  407. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +0 -9
  408. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.js +0 -8
  409. package/model-provider/automatic1111/index.cjs +0 -33
  410. package/model-provider/automatic1111/index.d.ts +0 -5
  411. package/model-provider/automatic1111/index.js +0 -4
  412. package/model-provider/cohere/CohereApiConfiguration.cjs +0 -30
  413. package/model-provider/cohere/CohereApiConfiguration.d.ts +0 -10
  414. package/model-provider/cohere/CohereApiConfiguration.js +0 -26
  415. package/model-provider/cohere/CohereError.cjs +0 -13
  416. package/model-provider/cohere/CohereError.d.ts +0 -11
  417. package/model-provider/cohere/CohereError.js +0 -10
  418. package/model-provider/cohere/CohereFacade.cjs +0 -80
  419. package/model-provider/cohere/CohereFacade.d.ts +0 -68
  420. package/model-provider/cohere/CohereFacade.js +0 -73
  421. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +0 -170
  422. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +0 -131
  423. package/model-provider/cohere/CohereTextEmbeddingModel.js +0 -166
  424. package/model-provider/cohere/CohereTextGenerationModel.cjs +0 -244
  425. package/model-provider/cohere/CohereTextGenerationModel.d.ts +0 -379
  426. package/model-provider/cohere/CohereTextGenerationModel.js +0 -240
  427. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +0 -36
  428. package/model-provider/cohere/CohereTextGenerationModel.test.d.ts +0 -1
  429. package/model-provider/cohere/CohereTextGenerationModel.test.js +0 -34
  430. package/model-provider/cohere/CohereTokenizer.cjs +0 -116
  431. package/model-provider/cohere/CohereTokenizer.d.ts +0 -113
  432. package/model-provider/cohere/CohereTokenizer.js +0 -112
  433. package/model-provider/cohere/index.cjs +0 -34
  434. package/model-provider/cohere/index.d.ts +0 -6
  435. package/model-provider/cohere/index.js +0 -5
  436. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +0 -33
  437. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.d.ts +0 -11
  438. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +0 -29
  439. package/model-provider/elevenlabs/ElevenLabsFacade.cjs +0 -27
  440. package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +0 -21
  441. package/model-provider/elevenlabs/ElevenLabsFacade.js +0 -22
  442. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +0 -218
  443. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +0 -43
  444. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +0 -214
  445. package/model-provider/elevenlabs/index.cjs +0 -32
  446. package/model-provider/elevenlabs/index.d.ts +0 -3
  447. package/model-provider/elevenlabs/index.js +0 -3
  448. package/model-provider/huggingface/HuggingFaceApiConfiguration.cjs +0 -30
  449. package/model-provider/huggingface/HuggingFaceApiConfiguration.d.ts +0 -10
  450. package/model-provider/huggingface/HuggingFaceApiConfiguration.js +0 -26
  451. package/model-provider/huggingface/HuggingFaceError.cjs +0 -13
  452. package/model-provider/huggingface/HuggingFaceError.d.ts +0 -11
  453. package/model-provider/huggingface/HuggingFaceError.js +0 -10
  454. package/model-provider/huggingface/HuggingFaceFacade.cjs +0 -64
  455. package/model-provider/huggingface/HuggingFaceFacade.d.ts +0 -55
  456. package/model-provider/huggingface/HuggingFaceFacade.js +0 -58
  457. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +0 -131
  458. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +0 -56
  459. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +0 -127
  460. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +0 -144
  461. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +0 -84
  462. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +0 -140
  463. package/model-provider/huggingface/index.cjs +0 -33
  464. package/model-provider/huggingface/index.d.ts +0 -5
  465. package/model-provider/huggingface/index.js +0 -4
  466. package/model-provider/index.cjs +0 -28
  467. package/model-provider/index.d.ts +0 -12
  468. package/model-provider/index.js +0 -12
  469. package/model-provider/llamacpp/LlamaCppApiConfiguration.cjs +0 -22
  470. package/model-provider/llamacpp/LlamaCppApiConfiguration.d.ts +0 -8
  471. package/model-provider/llamacpp/LlamaCppApiConfiguration.js +0 -18
  472. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +0 -119
  473. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +0 -15
  474. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +0 -113
  475. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +0 -326
  476. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +0 -957
  477. package/model-provider/llamacpp/LlamaCppCompletionModel.js +0 -322
  478. package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +0 -40
  479. package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +0 -1
  480. package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +0 -38
  481. package/model-provider/llamacpp/LlamaCppError.cjs +0 -13
  482. package/model-provider/llamacpp/LlamaCppError.d.ts +0 -11
  483. package/model-provider/llamacpp/LlamaCppError.js +0 -10
  484. package/model-provider/llamacpp/LlamaCppFacade.cjs +0 -55
  485. package/model-provider/llamacpp/LlamaCppFacade.d.ts +0 -19
  486. package/model-provider/llamacpp/LlamaCppFacade.js +0 -25
  487. package/model-provider/llamacpp/LlamaCppGrammars.cjs +0 -86
  488. package/model-provider/llamacpp/LlamaCppGrammars.d.ts +0 -19
  489. package/model-provider/llamacpp/LlamaCppGrammars.js +0 -82
  490. package/model-provider/llamacpp/LlamaCppPrompt.cjs +0 -93
  491. package/model-provider/llamacpp/LlamaCppPrompt.d.ts +0 -47
  492. package/model-provider/llamacpp/LlamaCppPrompt.js +0 -65
  493. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +0 -96
  494. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +0 -39
  495. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +0 -92
  496. package/model-provider/llamacpp/LlamaCppTokenizer.cjs +0 -64
  497. package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +0 -32
  498. package/model-provider/llamacpp/LlamaCppTokenizer.js +0 -60
  499. package/model-provider/llamacpp/convertJsonSchemaToGBNF.cjs +0 -113
  500. package/model-provider/llamacpp/convertJsonSchemaToGBNF.d.ts +0 -7
  501. package/model-provider/llamacpp/convertJsonSchemaToGBNF.js +0 -109
  502. package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.cjs +0 -150
  503. package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.d.ts +0 -1
  504. package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.js +0 -148
  505. package/model-provider/llamacpp/index.cjs +0 -34
  506. package/model-provider/llamacpp/index.d.ts +0 -6
  507. package/model-provider/llamacpp/index.js +0 -5
  508. package/model-provider/lmnt/LmntApiConfiguration.cjs +0 -30
  509. package/model-provider/lmnt/LmntApiConfiguration.d.ts +0 -10
  510. package/model-provider/lmnt/LmntApiConfiguration.js +0 -26
  511. package/model-provider/lmnt/LmntFacade.cjs +0 -24
  512. package/model-provider/lmnt/LmntFacade.d.ts +0 -18
  513. package/model-provider/lmnt/LmntFacade.js +0 -19
  514. package/model-provider/lmnt/LmntSpeechModel.cjs +0 -103
  515. package/model-provider/lmnt/LmntSpeechModel.d.ts +0 -73
  516. package/model-provider/lmnt/LmntSpeechModel.js +0 -99
  517. package/model-provider/lmnt/index.cjs +0 -32
  518. package/model-provider/lmnt/index.d.ts +0 -3
  519. package/model-provider/lmnt/index.js +0 -3
  520. package/model-provider/mistral/MistralApiConfiguration.cjs +0 -30
  521. package/model-provider/mistral/MistralApiConfiguration.d.ts +0 -10
  522. package/model-provider/mistral/MistralApiConfiguration.js +0 -26
  523. package/model-provider/mistral/MistralChatModel.cjs +0 -203
  524. package/model-provider/mistral/MistralChatModel.d.ts +0 -357
  525. package/model-provider/mistral/MistralChatModel.js +0 -199
  526. package/model-provider/mistral/MistralChatModel.test.cjs +0 -58
  527. package/model-provider/mistral/MistralChatModel.test.d.ts +0 -1
  528. package/model-provider/mistral/MistralChatModel.test.js +0 -56
  529. package/model-provider/mistral/MistralChatPromptTemplate.cjs +0 -72
  530. package/model-provider/mistral/MistralChatPromptTemplate.d.ts +0 -16
  531. package/model-provider/mistral/MistralChatPromptTemplate.js +0 -66
  532. package/model-provider/mistral/MistralError.cjs +0 -17
  533. package/model-provider/mistral/MistralError.d.ts +0 -25
  534. package/model-provider/mistral/MistralError.js +0 -14
  535. package/model-provider/mistral/MistralFacade.cjs +0 -22
  536. package/model-provider/mistral/MistralFacade.d.ts +0 -14
  537. package/model-provider/mistral/MistralFacade.js +0 -16
  538. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +0 -106
  539. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +0 -106
  540. package/model-provider/mistral/MistralTextEmbeddingModel.js +0 -102
  541. package/model-provider/mistral/index.cjs +0 -33
  542. package/model-provider/mistral/index.d.ts +0 -5
  543. package/model-provider/mistral/index.js +0 -4
  544. package/model-provider/ollama/OllamaApiConfiguration.cjs +0 -22
  545. package/model-provider/ollama/OllamaApiConfiguration.d.ts +0 -8
  546. package/model-provider/ollama/OllamaApiConfiguration.js +0 -18
  547. package/model-provider/ollama/OllamaChatModel.cjs +0 -290
  548. package/model-provider/ollama/OllamaChatModel.d.ts +0 -285
  549. package/model-provider/ollama/OllamaChatModel.js +0 -286
  550. package/model-provider/ollama/OllamaChatModel.test.cjs +0 -32
  551. package/model-provider/ollama/OllamaChatModel.test.d.ts +0 -1
  552. package/model-provider/ollama/OllamaChatModel.test.js +0 -30
  553. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +0 -103
  554. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +0 -20
  555. package/model-provider/ollama/OllamaChatPromptTemplate.js +0 -96
  556. package/model-provider/ollama/OllamaCompletionModel.cjs +0 -296
  557. package/model-provider/ollama/OllamaCompletionModel.d.ts +0 -302
  558. package/model-provider/ollama/OllamaCompletionModel.js +0 -292
  559. package/model-provider/ollama/OllamaCompletionModel.test.cjs +0 -136
  560. package/model-provider/ollama/OllamaCompletionModel.test.d.ts +0 -1
  561. package/model-provider/ollama/OllamaCompletionModel.test.js +0 -134
  562. package/model-provider/ollama/OllamaCompletionPrompt.cjs +0 -91
  563. package/model-provider/ollama/OllamaCompletionPrompt.d.ts +0 -45
  564. package/model-provider/ollama/OllamaCompletionPrompt.js +0 -63
  565. package/model-provider/ollama/OllamaError.cjs +0 -13
  566. package/model-provider/ollama/OllamaError.d.ts +0 -13
  567. package/model-provider/ollama/OllamaError.js +0 -10
  568. package/model-provider/ollama/OllamaFacade.cjs +0 -51
  569. package/model-provider/ollama/OllamaFacade.d.ts +0 -15
  570. package/model-provider/ollama/OllamaFacade.js +0 -21
  571. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +0 -82
  572. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -37
  573. package/model-provider/ollama/OllamaTextEmbeddingModel.js +0 -78
  574. package/model-provider/ollama/OllamaTextGenerationSettings.cjs +0 -2
  575. package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +0 -87
  576. package/model-provider/ollama/OllamaTextGenerationSettings.js +0 -1
  577. package/model-provider/ollama/index.cjs +0 -35
  578. package/model-provider/ollama/index.d.ts +0 -7
  579. package/model-provider/ollama/index.js +0 -6
  580. package/model-provider/openai/AbstractOpenAIChatModel.cjs +0 -302
  581. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +0 -805
  582. package/model-provider/openai/AbstractOpenAIChatModel.js +0 -298
  583. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +0 -180
  584. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +0 -230
  585. package/model-provider/openai/AbstractOpenAICompletionModel.js +0 -176
  586. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs +0 -83
  587. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts +0 -92
  588. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js +0 -79
  589. package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +0 -58
  590. package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +0 -27
  591. package/model-provider/openai/AzureOpenAIApiConfiguration.js +0 -54
  592. package/model-provider/openai/OpenAIApiConfiguration.cjs +0 -30
  593. package/model-provider/openai/OpenAIApiConfiguration.d.ts +0 -10
  594. package/model-provider/openai/OpenAIApiConfiguration.js +0 -26
  595. package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.cjs +0 -169
  596. package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.d.ts +0 -201
  597. package/model-provider/openai/OpenAIChatFunctionCallObjectGenerationModel.js +0 -162
  598. package/model-provider/openai/OpenAIChatMessage.cjs +0 -79
  599. package/model-provider/openai/OpenAIChatMessage.d.ts +0 -84
  600. package/model-provider/openai/OpenAIChatMessage.js +0 -76
  601. package/model-provider/openai/OpenAIChatModel.cjs +0 -274
  602. package/model-provider/openai/OpenAIChatModel.d.ts +0 -166
  603. package/model-provider/openai/OpenAIChatModel.js +0 -267
  604. package/model-provider/openai/OpenAIChatModel.test.cjs +0 -101
  605. package/model-provider/openai/OpenAIChatModel.test.d.ts +0 -1
  606. package/model-provider/openai/OpenAIChatModel.test.js +0 -99
  607. package/model-provider/openai/OpenAIChatPromptTemplate.cjs +0 -114
  608. package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +0 -20
  609. package/model-provider/openai/OpenAIChatPromptTemplate.js +0 -107
  610. package/model-provider/openai/OpenAICompletionModel.cjs +0 -126
  611. package/model-provider/openai/OpenAICompletionModel.d.ts +0 -65
  612. package/model-provider/openai/OpenAICompletionModel.js +0 -119
  613. package/model-provider/openai/OpenAICompletionModel.test.cjs +0 -59
  614. package/model-provider/openai/OpenAICompletionModel.test.d.ts +0 -1
  615. package/model-provider/openai/OpenAICompletionModel.test.js +0 -57
  616. package/model-provider/openai/OpenAIError.cjs +0 -22
  617. package/model-provider/openai/OpenAIError.d.ts +0 -36
  618. package/model-provider/openai/OpenAIError.js +0 -19
  619. package/model-provider/openai/OpenAIFacade.cjs +0 -173
  620. package/model-provider/openai/OpenAIFacade.d.ts +0 -146
  621. package/model-provider/openai/OpenAIFacade.js +0 -160
  622. package/model-provider/openai/OpenAIImageGenerationModel.cjs +0 -170
  623. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +0 -132
  624. package/model-provider/openai/OpenAIImageGenerationModel.js +0 -165
  625. package/model-provider/openai/OpenAISpeechModel.cjs +0 -93
  626. package/model-provider/openai/OpenAISpeechModel.d.ts +0 -51
  627. package/model-provider/openai/OpenAISpeechModel.js +0 -88
  628. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +0 -97
  629. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +0 -54
  630. package/model-provider/openai/OpenAITextEmbeddingModel.js +0 -91
  631. package/model-provider/openai/OpenAITranscriptionModel.cjs +0 -171
  632. package/model-provider/openai/OpenAITranscriptionModel.d.ts +0 -232
  633. package/model-provider/openai/OpenAITranscriptionModel.js +0 -166
  634. package/model-provider/openai/TikTokenTokenizer.cjs +0 -85
  635. package/model-provider/openai/TikTokenTokenizer.d.ts +0 -35
  636. package/model-provider/openai/TikTokenTokenizer.js +0 -78
  637. package/model-provider/openai/countOpenAIChatMessageTokens.cjs +0 -47
  638. package/model-provider/openai/countOpenAIChatMessageTokens.d.ts +0 -20
  639. package/model-provider/openai/countOpenAIChatMessageTokens.js +0 -42
  640. package/model-provider/openai/index.cjs +0 -44
  641. package/model-provider/openai/index.d.ts +0 -16
  642. package/model-provider/openai/index.js +0 -15
  643. package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +0 -39
  644. package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +0 -15
  645. package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +0 -35
  646. package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.cjs +0 -2
  647. package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.d.ts +0 -5
  648. package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.js +0 -1
  649. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -98
  650. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +0 -37
  651. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -94
  652. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +0 -84
  653. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +0 -34
  654. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +0 -80
  655. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +0 -118
  656. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +0 -104
  657. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +0 -109
  658. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +0 -27
  659. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +0 -17
  660. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +0 -23
  661. package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +0 -39
  662. package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +0 -15
  663. package/model-provider/openai-compatible/PerplexityApiConfiguration.js +0 -35
  664. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +0 -39
  665. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +0 -15
  666. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +0 -35
  667. package/model-provider/openai-compatible/index.cjs +0 -37
  668. package/model-provider/openai-compatible/index.d.ts +0 -8
  669. package/model-provider/openai-compatible/index.js +0 -8
  670. package/model-provider/stability/StabilityApiConfiguration.cjs +0 -30
  671. package/model-provider/stability/StabilityApiConfiguration.d.ts +0 -10
  672. package/model-provider/stability/StabilityApiConfiguration.js +0 -26
  673. package/model-provider/stability/StabilityError.cjs +0 -13
  674. package/model-provider/stability/StabilityError.d.ts +0 -13
  675. package/model-provider/stability/StabilityError.js +0 -10
  676. package/model-provider/stability/StabilityFacade.cjs +0 -40
  677. package/model-provider/stability/StabilityFacade.d.ts +0 -34
  678. package/model-provider/stability/StabilityFacade.js +0 -35
  679. package/model-provider/stability/StabilityImageGenerationModel.cjs +0 -123
  680. package/model-provider/stability/StabilityImageGenerationModel.d.ts +0 -111
  681. package/model-provider/stability/StabilityImageGenerationModel.js +0 -119
  682. package/model-provider/stability/StabilityImageGenerationPrompt.cjs +0 -12
  683. package/model-provider/stability/StabilityImageGenerationPrompt.d.ts +0 -9
  684. package/model-provider/stability/StabilityImageGenerationPrompt.js +0 -8
  685. package/model-provider/stability/index.cjs +0 -33
  686. package/model-provider/stability/index.d.ts +0 -5
  687. package/model-provider/stability/index.js +0 -4
  688. package/model-provider/whispercpp/WhisperCppApiConfiguration.cjs +0 -22
  689. package/model-provider/whispercpp/WhisperCppApiConfiguration.d.ts +0 -8
  690. package/model-provider/whispercpp/WhisperCppApiConfiguration.js +0 -18
  691. package/model-provider/whispercpp/WhisperCppFacade.cjs +0 -17
  692. package/model-provider/whispercpp/WhisperCppFacade.d.ts +0 -9
  693. package/model-provider/whispercpp/WhisperCppFacade.js +0 -12
  694. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +0 -125
  695. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +0 -31
  696. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +0 -121
  697. package/model-provider/whispercpp/index.cjs +0 -32
  698. package/model-provider/whispercpp/index.d.ts +0 -3
  699. package/model-provider/whispercpp/index.js +0 -3
  700. package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +0 -30
  701. package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +0 -14
  702. package/observability/helicone/HeliconeOpenAIApiConfiguration.js +0 -26
  703. package/observability/index.cjs +0 -17
  704. package/observability/index.d.ts +0 -1
  705. package/observability/index.js +0 -1
  706. package/retriever/RetrieveEvent.cjs +0 -2
  707. package/retriever/RetrieveEvent.d.ts +0 -10
  708. package/retriever/RetrieveEvent.js +0 -1
  709. package/retriever/Retriever.cjs +0 -2
  710. package/retriever/Retriever.d.ts +0 -4
  711. package/retriever/Retriever.js +0 -1
  712. package/retriever/index.cjs +0 -18
  713. package/retriever/index.d.ts +0 -2
  714. package/retriever/index.js +0 -2
  715. package/retriever/retrieve.cjs +0 -15
  716. package/retriever/retrieve.d.ts +0 -3
  717. package/retriever/retrieve.js +0 -11
  718. package/test/JsonTestServer.cjs +0 -33
  719. package/test/JsonTestServer.d.ts +0 -7
  720. package/test/JsonTestServer.js +0 -29
  721. package/test/StreamingTestServer.cjs +0 -55
  722. package/test/StreamingTestServer.d.ts +0 -7
  723. package/test/StreamingTestServer.js +0 -51
  724. package/test/arrayFromAsync.cjs +0 -13
  725. package/test/arrayFromAsync.d.ts +0 -1
  726. package/test/arrayFromAsync.js +0 -9
  727. package/text-chunk/TextChunk.cjs +0 -2
  728. package/text-chunk/TextChunk.d.ts +0 -3
  729. package/text-chunk/TextChunk.js +0 -1
  730. package/text-chunk/index.cjs +0 -21
  731. package/text-chunk/index.d.ts +0 -5
  732. package/text-chunk/index.js +0 -5
  733. package/text-chunk/split/SplitFunction.cjs +0 -2
  734. package/text-chunk/split/SplitFunction.d.ts +0 -4
  735. package/text-chunk/split/SplitFunction.js +0 -1
  736. package/text-chunk/split/splitOnSeparator.cjs +0 -10
  737. package/text-chunk/split/splitOnSeparator.d.ts +0 -7
  738. package/text-chunk/split/splitOnSeparator.js +0 -6
  739. package/text-chunk/split/splitRecursively.cjs +0 -41
  740. package/text-chunk/split/splitRecursively.d.ts +0 -18
  741. package/text-chunk/split/splitRecursively.js +0 -36
  742. package/text-chunk/split/splitTextChunks.cjs +0 -16
  743. package/text-chunk/split/splitTextChunks.d.ts +0 -4
  744. package/text-chunk/split/splitTextChunks.js +0 -11
  745. package/tool/NoSuchToolDefinitionError.cjs +0 -41
  746. package/tool/NoSuchToolDefinitionError.d.ts +0 -17
  747. package/tool/NoSuchToolDefinitionError.js +0 -37
  748. package/tool/Tool.cjs +0 -64
  749. package/tool/Tool.d.ts +0 -39
  750. package/tool/Tool.js +0 -60
  751. package/tool/ToolCall.cjs +0 -2
  752. package/tool/ToolCall.d.ts +0 -15
  753. package/tool/ToolCall.js +0 -1
  754. package/tool/ToolCallArgumentsValidationError.cjs +0 -49
  755. package/tool/ToolCallArgumentsValidationError.d.ts +0 -23
  756. package/tool/ToolCallArgumentsValidationError.js +0 -45
  757. package/tool/ToolCallError.cjs +0 -34
  758. package/tool/ToolCallError.d.ts +0 -17
  759. package/tool/ToolCallError.js +0 -30
  760. package/tool/ToolCallGenerationError.cjs +0 -35
  761. package/tool/ToolCallGenerationError.d.ts +0 -15
  762. package/tool/ToolCallGenerationError.js +0 -31
  763. package/tool/ToolCallResult.cjs +0 -2
  764. package/tool/ToolCallResult.d.ts +0 -13
  765. package/tool/ToolCallResult.js +0 -1
  766. package/tool/ToolDefinition.cjs +0 -2
  767. package/tool/ToolDefinition.d.ts +0 -7
  768. package/tool/ToolDefinition.js +0 -1
  769. package/tool/ToolExecutionError.cjs +0 -42
  770. package/tool/ToolExecutionError.d.ts +0 -19
  771. package/tool/ToolExecutionError.js +0 -38
  772. package/tool/WebSearchTool.cjs +0 -56
  773. package/tool/WebSearchTool.d.ts +0 -54
  774. package/tool/WebSearchTool.js +0 -52
  775. package/tool/execute-tool/ExecuteToolEvent.cjs +0 -2
  776. package/tool/execute-tool/ExecuteToolEvent.d.ts +0 -11
  777. package/tool/execute-tool/ExecuteToolEvent.js +0 -1
  778. package/tool/execute-tool/executeTool.cjs +0 -102
  779. package/tool/execute-tool/executeTool.d.ts +0 -30
  780. package/tool/execute-tool/executeTool.js +0 -98
  781. package/tool/execute-tool/index.cjs +0 -18
  782. package/tool/execute-tool/index.d.ts +0 -2
  783. package/tool/execute-tool/index.js +0 -2
  784. package/tool/execute-tool/safeExecuteToolCall.cjs +0 -34
  785. package/tool/execute-tool/safeExecuteToolCall.d.ts +0 -5
  786. package/tool/execute-tool/safeExecuteToolCall.js +0 -30
  787. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +0 -61
  788. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +0 -35
  789. package/tool/generate-tool-call/TextGenerationToolCallModel.js +0 -57
  790. package/tool/generate-tool-call/ToolCallGenerationEvent.cjs +0 -2
  791. package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +0 -23
  792. package/tool/generate-tool-call/ToolCallGenerationEvent.js +0 -1
  793. package/tool/generate-tool-call/ToolCallGenerationModel.cjs +0 -2
  794. package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +0 -19
  795. package/tool/generate-tool-call/ToolCallGenerationModel.js +0 -1
  796. package/tool/generate-tool-call/ToolCallParseError.cjs +0 -44
  797. package/tool/generate-tool-call/ToolCallParseError.d.ts +0 -18
  798. package/tool/generate-tool-call/ToolCallParseError.js +0 -40
  799. package/tool/generate-tool-call/generateToolCall.cjs +0 -65
  800. package/tool/generate-tool-call/generateToolCall.d.ts +0 -21
  801. package/tool/generate-tool-call/generateToolCall.js +0 -61
  802. package/tool/generate-tool-call/index.cjs +0 -22
  803. package/tool/generate-tool-call/index.d.ts +0 -6
  804. package/tool/generate-tool-call/index.js +0 -6
  805. package/tool/generate-tool-call/jsonToolCallPrompt.cjs +0 -30
  806. package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +0 -5
  807. package/tool/generate-tool-call/jsonToolCallPrompt.js +0 -27
  808. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +0 -62
  809. package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +0 -31
  810. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +0 -58
  811. package/tool/generate-tool-calls/ToolCallsGenerationEvent.cjs +0 -2
  812. package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +0 -23
  813. package/tool/generate-tool-calls/ToolCallsGenerationEvent.js +0 -1
  814. package/tool/generate-tool-calls/ToolCallsGenerationModel.cjs +0 -2
  815. package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +0 -21
  816. package/tool/generate-tool-calls/ToolCallsGenerationModel.js +0 -1
  817. package/tool/generate-tool-calls/ToolCallsParseError.cjs +0 -36
  818. package/tool/generate-tool-calls/ToolCallsParseError.d.ts +0 -15
  819. package/tool/generate-tool-calls/ToolCallsParseError.js +0 -32
  820. package/tool/generate-tool-calls/ToolCallsPromptTemplate.cjs +0 -2
  821. package/tool/generate-tool-calls/ToolCallsPromptTemplate.d.ts +0 -12
  822. package/tool/generate-tool-calls/ToolCallsPromptTemplate.js +0 -1
  823. package/tool/generate-tool-calls/generateToolCalls.cjs +0 -63
  824. package/tool/generate-tool-calls/generateToolCalls.d.ts +0 -39
  825. package/tool/generate-tool-calls/generateToolCalls.js +0 -59
  826. package/tool/generate-tool-calls/index.cjs +0 -22
  827. package/tool/generate-tool-calls/index.d.ts +0 -6
  828. package/tool/generate-tool-calls/index.js +0 -6
  829. package/tool/index.cjs +0 -31
  830. package/tool/index.d.ts +0 -15
  831. package/tool/index.js +0 -15
  832. package/tool/run-tool/RunToolEvent.cjs +0 -2
  833. package/tool/run-tool/RunToolEvent.d.ts +0 -7
  834. package/tool/run-tool/RunToolEvent.js +0 -1
  835. package/tool/run-tool/index.cjs +0 -18
  836. package/tool/run-tool/index.d.ts +0 -2
  837. package/tool/run-tool/index.js +0 -2
  838. package/tool/run-tool/runTool.cjs +0 -30
  839. package/tool/run-tool/runTool.d.ts +0 -20
  840. package/tool/run-tool/runTool.js +0 -26
  841. package/tool/run-tools/RunToolsEvent.cjs +0 -2
  842. package/tool/run-tools/RunToolsEvent.d.ts +0 -7
  843. package/tool/run-tools/RunToolsEvent.js +0 -1
  844. package/tool/run-tools/index.cjs +0 -18
  845. package/tool/run-tools/index.d.ts +0 -2
  846. package/tool/run-tools/index.js +0 -2
  847. package/tool/run-tools/runTools.cjs +0 -54
  848. package/tool/run-tools/runTools.d.ts +0 -22
  849. package/tool/run-tools/runTools.js +0 -50
  850. package/util/AsyncQueue.cjs +0 -121
  851. package/util/AsyncQueue.d.ts +0 -51
  852. package/util/AsyncQueue.js +0 -117
  853. package/util/AsyncQueue.test.cjs +0 -137
  854. package/util/AsyncQueue.test.d.ts +0 -1
  855. package/util/AsyncQueue.test.js +0 -135
  856. package/util/DurationMeasurement.cjs +0 -48
  857. package/util/DurationMeasurement.d.ts +0 -6
  858. package/util/DurationMeasurement.js +0 -44
  859. package/util/ErrorHandler.cjs +0 -2
  860. package/util/ErrorHandler.d.ts +0 -1
  861. package/util/ErrorHandler.js +0 -1
  862. package/util/SafeResult.cjs +0 -2
  863. package/util/SafeResult.d.ts +0 -8
  864. package/util/SafeResult.js +0 -1
  865. package/util/SimpleWebSocket.cjs +0 -48
  866. package/util/SimpleWebSocket.d.ts +0 -12
  867. package/util/SimpleWebSocket.js +0 -21
  868. package/util/audio/AudioMimeType.cjs +0 -2
  869. package/util/audio/AudioMimeType.d.ts +0 -1
  870. package/util/audio/AudioMimeType.js +0 -1
  871. package/util/audio/getAudioFileExtension.cjs +0 -29
  872. package/util/audio/getAudioFileExtension.d.ts +0 -1
  873. package/util/audio/getAudioFileExtension.js +0 -25
  874. package/util/audio/index.cjs +0 -18
  875. package/util/audio/index.d.ts +0 -2
  876. package/util/audio/index.js +0 -2
  877. package/util/cosineSimilarity.cjs +0 -26
  878. package/util/cosineSimilarity.d.ts +0 -11
  879. package/util/cosineSimilarity.js +0 -22
  880. package/util/delay.cjs +0 -7
  881. package/util/delay.d.ts +0 -1
  882. package/util/delay.js +0 -3
  883. package/util/detectRuntime.cjs +0 -21
  884. package/util/detectRuntime.d.ts +0 -1
  885. package/util/detectRuntime.js +0 -17
  886. package/util/fixJson.cjs +0 -334
  887. package/util/fixJson.d.ts +0 -1
  888. package/util/fixJson.js +0 -330
  889. package/util/fixJson.test.cjs +0 -188
  890. package/util/fixJson.test.d.ts +0 -1
  891. package/util/fixJson.test.js +0 -183
  892. package/util/format/DataContent.cjs +0 -27
  893. package/util/format/DataContent.d.ts +0 -7
  894. package/util/format/DataContent.js +0 -22
  895. package/util/format/UInt8Utils.cjs +0 -40
  896. package/util/format/UInt8Utils.d.ts +0 -2
  897. package/util/format/UInt8Utils.js +0 -35
  898. package/util/format/index.cjs +0 -17
  899. package/util/format/index.d.ts +0 -1
  900. package/util/format/index.js +0 -1
  901. package/util/getErrorMessage.cjs +0 -16
  902. package/util/getErrorMessage.d.ts +0 -1
  903. package/util/getErrorMessage.js +0 -12
  904. package/util/index.cjs +0 -22
  905. package/util/index.d.ts +0 -6
  906. package/util/index.js +0 -6
  907. package/util/isDeepEqualData.cjs +0 -53
  908. package/util/isDeepEqualData.d.ts +0 -8
  909. package/util/isDeepEqualData.js +0 -49
  910. package/util/isDeepEqualData.test.cjs +0 -107
  911. package/util/isDeepEqualData.test.d.ts +0 -1
  912. package/util/isDeepEqualData.test.js +0 -102
  913. package/util/never.cjs +0 -6
  914. package/util/never.d.ts +0 -1
  915. package/util/never.js +0 -2
  916. package/util/parsePartialJson.cjs +0 -29
  917. package/util/parsePartialJson.d.ts +0 -1
  918. package/util/parsePartialJson.js +0 -22
  919. package/util/runSafe.cjs +0 -15
  920. package/util/runSafe.d.ts +0 -2
  921. package/util/runSafe.js +0 -11
  922. package/util/runSafe.test.cjs +0 -58
  923. package/util/runSafe.test.d.ts +0 -1
  924. package/util/runSafe.test.js +0 -56
  925. package/util/streaming/EventSourceParserStream.cjs +0 -34
  926. package/util/streaming/EventSourceParserStream.d.ts +0 -15
  927. package/util/streaming/EventSourceParserStream.js +0 -30
  928. package/util/streaming/convertReadableStreamToAsyncIterable.cjs +0 -19
  929. package/util/streaming/convertReadableStreamToAsyncIterable.d.ts +0 -1
  930. package/util/streaming/convertReadableStreamToAsyncIterable.js +0 -15
  931. package/util/streaming/createEventSourceResponseHandler.cjs +0 -9
  932. package/util/streaming/createEventSourceResponseHandler.d.ts +0 -4
  933. package/util/streaming/createEventSourceResponseHandler.js +0 -5
  934. package/util/streaming/createEventSourceStream.cjs +0 -19
  935. package/util/streaming/createEventSourceStream.d.ts +0 -1
  936. package/util/streaming/createEventSourceStream.js +0 -15
  937. package/util/streaming/createJsonStreamResponseHandler.cjs +0 -9
  938. package/util/streaming/createJsonStreamResponseHandler.d.ts +0 -4
  939. package/util/streaming/createJsonStreamResponseHandler.js +0 -5
  940. package/util/streaming/index.cjs +0 -17
  941. package/util/streaming/index.d.ts +0 -1
  942. package/util/streaming/index.js +0 -1
  943. package/util/streaming/parseEventSourceStream.cjs +0 -12
  944. package/util/streaming/parseEventSourceStream.d.ts +0 -4
  945. package/util/streaming/parseEventSourceStream.js +0 -8
  946. package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +0 -52
  947. package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +0 -6
  948. package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +0 -48
  949. package/util/streaming/parseJsonStream.cjs +0 -35
  950. package/util/streaming/parseJsonStream.d.ts +0 -7
  951. package/util/streaming/parseJsonStream.js +0 -31
  952. package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +0 -21
  953. package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +0 -6
  954. package/util/streaming/parseJsonStreamAsAsyncIterable.js +0 -17
  955. package/vector-index/UpsertIntoVectorIndexEvent.cjs +0 -2
  956. package/vector-index/UpsertIntoVectorIndexEvent.d.ts +0 -9
  957. package/vector-index/UpsertIntoVectorIndexEvent.js +0 -1
  958. package/vector-index/VectorIndex.cjs +0 -2
  959. package/vector-index/VectorIndex.d.ts +0 -19
  960. package/vector-index/VectorIndex.js +0 -1
  961. package/vector-index/VectorIndexRetriever.cjs +0 -54
  962. package/vector-index/VectorIndexRetriever.d.ts +0 -20
  963. package/vector-index/VectorIndexRetriever.js +0 -50
  964. package/vector-index/index.cjs +0 -21
  965. package/vector-index/index.d.ts +0 -5
  966. package/vector-index/index.js +0 -5
  967. package/vector-index/memory/MemoryVectorIndex.cjs +0 -69
  968. package/vector-index/memory/MemoryVectorIndex.d.ts +0 -32
  969. package/vector-index/memory/MemoryVectorIndex.js +0 -65
  970. package/vector-index/upsertIntoVectorIndex.cjs +0 -28
  971. package/vector-index/upsertIntoVectorIndex.d.ts +0 -11
  972. package/vector-index/upsertIntoVectorIndex.js +0 -24
package/CHANGELOG.md DELETED
@@ -1,2272 +0,0 @@
1
- # Changelog
2
-
3
- ## v0.133.0 - 2024-01-26
4
-
5
- ### Added
6
-
7
- - Support for OpenAI embedding custom dimensions.
8
-
9
- ### Changed
10
-
11
- - **breaking change**: renamed `embeddingDimensions` setting to `dimensions`
12
-
13
- ## v0.132.0 - 2024-01-25
14
-
15
- ### Added
16
-
17
- - Support for OpenAI `text-embedding-3-small` and `text-embedding-3-large` embedding models.
18
- - Support for OpenAI `gpt-4-turbo-preview`, `gpt-4-0125-preview`, and `gpt-3.5-turbo-0125` chat models.
19
-
20
- ## v0.131.1 - 2024-01-25
21
-
22
- ### Fixed
23
-
24
- - Add `type-fest` as dependency to fix type inference errors.
25
-
26
- ## v0.131.0 - 2024-01-23
27
-
28
- ### Added
29
-
30
- - `ObjectStreamResponse` and `ObjectStreamFromResponse` serialization functions for using server-generated object streams in web applications.
31
-
32
- Server example:
33
-
34
- ```ts
35
- export async function POST(req: Request) {
36
- const { myArgs } = await req.json();
37
-
38
- const objectStream = await streamObject({
39
- // ...
40
- });
41
-
42
- // serialize the object stream to a response:
43
- return new ObjectStreamResponse(objectStream);
44
- }
45
- ```
46
-
47
- Client example:
48
-
49
- ```ts
50
- const response = await fetch("/api/stream-object-openai", {
51
- method: "POST",
52
- body: JSON.stringify({ myArgs }),
53
- });
54
-
55
- // deserialize (result object is simpler than the full response)
56
- const stream = ObjectStreamFromResponse({
57
- schema: itinerarySchema,
58
- response,
59
- });
60
-
61
- for await (const { partialObject } of stream) {
62
- // do something, e.g. setting a React state
63
- }
64
- ```
65
-
66
- ### Changed
67
-
68
- - **breaking change**: rename `generateStructure` to `generateObject` and `streamStructure` to `streamObject`. Related names have been changed accordingly.
69
- - **breaking change**: the `streamObject` result stream contains additional data. You need to use `stream.partialObject` or destructuring to access it:
70
-
71
- ```ts
72
- const objectStream = await streamObject({
73
- // ...
74
- });
75
-
76
- for await (const { partialObject } of objectStream) {
77
- console.clear();
78
- console.log(partialObject);
79
- }
80
- ```
81
-
82
- - **breaking change**: the result from successful `Schema` validations is stored in the `value` property (before: `data`).
83
-
84
- ## v0.130.1 - 2024-01-22
85
-
86
- ### Fixed
87
-
88
- - Duplex speech streaming works in Vercel Edge Functions.
89
-
90
- ## v0.130.0 - 2024-01-21
91
-
92
- ### Changed
93
-
94
- - **breaking change**: updated `generateTranscription` interface. The function now takes a `mimeType` and `audioData` (base64-encoded string, `Uint8Array`, `Buffer` or `ArrayBuffer`). Example:
95
-
96
- ```ts
97
- import { generateTranscription, openai } from "modelfusion";
98
- import fs from "node:fs";
99
-
100
- const transcription = await generateTranscription({
101
- model: openai.Transcriber({ model: "whisper-1" }),
102
- mimeType: "audio/mp3",
103
- audioData: await fs.promises.readFile("data/test.mp3"),
104
- });
105
- ```
106
-
107
- - Images in instruction and chat prompts can be `Buffer` or `ArrayBuffer` instances (in addition to base64-encoded strings and `Uint8Array` instances).
108
-
109
- ## v0.129.0 - 2024-01-20
110
-
111
- ### Changed
112
-
113
- - **breaking change**: Usage of Node `async_hooks` has been renamed from `node:async_hooks` to `async_hooks` for easier Webpack configuration. To exclude the `async_hooks` from client-side bundling, you can use the following config for Next.js (`next.config.mjs` or `next.config.js`):
114
-
115
- ```js
116
- /**
117
- * @type {import('next').NextConfig}
118
- */
119
- const nextConfig = {
120
- webpack: (config, { isServer }) => {
121
- if (isServer) {
122
- return config;
123
- }
124
-
125
- config.resolve = config.resolve ?? {};
126
- config.resolve.fallback = config.resolve.fallback ?? {};
127
-
128
- // async hooks is not available in the browser:
129
- config.resolve.fallback.async_hooks = false;
130
-
131
- return config;
132
- },
133
- };
134
- ```
135
-
136
- ## v0.128.0 - 2024-01-20
137
-
138
- ### Changed
139
-
140
- - **breaking change**: ModelFusion uses `Uint8Array` instead of `Buffer` for better cross-platform compatibility (see also ["Goodbye, Node.js Buffer"](https://sindresorhus.com/blog/goodbye-nodejs-buffer)). This can lead to breaking changes in your code if you use `Buffer`-specific methods.
141
- - **breaking change**: Image content in multi-modal instruction and chat inputs (e.g. for GPT Vision) is passed in the `image` property (instead of `base64Image`) and supports both base64 strings and `Uint8Array` inputs:
142
-
143
- ```ts
144
- const image = fs.readFileSync(path.join("data", "example-image.png"));
145
-
146
- const textStream = await streamText({
147
- model: openai.ChatTextGenerator({
148
- model: "gpt-4-vision-preview",
149
- maxGenerationTokens: 1000,
150
- }),
151
-
152
- prompt: [
153
- openai.ChatMessage.user([
154
- { type: "text", text: "Describe the image in detail:\n\n" },
155
- { type: "image", image, mimeType: "image/png" },
156
- ]),
157
- ],
158
- });
159
- ```
160
-
161
- - OpenAI-compatible providers with predefined API configurations have a customized provider name that shows up in the events.
162
-
163
- ## v0.127.0 - 2024-01-15
164
-
165
- ### Changed
166
-
167
- - **breaking change**: `streamStructure` returns an async iterable over deep partial objects. If you need to get the fully validated final result, you can use the `fullResponse: true` option and await the `structurePromise` value. Example:
168
-
169
- ```ts
170
- const { structureStream, structurePromise } = await streamStructure({
171
- model: ollama
172
- .ChatTextGenerator({
173
- model: "openhermes2.5-mistral",
174
- maxGenerationTokens: 1024,
175
- temperature: 0,
176
- })
177
- .asStructureGenerationModel(jsonStructurePrompt.text()),
178
-
179
- schema: zodSchema(
180
- z.object({
181
- characters: z.array(
182
- z.object({
183
- name: z.string(),
184
- class: z
185
- .string()
186
- .describe("Character class, e.g. warrior, mage, or thief."),
187
- description: z.string(),
188
- })
189
- ),
190
- })
191
- ),
192
-
193
- prompt:
194
- "Generate 3 character descriptions for a fantasy role playing game.",
195
-
196
- fullResponse: true,
197
- });
198
-
199
- for await (const partialStructure of structureStream) {
200
- console.clear();
201
- console.log(partialStructure);
202
- }
203
-
204
- const structure = await structurePromise;
205
-
206
- console.clear();
207
- console.log("FINAL STRUCTURE");
208
- console.log(structure);
209
- ```
210
-
211
- - **breaking change**: Renamed `text` value in `streamText` with `fullResponse: true` to `textPromise`.
212
-
213
- ### Fixed
214
-
215
- - Ollama streaming.
216
- - Ollama structure generation and streaming.
217
-
218
- ## v0.126.0 - 2024-01-15
219
-
220
- ### Changed
221
-
222
- - **breaking change**: rename `useTool` to `runTool` and `useTools` to `runTools` to avoid confusion with React hooks.
223
-
224
- ## v0.125.0 - 2024-01-14
225
-
226
- ### Added
227
-
228
- - Perplexity AI chat completion support. Example:
229
-
230
- ```ts
231
- import { openaicompatible, streamText } from "modelfusion";
232
-
233
- const textStream = await streamText({
234
- model: openaicompatible
235
- .ChatTextGenerator({
236
- api: openaicompatible.PerplexityApi(),
237
- provider: "openaicompatible-perplexity",
238
- model: "pplx-70b-online", // online model with access to web search
239
- maxGenerationTokens: 500,
240
- })
241
- .withTextPrompt(),
242
-
243
- prompt: "What is RAG in AI?",
244
- });
245
- ```
246
-
247
- ## v0.124.0 - 2024-01-13
248
-
249
- ### Added
250
-
251
- - [Embedding-support for OpenAI-compatible providers](https://modelfusion.dev/integration/model-provider/openaicompatible/#embed-text). You can for example use the Together AI embedding endpoint:
252
-
253
- ```ts
254
- import { embed, openaicompatible } from "modelfusion";
255
-
256
- const embedding = await embed({
257
- model: openaicompatible.TextEmbedder({
258
- api: openaicompatible.TogetherAIApi(),
259
- provider: "openaicompatible-togetherai",
260
- model: "togethercomputer/m2-bert-80M-8k-retrieval",
261
- }),
262
- value: "At first, Nox didn't know what to do with the pup.",
263
- });
264
- ```
265
-
266
- ## v0.123.0 - 2024-01-13
267
-
268
- ### Added
269
-
270
- - `classify` model function ([docs](https://modelfusion.dev/guide/function/classify)) for classifying values. The `SemanticClassifier` has been renamed to `EmbeddingSimilarityClassifier` and can be used in conjunction with `classify`:
271
-
272
- ```ts
273
- import { classify, EmbeddingSimilarityClassifier, openai } from "modelfusion";
274
-
275
- const classifier = new EmbeddingSimilarityClassifier({
276
- embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
277
- similarityThreshold: 0.82,
278
- clusters: [
279
- {
280
- name: "politics" as const,
281
- values: [
282
- "they will save the country!",
283
- // ...
284
- ],
285
- },
286
- {
287
- name: "chitchat" as const,
288
- values: [
289
- "how's the weather today?",
290
- // ...
291
- ],
292
- },
293
- ],
294
- });
295
-
296
- // strongly typed result:
297
- const result = await classify({
298
- model: classifier,
299
- value: "don't you love politics?",
300
- });
301
- ```
302
-
303
- ## v0.122.0 - 2024-01-13
304
-
305
- ### Changed
306
-
307
- - **breaking change**: Switch from positional parameters to named parameters (parameter object) for all model and tool functions. The parameter object is the first and only parameter of the function. Additional options (last parameter before) are now part of the parameter object. Example:
308
-
309
- ```ts
310
- // old:
311
- const text = await generateText(
312
- openai
313
- .ChatTextGenerator({
314
- model: "gpt-3.5-turbo",
315
- maxGenerationTokens: 1000,
316
- })
317
- .withTextPrompt(),
318
-
319
- "Write a short story about a robot learning to love",
320
-
321
- {
322
- functionId: "example-function",
323
- }
324
- );
325
-
326
- // new:
327
- const text = await generateText({
328
- model: openai
329
- .ChatTextGenerator({
330
- model: "gpt-3.5-turbo",
331
- maxGenerationTokens: 1000,
332
- })
333
- .withTextPrompt(),
334
-
335
- prompt: "Write a short story about a robot learning to love",
336
-
337
- functionId: "example-function",
338
- });
339
- ```
340
-
341
- This change was made to make the API more flexible and to allow for future extensions.
342
-
343
- ## v0.121.2 - 2024-01-11
344
-
345
- ### Fixed
346
-
347
- - Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@Necmttn](https://github.com/Necmttn) for the bugfix!
348
-
349
- ## v0.121.1 - 2024-01-10
350
-
351
- ### Fixed
352
-
353
- - Ollama response schema for repeated calls with Ollama 0.1.19 chat models. Thanks [@jakedetels](https://github.com/jakedetels) for the bug report!
354
-
355
- ## v0.121.0 - 2024-01-09
356
-
357
- ### Added
358
-
359
- - Synthia prompt template
360
-
361
- ### Changed
362
-
363
- - **breaking change**: Renamed `parentCallId` function parameter to `callId` to enable options pass-through.
364
- - Better output filtering for `detailed-object` log format (e.g. via `modelfusion.setLogFormat("detailed-object")`)
365
-
366
- ## v0.120.0 - 2024-01-09
367
-
368
- ### Added
369
-
370
- - `OllamaCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `ollama.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.
371
-
372
- ```ts
373
- const model = ollama
374
- .CompletionTextGenerator({
375
- model: "mistral",
376
- promptTemplate: ollama.prompt.Mistral,
377
- raw: true, // required when using custom prompt template
378
- maxGenerationTokens: 120,
379
- })
380
- .withTextPrompt();
381
- ```
382
-
383
- ### Removed
384
-
385
- - **breaking change**: removed `.withTextPromptTemplate` on `OllamaCompletionModel`.
386
-
387
- ## v0.119.1 - 2024-01-08
388
-
389
- ### Fixed
390
-
391
- - Incorrect export. Thanks [@mloenow](https://github.com/mloenow) for the fix!
392
-
393
- ## v0.119.0 - 2024-01-07
394
-
395
- ### Added
396
-
397
- - Schema-specific GBNF grammar generator for `LlamaCppCompletionModel`. When using `jsonStructurePrompt`, it automatically uses a GBNF grammar for the JSON schema that you provide. Example:
398
-
399
- ```ts
400
- const structure = await generateStructure(
401
- llamacpp
402
- .CompletionTextGenerator({
403
- // run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
404
- promptTemplate: llamacpp.prompt.ChatML,
405
- maxGenerationTokens: 1024,
406
- temperature: 0,
407
- })
408
- // automatically restrict the output to your schema using GBNF:
409
- .asStructureGenerationModel(jsonStructurePrompt.text()),
410
-
411
- zodSchema(
412
- z.array(
413
- z.object({
414
- name: z.string(),
415
- class: z
416
- .string()
417
- .describe("Character class, e.g. warrior, mage, or thief."),
418
- description: z.string(),
419
- })
420
- )
421
- ),
422
-
423
- "Generate 3 character descriptions for a fantasy role playing game. "
424
- );
425
- ```
426
-
427
- ## v0.118.0 - 2024-01-07
428
-
429
- ### Added
430
-
431
- - `LlamaCppCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `llamacpp.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.
432
-
433
- ```ts
434
- const model = llamacpp
435
- .CompletionTextGenerator({
436
- // run https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF with llama.cpp
437
- promptTemplate: llamacpp.prompt.ChatML,
438
- contextWindowSize: 4096,
439
- maxGenerationTokens: 512,
440
- })
441
- .withChatPrompt();
442
- ```
443
-
444
- ### Changed
445
-
446
- - **breaking change**: renamed `response` to `rawResponse` when using `fullResponse: true` setting.
447
- - **breaking change**: renamed `llamacpp.TextGenerator` to `llamacpp.CompletionTextGenerator`.
448
-
449
- ### Removed
450
-
451
- - **breaking change**: removed `.withTextPromptTemplate` on `LlamaCppCompletionModel`.
452
-
453
- ## v0.117.0 - 2024-01-06
454
-
455
- ### Added
456
-
457
- - Predefined Llama.cpp GBNF grammars:
458
-
459
- - `llamacpp.grammar.json`: Restricts the output to JSON.
460
- - `llamacpp.grammar.jsonArray`: Restricts the output to a JSON array.
461
- - `llamacpp.grammar.list`: Restricts the output to a newline-separated list where each line starts with `- `.
462
-
463
- - Llama.cpp structure generation support:
464
-
465
- ```ts
466
- const structure = await generateStructure(
467
- llamacpp
468
- .TextGenerator({
469
- // run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
470
- maxGenerationTokens: 1024,
471
- temperature: 0,
472
- })
473
- .withTextPromptTemplate(ChatMLPrompt.instruction()) // needed for jsonStructurePrompt.text()
474
- .asStructureGenerationModel(jsonStructurePrompt.text()), // automatically restrict the output to JSON
475
-
476
- zodSchema(
477
- z.object({
478
- characters: z.array(
479
- z.object({
480
- name: z.string(),
481
- class: z
482
- .string()
483
- .describe("Character class, e.g. warrior, mage, or thief."),
484
- description: z.string(),
485
- })
486
- ),
487
- })
488
- ),
489
-
490
- "Generate 3 character descriptions for a fantasy role playing game. "
491
- );
492
- ```
493
-
494
- ## v0.116.0 - 2024-01-05
495
-
496
- ### Added
497
-
498
- - Semantic classifier. An easy way to determine a class of a text using embeddings. Example:
499
-
500
- ```ts
501
- import { SemanticClassifier, openai } from "modelfusion";
502
-
503
- const classifier = new SemanticClassifier({
504
- embeddingModel: openai.TextEmbedder({
505
- model: "text-embedding-ada-002",
506
- }),
507
- similarityThreshold: 0.82,
508
- clusters: [
509
- {
510
- name: "politics" as const,
511
- values: [
512
- "isn't politics the best thing ever",
513
- "why don't you tell me about your political opinions",
514
- "don't you just love the president",
515
- "don't you just hate the president",
516
- "they're going to destroy this country!",
517
- "they will save the country!",
518
- ],
519
- },
520
- {
521
- name: "chitchat" as const,
522
- values: [
523
- "how's the weather today?",
524
- "how are things going?",
525
- "lovely weather today",
526
- "the weather is horrendous",
527
- "let's go to the chippy",
528
- ],
529
- },
530
- ],
531
- });
532
-
533
- console.log(await classifier.classify("don't you love politics?")); // politics
534
- console.log(await classifier.classify("how's the weather today?")); // chitchat
535
- console.log(
536
- await classifier.classify("I'm interested in learning about llama 2")
537
- ); // null
538
- ```
539
-
540
- ## v0.115.0 - 2024-01-05
541
-
542
- ### Removed
543
-
544
- - Anthropic support. Anthropic has a strong stance against open-source models and against non-US AI. I will not support them by providing a ModelFusion integration.
545
-
546
- ## v0.114.1 - 2024-01-05
547
-
548
- ### Fixed
549
-
550
- - Together AI text generation and text streaming using OpenAI-compatible chat models.
551
-
552
- ## v0.114.0 - 2024-01-05
553
-
554
- ### Added
555
-
556
- - Custom call header support for APIs. You can pass a `customCallHeaders` function into API configurations to add custom headers. The function is called with `functionType`, `functionId`, `run`, and `callId` parameters. Example for Helicone:
557
-
558
- ```ts
559
- const text = await generateText(
560
- openai
561
- .ChatTextGenerator({
562
- api: new HeliconeOpenAIApiConfiguration({
563
- customCallHeaders: ({ functionId, callId }) => ({
564
- "Helicone-Property-FunctionId": functionId,
565
- "Helicone-Property-CallId": callId,
566
- }),
567
- }),
568
- model: "gpt-3.5-turbo",
569
- temperature: 0.7,
570
- maxGenerationTokens: 500,
571
- })
572
- .withTextPrompt(),
573
-
574
- "Write a short story about a robot learning to love",
575
-
576
- { functionId: "example-function" }
577
- );
578
- ```
579
-
580
- - Rudimentary caching support for `generateText`. You can use a `MemoryCache` to store the response of a `generateText` call. Example:
581
-
582
- ```ts
583
- import { MemoryCache, generateText, ollama } from "modelfusion";
584
-
585
- const model = ollama
586
- .ChatTextGenerator({ model: "llama2:chat", maxGenerationTokens: 100 })
587
- .withTextPrompt();
588
-
589
- const cache = new MemoryCache();
590
-
591
- const text1 = await generateText(
592
- model,
593
- "Write a short story about a robot learning to love:",
594
- { cache }
595
- );
596
-
597
- console.log(text1);
598
-
599
- // 2nd call will use cached response:
600
- const text2 = await generateText(
601
- model,
602
- "Write a short story about a robot learning to love:", // same text
603
- { cache }
604
- );
605
-
606
- console.log(text2);
607
- ```
608
-
609
- - `validateTypes` and `safeValidateTypes` helpers that perform type checking of an object against a `Schema` (e.g., a `zodSchema`).
610
-
611
- ## v0.113.0 - 2024-01-03
612
-
613
- [Structure generation](https://modelfusion.dev/guide/function/generate-structure) improvements.
614
-
615
- ### Added
616
-
617
- - `.asStructureGenerationModel(...)` function to `OpenAIChatModel` and `OllamaChatModel` to create structure generation models from chat models.
618
- - `jsonStructurePrompt` helper function to create structure generation models.
619
-
620
- ### Example
621
-
622
- ```ts
623
- import {
624
- generateStructure,
625
- jsonStructurePrompt,
626
- ollama,
627
- zodSchema,
628
- } from "modelfusion";
629
-
630
- const structure = await generateStructure(
631
- ollama
632
- .ChatTextGenerator({
633
- model: "openhermes2.5-mistral",
634
- maxGenerationTokens: 1024,
635
- temperature: 0,
636
- })
637
- .asStructureGenerationModel(jsonStructurePrompt.text()),
638
-
639
- zodSchema(
640
- z.object({
641
- characters: z.array(
642
- z.object({
643
- name: z.string(),
644
- class: z
645
- .string()
646
- .describe("Character class, e.g. warrior, mage, or thief."),
647
- description: z.string(),
648
- })
649
- ),
650
- })
651
- ),
652
-
653
- "Generate 3 character descriptions for a fantasy role playing game. "
654
- );
655
- ```
656
-
657
- ## v0.112.0 - 2024-01-02
658
-
659
- ### Changed
660
-
661
- - **breaking change**: renamed `useToolsOrGenerateText` to `useTools`
662
- - **breaking change**: renamed `generateToolCallsOrText` to `generateToolCalls`
663
-
664
- ### Removed
665
-
666
- - Restriction on tool names. OpenAI tool calls do not have such a restriction.
667
-
668
- ## v0.111.0 - 2024-01-01
669
-
670
- Reworked API configuration support.
671
-
672
- ### Added
673
-
674
- - All providers now have an `Api` function that you can call to create custom API configurations. The base URL set up is more flexible and allows you to override parts of the base URL selectively.
675
- - `api` namespace with retry and throttle configurations
676
-
677
- ### Changed
678
-
679
- - Updated Cohere models.
680
- - Updated LMNT API calls to LMNT `v1` API.
681
- - **breaking change**: Renamed `throttleUnlimitedConcurrency` to `throttleOff`.
682
-
683
- ## v0.110.0 - 2023-12-30
684
-
685
- ### Changed
686
-
687
- - **breaking change**: renamed `modelfusion/extension` to `modelfusion/internal`. This requires updating `modelfusion-experimental` (if used) to `v0.3.0`
688
-
689
- ### Removed
690
-
691
- - Deprecated OpenAI completion models that will be deactivated on January 4, 2024.
692
-
693
- ## v0.109.0 - 2023-12-30
694
-
695
- ### Added
696
-
697
- - [Open AI compatible completion model](https://modelfusion.dev/integration/model-provider/openaicompatible/). It e.g. works with Fireworks AI.
698
- - Together AI API configuration (for Open AI compatible chat models):
699
-
700
- ```ts
701
- import {
702
- TogetherAIApiConfiguration,
703
- openaicompatible,
704
- streamText,
705
- } from "modelfusion";
706
-
707
- const textStream = await streamText(
708
- openaicompatible
709
- .ChatTextGenerator({
710
- api: new TogetherAIApiConfiguration(),
711
- model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
712
- })
713
- .withTextPrompt(),
714
-
715
- "Write a story about a robot learning to love"
716
- );
717
- ```
718
-
719
- - Updated Llama.cpp model settings. GBNF grammars can be passed into the `grammar` setting:
720
-
721
- ```ts
722
- const text = await generateText(
723
- llamacpp
724
- .TextGenerator({
725
- maxGenerationTokens: 512,
726
- temperature: 0,
727
- // simple list grammar:
728
- grammar: `root ::= ("- " item)+
729
- item ::= [^\\n]+ "\\n"`,
730
- })
731
- .withTextPromptTemplate(MistralInstructPrompt.text()),
732
-
733
- "List 5 ingredients for a lasagna:\n\n"
734
- );
735
- ```
736
-
737
- ## v0.107.0 - 2023-12-29
738
-
739
- ### Added
740
-
741
- - Mistral instruct prompt template
742
-
743
- ### Changed
744
-
745
- - **breaking change**: Renamed `LlamaCppTextGenerationModel` to `LlamaCppCompletionModel`.
746
-
747
- ### Fixed
748
-
749
- - Updated `LlamaCppCompletionModel` to the latest llama.cpp version.
750
- - Fixed formatting of system prompt for chats in Llama2 2 prompt template.
751
-
752
- ## v0.106.0 - 2023-12-28
753
-
754
- Experimental features that are unlikely to become stable before v1.0 have been moved to a separate `modelfusion-experimental` package.
755
-
756
- ### Removed
757
-
758
- - Cost calculation
759
- - `guard` function
760
- - Browser and server features (incl. flow)
761
- - `summarizeRecursively` function
762
-
763
- ## v0.105.0 - 2023-12-26
764
-
765
- ### Added
766
-
767
- - Tool call support for chat prompts. Assistant messages can contain tool calls, and tool messages can contain tool call results. Tool calls can be used to implement e.g. agents:
768
-
769
- ```ts
770
- const chat: ChatPrompt = {
771
- system: "You are ...",
772
- messages: [ChatMessage.user({ text: instruction })],
773
- };
774
-
775
- while (true) {
776
- const { text, toolResults } = await useToolsOrGenerateText(
777
- openai
778
- .ChatTextGenerator({ model: "gpt-4-1106-preview" })
779
- .withChatPrompt(),
780
- tools, // array of tools
781
- chat
782
- );
783
-
784
- // add the assistant and tool messages to the chat:
785
- chat.messages.push(
786
- ChatMessage.assistant({ text, toolResults }),
787
- ChatMessage.tool({ toolResults })
788
- );
789
-
790
- if (toolResults == null) {
791
- return; // no more actions, break loop
792
- }
793
-
794
- // ... (handle tool results)
795
- }
796
- ```
797
-
798
- - `streamText` returns a `text` promise when invoked with `fullResponse: true`. After the streaming has finished, the promise resolves with the full text.
799
-
800
- ```ts
801
- const { text, textStream } = await streamText(
802
- openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
803
- "Write a short story about a robot learning to love:",
804
- { fullResponse: true }
805
- );
806
-
807
- // ... (handle streaming)
808
-
809
- console.log(await text); // full text
810
- ```
811
-
812
- ## v0.104.0 - 2023-12-24
813
-
814
- ### Changed
815
-
816
- - **breaking change**: Unified text and multimodal prompt templates. `[Text/MultiModal]InstructionPrompt` is now `InstructionPrompt`, and `[Text/MultiModalChatPrompt]` is now `ChatPrompt`.
817
- - More flexible chat prompts: The chat prompt validation is now chat template specific and validated at runtime. E.g. the Llama2 prompt template only supports turns of user and assistant messages, whereas other formats are more flexible.
818
-
819
- ## v0.103.0 - 2023-12-23
820
-
821
- ### Added
822
-
823
- - `finishReason` support for `generateText`.
824
-
825
- The finish reason can be `stop` (the model stopped because it generated a stop sequence), `length` (the model stopped because it generated the maximum number of tokens), `content-filter` (the model stopped because the content filter detected a violation), `tool-calls` (the model stopped because it triggered a tool call), `error` (the model stopped because of an error), `other` (the model stopped for another reason), or `unknown` (the model stop reason is not know or the model does not support finish reasons).
826
-
827
- You can extract it from the full response when using `fullResponse: true`:
828
-
829
- ```ts
830
- const { text, finishReason } = await generateText(
831
- openai
832
- .ChatTextGenerator({ model: "gpt-3.5-turbo", maxGenerationTokens: 200 })
833
- .withTextPrompt(),
834
- "Write a short story about a robot learning to love:",
835
- { fullResponse: true }
836
- );
837
- ```
838
-
839
- ## v0.102.0 - 2023-12-22
840
-
841
- ### Added
842
-
843
- - You can specify `numberOfGenerations` on image generation models and create multiple images by using the `fullResponse: true` option. Example:
844
-
845
- ```ts
846
- // generate 2 images:
847
- const { images } = await generateImage(
848
- openai.ImageGenerator({
849
- model: "dall-e-3",
850
- numberOfGenerations: 2,
851
- size: "1024x1024",
852
- }),
853
- "the wicked witch of the west in the style of early 19th century painting",
854
- { fullResponse: true }
855
- );
856
- ```
857
-
858
- - **breaking change**: Image generation models use a generalized `numberOfGenerations` parameter (instead of model specific parameters) to specify the number of generations.
859
-
860
- ## v0.101.0 - 2023-12-22
861
-
862
- ### Changed
863
-
864
- - Automatic1111 Stable Diffusion Web UI configuration has separate configuration of host, port, and path.
865
-
866
- ### Fixed
867
-
868
- - Automatic1111 Stable Diffusion Web UI uses negative prompt and seed.
869
-
870
- ## v0.100.0 - 2023-12-17
871
-
872
- ### Added
873
-
874
- - `ollama.ChatTextGenerator` model that calls the Ollama chat API.
875
- - Ollama chat messages and prompts are exposed through `ollama.ChatMessage` and `ollama.ChatPrompt`
876
- - OpenAI chat messages and prompts are exposed through `openai.ChatMessage` and `openai.ChatPrompt`
877
- - Mistral chat messages and prompts are exposed through `mistral.ChatMessage` and `mistral.ChatPrompt`
878
-
879
- ### Changed
880
-
881
- - **breaking change**: renamed `ollama.TextGenerator` to `ollama.CompletionTextGenerator`
882
- - **breaking change**: renamed `mistral.TextGenerator` to `mistral.ChatTextGenerator`
883
-
884
- ## v0.99.0 - 2023-12-16
885
-
886
- ### Added
887
-
888
- - You can specify `numberOfGenerations` on text generation models and access multiple generations by using the `fullResponse: true` option. Example:
889
-
890
- ```ts
891
- // generate 2 texts:
892
- const { texts } = await generateText(
893
- openai.CompletionTextGenerator({
894
- model: "gpt-3.5-turbo-instruct",
895
- numberOfGenerations: 2,
896
- maxGenerationTokens: 1000,
897
- }),
898
- "Write a short story about a robot learning to love:\n\n",
899
- { fullResponse: true }
900
- );
901
- ```
902
-
903
- - **breaking change**: Text generation models use a generalized `numberOfGenerations` parameter (instead of model specific parameters) to specify the number of generations.
904
-
905
- ### Changed
906
-
907
- - **breaking change**: Renamed `maxCompletionTokens` text generation model setting to `maxGenerationTokens`.
908
-
909
- ## v0.98.0 - 2023-12-16
910
-
911
- ### Changed
912
-
913
- - **breaking change**: `responseType` option was changed into `fullResponse` option and uses a boolean value to make discovery easy. The response values from the full response have been renamed for clarity. For base64 image generation, you can use the `imageBase64` value from the full response:
914
-
915
- ```ts
916
- const { imageBase64 } = await generateImage(model, prompt, {
917
- fullResponse: true,
918
- });
919
- ```
920
-
921
- ### Improved
922
-
923
- - Better docs for the OpenAI chat settings. Thanks [@bearjaws](https://github.com/bearjaws) for the contribution!
924
-
925
- ### Fixed
926
-
927
- - Streaming OpenAI chat text generation when setting `n:2` or higher returns only the stream from the first choice.
928
-
929
- ## v0.97.0 - 2023-12-14
930
-
931
- ### Added
932
-
933
- - **breaking change**: Ollama image (vision) support. This changes the Ollama prompt format. You can add `.withTextPrompt()` to existing Ollama text generators to get a text prompt like before.
934
-
935
- Vision example:
936
-
937
- ```ts
938
- import { ollama, streamText } from "modelfusion";
939
-
940
- const textStream = await streamText(
941
- ollama.TextGenerator({
942
- model: "bakllava",
943
- maxCompletionTokens: 1024,
944
- temperature: 0,
945
- }),
946
- {
947
- prompt: "Describe the image in detail",
948
- images: [image], // base-64 encoded png or jpeg
949
- }
950
- );
951
- ```
952
-
953
- ### Changed
954
-
955
- - **breaking change**: Switch Ollama settings to camelCase to align with the rest of the library.
956
-
957
- ## v0.96.0 - 2023-12-14
958
-
959
- ### Added
960
-
961
- - [Mistral platform support](https://modelfusion.dev/integration/model-provider/mistral)
962
-
963
- ## v0.95.0 - 2023-12-10
964
-
965
- ### Added
966
-
967
- - `cachePrompt` parameter for llama.cpp models. Thanks [@djwhitt](https://github.com/djwhitt) for the contribution!
968
-
969
- ## v0.94.0 - 2023-12-10
970
-
971
- ### Added
972
-
973
- - Prompt template for neural-chat models.
974
-
975
- ## v0.93.0 - 2023-12-10
976
-
977
- ### Added
978
-
979
- - Optional response prefix for instruction prompts to guide the LLM response.
980
-
981
- ### Changed
982
-
983
- - **breaking change**: Renamed prompt format to prompt template to align with the commonly used language (e.g. from model cards).
984
-
985
- ## v0.92.1 - 2023-12-10
986
-
987
- ### Changed
988
-
989
- - Improved Ollama error handling.
990
-
991
- ## v0.92.0 - 2023-12-09
992
-
993
- ### Changed
994
-
995
- - **breaking change**: setting global function observers and global logging has changed.
996
- You can call methods on a `modelfusion` import:
997
-
998
- ```ts
999
- import { modelfusion } from "modelfusion";
1000
-
1001
- modelfusion.setLogFormat("basic-text");
1002
- ```
1003
-
1004
- - Cleaned output when using `detailed-object` log format.
1005
-
1006
- ## v0.91.0 - 2023-12-09
1007
-
1008
- ### Added
1009
-
1010
- - `Whisper.cpp` [transcription (speech-to-text) model](https://modelfusion.dev/integration/model-provider/whispercpp) support.
1011
-
1012
- ```ts
1013
- import { generateTranscription, whispercpp } from "modelfusion";
1014
-
1015
- const data = await fs.promises.readFile("data/test.wav");
1016
-
1017
- const transcription = await generateTranscription(whispercpp.Transcriber(), {
1018
- type: "wav",
1019
- data,
1020
- });
1021
- ```
1022
-
1023
- ### Improved
1024
-
1025
- - Better error reporting.
1026
-
1027
- ## v0.90.0 - 2023-12-03
1028
-
1029
- ### Added
1030
-
1031
- - Temperature and language settings to OpenAI transcription model.
1032
-
1033
- ## v0.89.0 - 2023-11-30
1034
-
1035
- ### Added
1036
-
1037
- - `maxValuesPerCall` setting for `OpenAITextEmbeddingModel` to enable different configurations, e.g. for Azure. Thanks [@nanotronic](https://github.com/nanotronic) for the contribution!
1038
-
1039
- ## v0.88.0 - 2023-11-28
1040
-
1041
- ### Added
1042
-
1043
- - Multi-modal chat prompts. Supported by OpenAI vision chat models and by BakLLaVA prompt format.
1044
-
1045
- ### Changed
1046
-
1047
- - **breaking change**: renamed `ChatPrompt` to `TextChatPrompt` to distinguish it from multi-modal chat prompts.
1048
-
1049
- ## v0.87.0 - 2023-11-27
1050
-
1051
- ### Added
1052
-
1053
- - **experimental**: `modelfusion/extension` export with functions and classes that are necessary to implement providers in 3rd party node modules. See [lgrammel/modelfusion-example-provider](https://github.com/lgrammel/modelfusion-example-provider) for an example.
1054
-
1055
- ## v0.85.0 - 2023-11-26
1056
-
1057
- ### Added
1058
-
1059
- - `OpenAIChatMessage` function call support.
1060
-
1061
- ## v0.84.0 - 2023-11-26
1062
-
1063
- ### Added
1064
-
1065
- - Support for OpenAI-compatible chat APIs. See [OpenAI Compatible](https://modelfusion.dev/integration/model-provider/openaicompatible) for details.
1066
-
1067
- ```ts
1068
- import {
1069
- BaseUrlApiConfiguration,
1070
- openaicompatible,
1071
- generateText,
1072
- } from "modelfusion";
1073
-
1074
- const text = await generateText(
1075
- openaicompatible
1076
- .ChatTextGenerator({
1077
- api: new BaseUrlApiConfiguration({
1078
- baseUrl: "https://api.fireworks.ai/inference/v1",
1079
- headers: {
1080
- Authorization: `Bearer ${process.env.FIREWORKS_API_KEY}`,
1081
- },
1082
- }),
1083
- model: "accounts/fireworks/models/mistral-7b",
1084
- })
1085
- .withTextPrompt(),
1086
-
1087
- "Write a story about a robot learning to love"
1088
- );
1089
- ```
1090
-
1091
- ## v0.83.0 - 2023-11-26
1092
-
1093
- ### Added
1094
-
1095
- - Introduce `uncheckedSchema()` facade function as an easier way to create unchecked ModelFusion schemas. This aligns the API with `zodSchema()`.
1096
-
1097
- ### Changed
1098
-
1099
- - **breaking change**: Renamed `InstructionPrompt` interface to `MultiModalInstructionPrompt` to clearly distinguish it from `TextInstructionPrompt`.
1100
- - **breaking change**: Renamed `.withBasicPrompt` methods for image generation models to `.withTextPrompt` to align with text generation models.
1101
-
1102
- ## v0.82.0 - 2023-11-25
1103
-
1104
- ### Added
1105
-
1106
- - Introduce `zodSchema()` facade function as an easier way to create new ModelFusion Zod schemas. This clearly distinguishes it from `ZodSchema` that is also part of the zod library.
1107
-
1108
- ## v0.81.0 - 2023-11-25
1109
-
1110
- **breaking change**: `generateStructure` and `streamStructure` redesign. The new API does not require function calling and `StructureDefinition` objects any more. This makes it more flexible and it can be used in 3 ways:
1111
-
1112
- - with OpenAI function calling:
1113
-
1114
- ```ts
1115
- const model = openai
1116
- .ChatTextGenerator({ model: "gpt-3.5-turbo" })
1117
- .asFunctionCallStructureGenerationModel({
1118
- fnName: "...",
1119
- fnDescription: "...",
1120
- });
1121
- ```
1122
-
1123
- - with OpenAI JSON format:
1124
-
1125
- ```ts
1126
- const model = openai
1127
- .ChatTextGenerator({
1128
- model: "gpt-4-1106-preview",
1129
- temperature: 0,
1130
- maxCompletionTokens: 1024,
1131
- responseFormat: { type: "json_object" },
1132
- })
1133
- .asStructureGenerationModel(
1134
- jsonStructurePrompt((instruction: string, schema) => [
1135
- OpenAIChatMessage.system(
1136
- "JSON schema: \n" +
1137
- JSON.stringify(schema.getJsonSchema()) +
1138
- "\n\n" +
1139
- "Respond only using JSON that matches the above schema."
1140
- ),
1141
- OpenAIChatMessage.user(instruction),
1142
- ])
1143
- );
1144
- ```
1145
-
1146
- - with Ollama (and a capable model, e.g., OpenHermes 2.5):
1147
- ```ts
1148
- const model = ollama
1149
- .TextGenerator({
1150
- model: "openhermes2.5-mistral",
1151
- maxCompletionTokens: 1024,
1152
- temperature: 0,
1153
- format: "json",
1154
- raw: true,
1155
- stopSequences: ["\n\n"], // prevent infinite generation
1156
- })
1157
- .withPromptFormat(ChatMLPromptFormat.instruction())
1158
- .asStructureGenerationModel(
1159
- jsonStructurePrompt((instruction: string, schema) => ({
1160
- system:
1161
- "JSON schema: \n" +
1162
- JSON.stringify(schema.getJsonSchema()) +
1163
- "\n\n" +
1164
- "Respond only using JSON that matches the above schema.",
1165
- instruction,
1166
- }))
1167
- );
1168
- ```
1169
-
1170
- See [generateStructure](https://modelfusion.dev/guide/function/generate-structure) for details on the new API.
1171
-
1172
- ## v0.80.0 - 2023-11-24
1173
-
1174
- ### Changed
1175
-
1176
- - **breaking change**: Restructured multi-modal instruction prompts and `OpenAIChatMessage.user()`
1177
-
1178
- ## v0.79.0 - 2023-11-23
1179
-
1180
- ### Added
1181
-
1182
- - Multi-tool usage from open source models
1183
-
1184
- Use `TextGenerationToolCallsOrGenerateTextModel` and related helper methods `.asToolCallsOrTextGenerationModel()` to create custom prompts & parsers.
1185
-
1186
- Examples:
1187
-
1188
- - `examples/basic/src/model-provider/ollama/ollama-use-tools-or-generate-text-openhermes-example.ts`
1189
- - `examples/basic/src/model-provider/llamacpp/llamacpp-use-tools-or-generate-text-openhermes-example.ts`
1190
-
1191
- Example prompt format:
1192
-
1193
- - `examples/basic/src/tool/prompts/open-hermes.ts` for OpenHermes 2.5
1194
-
1195
- ## v0.78.0 - 2023-11-23
1196
-
1197
- ### Removed
1198
-
1199
- **breaking change**: Removed `FunctionListToolCallPromptFormat`. See `examples/basic/src/model-provider/ollama/ollama-use-tool-mistral-example.ts` for how to implement a `ToolCallPromptFormat` for your tool.
1200
-
1201
- ## v0.77.0 - 2023-11-23
1202
-
1203
- ### Changed
1204
-
1205
- - **breaking change**: Rename `Speech` to `SpeechGenerator` in facades
1206
- - **breaking change**: Rename `Transcription` to `Transcriber` in facades
1207
-
1208
- ## v0.76.0 - 2023-11-23
1209
-
1210
- ### Added
1211
-
1212
- - Anthropic Claude 2.1 support
1213
-
1214
- ## v0.75.0 - 2023-11-22
1215
-
1216
- Introducing model provider facades:
1217
-
1218
- ```ts
1219
- const image = await generateImage(
1220
- openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
1221
- "the wicked witch of the west in the style of early 19th century painting"
1222
- );
1223
- ```
1224
-
1225
- ### Added
1226
-
1227
- - Model provider facades. You can e.g. use `ollama.TextGenerator(...)` instead of `new OllamaTextGenerationModel(...)`.
1228
-
1229
- ### Changed
1230
-
1231
- - **breaking change**: Fixed method name `isParallizable` to `isParallelizable` in `EmbeddingModel`.
1232
-
1233
- ### Removed
1234
-
1235
- - **breaking change**: removed `HuggingFaceImageDescriptionModel`. Image description models will be replaced by multi-modal vision models.
1236
-
1237
- ## v0.74.1 - 2023-11-22
1238
-
1239
- ### Improved
1240
-
1241
- - Increase OpenAI chat streaming resilience.
1242
-
1243
- ## v0.74.0 - 2023-11-21
1244
-
1245
- Prompt format and tool calling improvements.
1246
-
1247
- ### Added
1248
-
1249
- - text prompt format. Use simple text prompts, e.g. with `OpenAIChatModel`:
1250
- ```ts
1251
- const textStream = await streamText(
1252
- new OpenAIChatModel({
1253
- model: "gpt-3.5-turbo",
1254
- }).withTextPrompt(),
1255
- "Write a short story about a robot learning to love."
1256
- );
1257
- ```
1258
- - `.withTextPromptFormat` to `LlamaCppTextGenerationModel` for simplified prompt construction:
1259
- ```ts
1260
- const textStream = await streamText(
1261
- new LlamaCppTextGenerationModel({
1262
- // ...
1263
- }).withTextPromptFormat(Llama2PromptFormat.text()),
1264
- "Write a short story about a robot learning to love."
1265
- );
1266
- ```
1267
- - `.asToolCallGenerationModel()` to `OllamaTextGenerationModel` to simplify tool calls.
1268
-
1269
- ### Improved
1270
-
1271
- better error reporting when using exponential backoff retries
1272
-
1273
- ### Removed
1274
-
1275
- - **breaking change**: removed `input` from `InstructionPrompt` (was Alpaca-specific, `AlpacaPromptFormat` still supports it)
1276
-
1277
- ## v0.73.1 - 2023-11-19
1278
-
1279
- Remove section newlines from Llama 2 prompt format.
1280
-
1281
- ## v0.73.0 - 2023-11-19
1282
-
1283
- Ollama edge case and error handling improvements.
1284
-
1285
- ## v0.72.0 - 2023-11-19
1286
-
1287
- **Breaking change**: the tool calling API has been reworked to support multiple parallel tool calls. This required multiple breaking changes (see below). Check out the updated [tools documentation](https://modelfusion.dev/guide/tools/) for details.
1288
-
1289
- ### Changed
1290
-
1291
- - `Tool` has `parameters` and `returnType` schemas (instead of `inputSchema` and `outputSchema`).
1292
- - `useTool` uses `generateToolCall` under the hood. The return value and error handling has changed.
1293
- - `useToolOrGenerateText` has been renamed to `useToolsOrGenerateText`. It uses `generateToolCallsOrText` under the hood. The return value and error handling has changed. It can invoke several tools in parallel and returns an array of tool results.
1294
- - The `maxRetries` parameter in `guard` has been replaced by a `maxAttempt` parameter.
1295
-
1296
- ### Removed
1297
-
1298
- - `generateStructureOrText` has been removed.
1299
-
1300
- ## v0.71.0 - 2023-11-17
1301
-
1302
- ### Added
1303
-
1304
- Experimental generateToolCallsOrText function for generating multiple parallel tool calls using the OpenAI chat/tools API.
1305
-
1306
- ## v0.70.0 - 2023-11-16
1307
-
1308
- ### Added
1309
-
1310
- - ChatML prompt format.
1311
-
1312
- ### Changed
1313
-
1314
- - **breaking change**: `ChatPrompt` structure and terminology has changed to align more closely with OpenAI and similar chat prompts. This is also in preparation for integrating images and function calls results into chat prompts.
1315
- - **breaking change**: Prompt formats are namespaced. Use e.g. `Llama2PromptFormat.chat()` instead of `mapChatPromptToLlama2Format()`. See [Prompt Format](https://modelfusion.dev/guide/function/generate-text#prompt-styles) for documentation of the new prompt formats.
1316
-
1317
- ## v0.69.0 - 2023-11-15
1318
-
1319
- ### Added
1320
-
1321
- - Experimental generateToolCall function for generating a single tool call using the OpenAI chat/tools API.
1322
-
1323
- ## v0.68.0 - 2023-11-14
1324
-
1325
- ### Changed
1326
-
1327
- - Refactored JSON parsing to use abstracted schemas. You can use `parseJSON` and `safeParseJSON` to securely parse JSON objects and optionally type-check them using any schema (e.g. a Zod schema).
1328
-
1329
- ## v0.67.0 - 2023-11-12
1330
-
1331
- ### Added
1332
-
1333
- - Ollama 0.1.9 support: `format` (for forcing JSON output) and `raw` settings
1334
- - Improved Ollama settings documentation
1335
-
1336
- ## v0.66.0 - 2023-11-12
1337
-
1338
- ### Added
1339
-
1340
- - Support for fine-tuned OpenAI `gpt-4-0613` models
1341
- - Support for `trimWhitespace` model setting in `streamText` calls
1342
-
1343
- ## v0.65.0 - 2023-11-12
1344
-
1345
- ### Added
1346
-
1347
- - Image support for `OpenAIChatMessage.user`
1348
- - `mapInstructionPromptToBakLLaVA1ForLlamaCppFormat` prompt format
1349
-
1350
- ### Changed
1351
-
1352
- - **breaking change**: `VisionInstructionPrompt` was replaced by an optional `image` field in `InstructionPrompt`.
1353
-
1354
- ## v0.64.0 - 2023-11-11
1355
-
1356
- ### Added
1357
-
1358
- - Support for OpenAI vision model.
1359
- - Example: `examples/basic/src/model-provider/openai/openai-chat-stream-text-vision-example.ts`
1360
-
1361
- ## v0.63.0 - 2023-11-08
1362
-
1363
- ### Added
1364
-
1365
- - Support for OpenAI chat completion `seed` and `responseFormat` options.
1366
-
1367
- ## v0.62.0 - 2023-11-08
1368
-
1369
- ### Added
1370
-
1371
- - OpenAI speech generation support. Shoutout to [@bjsi](https://github.com/bjsi) for the awesome contribution!
1372
-
1373
- ## v0.61.0 - 2023-11-07
1374
-
1375
- ### Added
1376
-
1377
- - OpenAI `gpt-3.5-turbo-1106`, `gpt-4-1106-preview`, `gpt-4-vision-preview` chat models.
1378
- OpenAI `DALL-E 3` image model.
1379
-
1380
- ### Changed
1381
-
1382
- - **breaking change**: `OpenAIImageGenerationModel` requires a `model` parameter.
1383
-
1384
- ## v0.60.0 - 2023-11-06
1385
-
1386
- ### Added
1387
-
1388
- - Support image input for multi-modal Llama.cpp models (e.g. Llava, Bakllava).
1389
-
1390
- ### Changed
1391
-
1392
- - **breaking change**: Llama.cpp prompt format has changed to support images. Use `.withTextPrompt()` to get a text prompt format.
1393
-
1394
- ## v0.59.0 - 2023-11-06
1395
-
1396
- ### Added
1397
-
1398
- - ElevenLabs `eleven_turbo_v2` support.
1399
-
1400
- ## v0.58 - 2023-11-05
1401
-
1402
- ### Fixed
1403
-
1404
- - **breaking change**: Uncaught errors were caused by custom Promises. ModelFusion uses only standard Promises. To get full responses from model function, you need to use the `{ returnType: "full" }` option instead of calling `.asFullResponse()` on the result.
1405
-
1406
- ## v0.57.1 - 2023-11-05
1407
-
1408
- ### Improved
1409
-
1410
- - ModelFusion server error logging and reporting.
1411
-
1412
- ### Fixed
1413
-
1414
- - ModelFusion server creates directory for runs automatically when errors are thrown.
1415
-
1416
- ## v0.57.0 - 2023-11-04
1417
-
1418
- ### Added
1419
-
1420
- - Support for [Cohere v3 embeddings](https://txt.cohere.com/introducing-embed-v3/).
1421
-
1422
- ## v0.56.0 - 2023-11-04
1423
-
1424
- ### Added
1425
-
1426
- - [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text embeddings.
1427
-
1428
- ## v0.55.1 - 2023-11-04
1429
-
1430
- ### Fixed
1431
-
1432
- - Llama.cpp embeddings are invoked sequentially to avoid rejection by the server.
1433
-
1434
- ## v0.55.0 - 2023-11-04
1435
-
1436
- ### Added
1437
-
1438
- - [Ollama model provider](https://modelfusion.dev/integration/model-provider/ollama) for text generation and text streaming.
1439
-
1440
- ## v0.54.0 - 2023-10-29
1441
-
1442
- Adding experimental ModelFusion server, flows, and browser utils.
1443
-
1444
- ### Added
1445
-
1446
- - ModelFusion server (separate export 'modelfusion/server') with a Fastify plugin for running ModelFusion flows on a server.
1447
- - ModelFusion flows.
1448
- - ModelFusion browser utils (separate export 'modelfusion/browser') for dealing with audio data and invoking ModelFusion flows on the server (`invokeFlow`).
1449
-
1450
- ### Changed
1451
-
1452
- - **breaking change**: `readEventSource` and `readEventSourceStream` are part of 'modelfusion/browser'.
1453
-
1454
- ## v0.53.2 - 2023-10-26
1455
-
1456
- ### Added
1457
-
1458
- - Prompt callback option for `streamStructure`
1459
-
1460
- ### Improved
1461
-
1462
- - Inline JSDoc comments for the model functions.
1463
-
1464
- ## v0.53.1 - 2023-10-25
1465
-
1466
- ### Fixed
1467
-
1468
- - Abort signals and errors during streaming are caught and forwarded correctly.
1469
-
1470
- ## v0.53.0 - 2023-10-23
1471
-
1472
- ### Added
1473
-
1474
- - `executeFunction` utility function for tracing execution time, parameters, and result of composite functions and non-ModelFusion functions.
1475
-
1476
- ## v0.52.0 - 2023-10-23
1477
-
1478
- ### Changed
1479
-
1480
- - Streaming results and `AsyncQueue` objects can be used by several consumers. Each consumer will receive all values. This means that you can e.g. forward the same text stream to speech generation and the client.
1481
-
1482
- ## v0.51.0 - 2023-10-23
1483
-
1484
- ElevenLabs improvements.
1485
-
1486
- ### Added
1487
-
1488
- - ElevenLabs model settings `outputFormat` and `optimizeStreamingLatency`.
1489
-
1490
- ### Fixed
1491
-
1492
- - Default ElevenLabs model is `eleven_monolingual_v1`.
1493
-
1494
- ## v0.50.0 - 2023-10-22
1495
-
1496
- ### Added
1497
-
1498
- - `parentCallId` event property
1499
- - Tracing for `useTool`, `useToolOrGenerateText`, `upsertIntoVectorIndex`, and `guard`
1500
-
1501
- ### Changed
1502
-
1503
- - **breaking change**: rename `embedding` event type to `embed`
1504
- - **breaking change**: rename `image-generation` event type to `generate-image`
1505
- - **breaking change**: rename `speech-generation` event type to `generate-speech`
1506
- - **breaking change**: rename `speech-streaming` event type to `stream-speech`
1507
- - **breaking change**: rename `structure-generation` event type to `generate-structure`
1508
- - **breaking change**: rename `structure-or-text-generation` event type to `generate-structure-or-text`
1509
- - **breaking change**: rename `structure-streaming` event type to `stream-structure`
1510
- - **breaking change**: rename `text-generation` event type to `generate-text`
1511
- - **breaking change**: rename `text-streaming` event type to `stream-text`
1512
- - **breaking change**: rename `transcription` event type to `generate-transcription`
1513
-
1514
- ## v0.49.0 - 2023-10-21
1515
-
1516
- ### Added
1517
-
1518
- - Speech synthesis streaming supports string inputs.
1519
- - Observability for speech synthesis streaming.
1520
-
1521
- ### Changed
1522
-
1523
- - **breaking change**: split `synthesizeSpeech` into `generateSpeech` and `streamSpeech` functions
1524
- - **breaking change**: renamed `speech-synthesis` event to `speech-generation`
1525
- - **breaking change**: renamed `transcribe` to `generateTranscription`
1526
- - **breaking change**: renamed `LmntSpeechSynthesisModel` to `LmntSpeechModel`
1527
- - **breaking change**: renamed `ElevenLabesSpeechSynthesisModel` to `ElevenLabsSpeechModel`
1528
- - **breaking change**: renamed `OpenAITextGenerationModel` to `OpenAICompletionModel`
1529
-
1530
- ### Removed
1531
-
1532
- - **breaking change**: `describeImage` model function. Use `generateText` instead (with e.g. `HuggingFaceImageDescriptionModel`).
1533
-
1534
- ## v0.48.0 - 2023-10-20
1535
-
1536
- ### Added
1537
-
1538
- - Duplex streaming for speech synthesis.
1539
- - Elevenlabs duplex streaming support.
1540
-
1541
- ### Changed
1542
-
1543
- - Schema is using data in return type (breaking change for tools).
1544
-
1545
- ## v0.47.0 - 2023-10-14
1546
-
1547
- ### Added
1548
-
1549
- - Prompt formats for image generation. You can use `.withPromptFormat()` or `.withBasicPrompt()` to apply a prompt format to an image generation model.
1550
-
1551
- ### Changed
1552
-
1553
- - **breaking change**: `generateImage` returns a Buffer with the binary image data instead of a base-64 encoded string. You can call `.asBase64Text()` on the response to get a base64 encoded string.
1554
-
1555
- ## v0.46.0 - 2023-10-14
1556
-
1557
- ### Added
1558
-
1559
- - `.withChatPrompt()` and `.withInstructionPrompt()` shorthand methods.
1560
-
1561
- ## v0.45.0 - 2023-10-14
1562
-
1563
- ### Changed
1564
-
1565
- - Updated Zod to 3.22.4. You need to use Zod 3.22.4 or higher in your project.
1566
-
1567
- ## v0.44.0 - 2023-10-13
1568
-
1569
- ### Added
1570
-
1571
- Store runs in AsyncLocalStorage for convenience (Node.js only).
1572
-
1573
- ## v0.43.0 - 2023-10-12
1574
-
1575
- ### Added
1576
-
1577
- - Guard function.
1578
-
1579
- ## v0.42.0 - 2023-10-11
1580
-
1581
- ### Added
1582
-
1583
- - Anthropic model support (Claude 2, Claude instant).
1584
-
1585
- ## v0.41.0 - 2023-10-05
1586
-
1587
- ### Changed
1588
-
1589
- **breaking change**: generics simplification to enable dynamic model usage. Models can be used more easily as function parameters.
1590
-
1591
- - `output` renamed to `value` in `asFullResponse()`
1592
- - model settings can no longer be configured as a model options parameter. Use `.withSettings()` instead.
1593
-
1594
- ## v0.40.0 - 2023-10-04
1595
-
1596
- ### Changed
1597
-
1598
- **breaking change**: moved Pinecone integration into `@modelfusion/pinecone` module.
1599
-
1600
- ## v0.39.0 - 2023-10-03
1601
-
1602
- ### Added
1603
-
1604
- - `readEventSource` for parsing a server-sent event stream using the JavaScript EventSource.
1605
-
1606
- ### Changed
1607
-
1608
- **breaking change**: generalization to use Schema instead of Zod.
1609
-
1610
- - `MemoryVectorIndex.deserialize` requires a `Schema`, e.g. `new ZodSchema` (from ModelFusion).
1611
- - `readEventSourceStream` requires a `Schema`.
1612
- - `UncheckedJsonSchema[Schema/StructureDefinition]` renamed to `Unchecked[Schema/StructureDefinition]`.
1613
-
1614
- ## v0.38.0 - 2023-10-02
1615
-
1616
- ### Changed
1617
-
1618
- **breaking change**: Generalized embeddings beyond text embedding.
1619
-
1620
- - `embedText` renamed to `embed`.
1621
- - `embedTexts` renamed to `embedMany`
1622
- - Removed filtering from `VectorIndexRetriever` query (still available as a setting).
1623
-
1624
- ## v0.37.0 - 2023-10-02
1625
-
1626
- ### Added
1627
-
1628
- - `VectorIndexRetriever` supports a filter option that is passed to the vector index.
1629
- - `MemoryVectorIndex` supports filter functions that are applied to the objects before calculating the embeddings.
1630
-
1631
- ## v0.36.0 - 2023-10-02
1632
-
1633
- ### Added
1634
-
1635
- - `basic-text` logger logs function ids when available.
1636
- - `retrieve` produces events for logging and observability.
1637
-
1638
- ## v0.35.2 - 2023-09-27
1639
-
1640
- ### Fixed
1641
-
1642
- - Support empty stop sequences when calling OpenAI text and chat models.
1643
-
1644
- ## v0.35.1 - 2023-09-27
1645
-
1646
- ### Fixed
1647
-
1648
- - Fixed bugs in `streamStructure` partial JSON parsing.
1649
-
1650
- ## v0.35.0 - 2023-09-26
1651
-
1652
- ### Added
1653
-
1654
- - `streamStructure` for streaming structured responses, e.g. from OpenAI function calls. Thanks [@bjsi](https://github.com/bjsi) for the input!
1655
-
1656
- ## v0.34.0 - 2023-09-25
1657
-
1658
- ### Added
1659
-
1660
- - First version of event source utilities: `AsyncQueue`, `createEventSourceStream`, `readEventSourceStream`.
1661
-
1662
- ## v0.33.1 - 2023-09-24
1663
-
1664
- ### Fixed
1665
-
1666
- - Remove resolution part from type definitions.
1667
-
1668
- ## v0.33.0 - 2023-09-19
1669
-
1670
- ### Changed
1671
-
1672
- **breaking change**: Generalized vector store upsert/retrieve beyond text chunks:
1673
-
1674
- - `upsertTextChunks` renamed to `upsertIntoVectorStore`. Syntax has changed.
1675
- - `retrieveTextChunks` renamed to `retrieve`
1676
- - `SimilarTextChunksFromVectorIndexRetriever` renamed to `VectorIndexRetriever`
1677
-
1678
- ## v0.32.0 - 2023-09-19
1679
-
1680
- ### Added
1681
-
1682
- - OpenAI gpt-3.5-turbo-instruct model support.
1683
- - Autocomplete for Stability AI models (thanks [@Danielwinkelmann](https://github.com/Danielwinkelmann)!)
1684
-
1685
- ### Changed
1686
-
1687
- - Downgrade Zod version to 3.21.4 because of https://github.com/colinhacks/zod/issues/2697
1688
-
1689
- ## v0.31.0 - 2023-09-13
1690
-
1691
- ### Changed
1692
-
1693
- - **breaking change**: Renamed chat format construction functions to follow the pattern `map[Chat|Instruction]PromptTo[FORMAT]Format()`, e.g. `mapInstructionPromptToAlpacaFormat()`, for easy auto-completion.
1694
-
1695
- ### Removed
1696
-
1697
- - **breaking change**: The prompts for `generateStructure` and `generateStructureOrText` have been simplified. You can remove the `OpenAIChatPrompt.forStructureCurried` (and similar) parts.
1698
-
1699
- ## v0.30.0 - 2023-09-10
1700
-
1701
- ### Added
1702
-
1703
- You can directly pass JSON schemas into `generateStructure` and `generateStructureOrText` calls without validation using `UncheckedJsonSchemaStructureDefinition`. This is useful when you need more flexibility and don't require type inference. See `examples/basic/src/util/schema/generate-structure-unchecked-json-schema-example.ts`.
1704
-
1705
- ### Changed
1706
-
1707
- - **BREAKING CHANGE**: renamed `generateJson` and `generateJsonOrText` to `generateStructure` and `generateStructureOrText`.
1708
- - **BREAKING CHANGE**: introduced `ZodSchema` and `ZodStructureDefinition`. These are required for `generateStructure` and `generateStructureOrText` calls and in tools.
1709
- - **BREAKING CHANGE**: renamed the corresponding methods and objects.
1710
-
1711
- Why this breaking change?
1712
-
1713
- ModelFusion is currently tied to Zod, but there are many other type checking libraries out there, and Zod does not map perfectly to JSON Schema (which is used in OpenAI function calling).
1714
- Enabling you to use JSON Schema directly in ModelFusion is a first step towards decoupling ModelFusion from Zod.
1715
- You can also configure your own schema adapters that e.g. use Ajv or another library.
1716
- Since this change already affected all JSON generation calls and tools, I included other changes that I had planned in the same area (e.g., renaming to generateStructure and making it more consistent).
1717
-
1718
- ## v0.29.0 - 2023-09-09
1719
-
1720
- ### Added
1721
-
1722
- - `describeImage` model function for image captioning and OCR. HuggingFace provider available.
1723
-
1724
- ## v0.28.0 - 2023-09-09
1725
-
1726
- ### Added
1727
-
1728
- - BaseUrlApiConfiguration class for setting up API configurations with custom base URLs and headers.
1729
-
1730
- ## v0.27.0 - 2023-09-07
1731
-
1732
- ### Added
1733
-
1734
- - Support for running OpenAI on Microsoft Azure.
1735
-
1736
- ### Changed
1737
-
1738
- - **Breaking change**: Introduce API configuration. This affects setting the baseUrl, throttling, and retries.
1739
- - Improved Helicone support via `HeliconeOpenAIApiConfiguration`.
1740
-
1741
- ## v0.26.0 - 2023-09-06
1742
-
1743
- ### Added
1744
-
1745
- - LMNT speech synthesis support.
1746
-
1747
- ## v0.25.0 - 2023-09-05
1748
-
1749
- ### Changed
1750
-
1751
- - Separated cost calculation from Run.
1752
-
1753
- ## v0.24.1 - 2023-09-04
1754
-
1755
- ### Added
1756
-
1757
- - Exposed `logitBias` setting for OpenAI chat and text generation models.
1758
-
1759
- ## v0.24.0 - 2023-09-02
1760
-
1761
- ### Added
1762
-
1763
- - Support for fine-tuned OpenAI models (for the `davinci-002`, `babbage-002`, and `gpt-3.5-turbo` base models).
1764
-
1765
- ## v0.23.0 - 2023-08-31
1766
-
1767
- ### Added
1768
-
1769
- - Function logging support.
1770
- - Usage information for events.
1771
- - Filtering of model settings for events.
1772
-
1773
- ## v0.22.0 - 2023-08-28
1774
-
1775
- ### Changed
1776
-
1777
- - **Breaking change**: Restructured the function call events.
1778
-
1779
- ## v0.21.0 - 2023-08-26
1780
-
1781
- ### Changed
1782
-
1783
- - **Breaking change**: Reworked the function observer system. See [Function observers](https://modelfusion.dev/guide/util/observer) for details on how to use the new system.
1784
-
1785
- ## v0.20.0 - 2023-08-24
1786
-
1787
- ### Changed
1788
-
1789
- - **Breaking change**: Use `.asFullResponse()` to get full responses from model functions (replaces the `fullResponse: true` option).
1790
-
1791
- ## v0.19.0 - 2023-08-23
1792
-
1793
- ### Added
1794
-
1795
- - Support for "babbage-002" and "davinci-002" OpenAI base models.
1796
-
1797
- ### Fixed
1798
-
1799
- - Choose correct tokenizer for older OpenAI text models.
1800
-
1801
- ## v0.18.0 - 2023-08-22
1802
-
1803
- ### Added
1804
-
1805
- - Support for ElevenLabs speech synthesis parameters.
1806
-
1807
- ## v0.17.0 - 2023-08-21
1808
-
1809
- ### Added
1810
-
1811
- - `generateSpeech` function to generate speech from text.
1812
- - ElevenLabs support.
1813
-
1814
- ## v0.15.0 - 2023-08-21
1815
-
1816
- ### Changed
1817
-
1818
- - Introduced unified `stopSequences` and `maxCompletionTokens` properties for all text generation models. **Breaking change**: `maxCompletionTokens` and `stopSequences` are part of the base TextGenerationModel. Specific names for these properties in models have been replaced by this, e.g. `maxTokens` in OpenAI models is `maxCompletionTokens`.
1819
-
1820
- ## v0.14.0 - 2023-08-17
1821
-
1822
- ### Changed
1823
-
1824
- - **Breaking change**: Renamed prompt mappings (and related code) to prompt format.
1825
- - Improved type inference for WebSearchTool and executeTool.
1826
-
1827
- ## v0.12.0 - 2023-08-15
1828
-
1829
- ### Added
1830
-
1831
- - JsonTextGenerationModel and InstructionWithSchemaPrompt to support generateJson on text generation models.
1832
-
1833
- ## v0.11.0 - 2023-08-14
1834
-
1835
- ### Changed
1836
-
1837
- - WebSearchTool signature updated.
1838
-
1839
- ## v0.10.0 - 2023-08-13
1840
-
1841
- ### Added
1842
-
1843
- - Convenience functions to create OpenAI chat messages from tool calls and results.
1844
-
1845
- ## v0.9.0 - 2023-08-13
1846
-
1847
- ### Added
1848
-
1849
- - `WebSearchTool` definition to support the SerpAPI tool (separate package: `@modelfusion/serpapi-tools`)
1850
-
1851
- ## v0.8.0 - 2023-08-12
1852
-
1853
- ### Added
1854
-
1855
- - `executeTool` function that directly executes a single tool and records execution metadata.
1856
-
1857
- ### Changed
1858
-
1859
- - Reworked event system and introduced RunFunctionEvent.
1860
-
1861
- ## v0.7.0 - 2023-08-10
1862
-
1863
- ### Changed
1864
-
1865
- - **Breaking change**: Model functions return a simple object by default to make the 95% use case easier. You can use the `fullResponse` option to get a richer response object that includes the original model response and metadata.
1866
-
1867
- ## v0.6.0 - 2023-08-07
1868
-
1869
- ### Added
1870
-
1871
- - `splitTextChunk` function.
1872
-
1873
- ### Changed
1874
-
1875
- - **Breaking change**: Restructured text splitter functions.
1876
-
1877
- ## v0.5.0 - 2023-08-07
1878
-
1879
- ### Added
1880
-
1881
- - `splitTextChunks` function.
1882
- - Chat with PDF demo.
1883
-
1884
- ### Changed
1885
-
1886
- - **Breaking change**: Renamed VectorIndexSimilarTextChunkRetriever to SimilarTextChunksFromVectorIndexRetriever.
1887
- **Breaking change**: Renamed 'content' property in TextChunk to 'text'.
1888
-
1889
- ### Removed
1890
-
1891
- - `VectorIndexTextChunkStore`
1892
-
1893
- ## v0.4.1 - 2023-08-06
1894
-
1895
- ### Fixed
1896
-
1897
- - Type inference bug in `trimChatPrompt`.
1898
-
1899
- ## v0.4.0 - 2023-08-06
1900
-
1901
- ### Added
1902
-
1903
- - HuggingFace text embedding support.
1904
-
1905
- ## v0.3.0 - 2023-08-05
1906
-
1907
- ### Added
1908
-
1909
- - Helicone observability integration.
1910
-
1911
- ## v0.2.0 - 2023-08-04
1912
-
1913
- ### Added
1914
-
1915
- - Instruction prompts can contain optional `input` property.
1916
- - Alpaca instruction prompt mapping.
1917
- - Vicuna chat prompt mapping.
1918
-
1919
- ## v0.1.1 - 2023-08-02
1920
-
1921
- ### Changed
1922
-
1923
- - Docs updated to ModelFusion.
1924
-
1925
- ## v0.1.0 - 2023-08-01
1926
-
1927
- ### Changed
1928
-
1929
- - **Breaking Change**: Renamed to `modelfusion` (from `ai-utils.js`).
1930
-
1931
- ## v0.0.43 - 2023-08-01
1932
-
1933
- ### Changed
1934
-
1935
- - **Breaking Change**: model functions return rich objects that include the result, the model response and metadata. This enables you to access the original model response easily when you need it and also use the metadata outside of runs.
1936
-
1937
- ## v0.0.42 - 2023-07-31
1938
-
1939
- ### Added
1940
-
1941
- - `trimChatPrompt()` function to fit chat prompts into the context window and leave enough space for the completion.
1942
- - `maxCompletionTokens` property on TextGenerationModels.
1943
-
1944
- ### Changed
1945
-
1946
- - Renamed `withMaxTokens` to `withMaxCompletionTokens` on TextGenerationModels.
1947
-
1948
- ### Removed
1949
-
1950
- - `composeRecentMessagesOpenAIChatPrompt` function (use `trimChatPrompt` instead).
1951
-
1952
- ## v0.0.41 - 2023-07-30
1953
-
1954
- ### Added
1955
-
1956
- - ChatPrompt concept (with chat prompt mappings for text, OpenAI chat, and Llama 2 prompts).
1957
-
1958
- ### Changed
1959
-
1960
- - Renamed prompt mappings and changed into functions.
1961
-
1962
- ## v0.0.40 - 2023-07-30
1963
-
1964
- ### Added
1965
-
1966
- - Prompt mapping support for text generation and streaming.
1967
- - Added instruction prompt concept and mapping.
1968
- - Option to specify context window size for Llama.cpp text generation models.
1969
-
1970
- ### Changed
1971
-
1972
- - Renamed 'maxTokens' to 'contextWindowSize' where applicable.
1973
- - Restructured how tokenizers are exposed by text generation models.
1974
-
1975
- ## v0.0.39 - 2023-07-26
1976
-
1977
- ### Added
1978
-
1979
- - llama.cpp embedding support.
1980
-
1981
- ## v0.0.38 - 2023-07-24
1982
-
1983
- ### Changed
1984
-
1985
- - `zod` and `zod-to-json-schema` are peer dependencies and no longer included in the package.
1986
-
1987
- ## v0.0.37 - 2023-07-23
1988
-
1989
- ### Changed
1990
-
1991
- - `generateJsonOrText`, `useToolOrGenerateText`, `useTool` return additional information in the response (e.g. the parameters and additional text).
1992
-
1993
- ## v0.0.36 - 2023-07-23
1994
-
1995
- ### Changed
1996
-
1997
- - Renamed `callTool` to `useTool` and `callToolOrGenerateText` to `useToolOrGenerateText`.
1998
-
1999
- ## v0.0.35 - 2023-07-22
2000
-
2001
- ### Added
2002
-
2003
- - `generateJsonOrText`
2004
- - Tools: `Tool` class, `callTool`, `callToolOrGenerateText`
2005
-
2006
- ### Changed
2007
-
2008
- - Restructured "generateJson" arguments.
2009
-
2010
- ## v0.0.34 - 2023-07-18
2011
-
2012
- ### Removed
2013
-
2014
- - `asFunction` model function variants. Use JavaScript lambda functions instead.
2015
-
2016
- ## v0.0.33 - 2023-07-18
2017
-
2018
- ### Added
2019
-
2020
- - OpenAIChatAutoFunctionPrompt to call the OpenAI functions API with multiple functions in 'auto' mode.
2021
-
2022
- ## v0.0.32 - 2023-07-15
2023
-
2024
- ### Changed
2025
-
2026
- - Changed the prompt format of the generateJson function.
2027
-
2028
- ## v0.0.31 - 2023-07-14
2029
-
2030
- ### Changed
2031
-
2032
- - Reworked interaction with vectors stores. Removed VectorDB, renamed VectorStore to VectorIndex, and introduced upsertTextChunks and retrieveTextChunks functions.
2033
-
2034
- ## v0.0.30 - 2023-07-13
2035
-
2036
- ### Fixed
2037
-
2038
- - Bugs related to `performance.now()` not being available.
2039
-
2040
- ## v0.0.29 - 2023-07-13
2041
-
2042
- ### Added
2043
-
2044
- - Llama.cpp tokenization support.
2045
-
2046
- ### Changed
2047
-
2048
- - Split Tokenizer API into BasicTokenizer and FullTokenizer.
2049
- - Introduce countTokens function (replacing Tokenizer.countTokens).
2050
-
2051
- ## v0.0.28 - 2023-07-12
2052
-
2053
- ### Added
2054
-
2055
- - Events for streamText.
2056
-
2057
- ## v0.0.27 - 2023-07-11
2058
-
2059
- ### Added
2060
-
2061
- - TextDeltaEventSource for Client/Server streaming support.
2062
-
2063
- ### Fixed
2064
-
2065
- - End-of-stream bug in Llama.cpp text streaming.
2066
-
2067
- ## v0.0.26 - 2023-07-11
2068
-
2069
- ### Added
2070
-
2071
- - Streaming support for Cohere text generation models.
2072
-
2073
- ## v0.0.25 - 2023-07-10
2074
-
2075
- ### Added
2076
-
2077
- - Streaming support for OpenAI text completion models.
2078
- - OpenAI function streaming support (in low-level API).
2079
-
2080
- ## v0.0.24 - 2023-07-09
2081
-
2082
- ### Added
2083
-
2084
- - Generalized text streaming (async string iterable, useful for command line streaming).
2085
- - Streaming support for Llama.cpp text generation.
2086
-
2087
- ## v0.0.23 - 2023-07-08
2088
-
2089
- ### Added
2090
-
2091
- - Llama.cpp text generation support.
2092
-
2093
- ## v0.0.22 - 2023-07-08
2094
-
2095
- ### Changed
2096
-
2097
- - Convert all main methods (e.g. `model.generateText(...)`) to a functional API (i.e., `generateText(model, ...)`).
2098
-
2099
- ## v0.0.21 - 2023-07-07
2100
-
2101
- ### New
2102
-
2103
- - JSON generation model.
2104
-
2105
- ## v0.0.20 - 2023-07-02
2106
-
2107
- ### New
2108
-
2109
- - Automatic1111 image generation provider.
2110
-
2111
- ## v0.0.19 - 2023-06-30
2112
-
2113
- ### New
2114
-
2115
- - Cost calculation for OpenAI image generation and transcription models.
2116
-
2117
- ## v0.0.18 - 2023-06-28
2118
-
2119
- ### New
2120
-
2121
- - Cost calculation for Open AI text generation, chat and embedding models.
2122
-
2123
- ### Changed
2124
-
2125
- - Renamed RunContext to Run. Introduced DefaultRun.
2126
- - Changed events and observers.
2127
-
2128
- ## v0.0.17 - 2023-06-14
2129
-
2130
- ### New
2131
-
2132
- 1. Updated OpenAI models.
2133
- 1. Low-level support for OpenAI chat functions API (via `OpenAIChatModel.callApi`).
2134
- 1. TranscriptionModel and OpenAITranscriptionModel (using `whisper`)
2135
-
2136
- ### Changed
2137
-
2138
- 1. Single optional parameter for functions/method that contains run, functionId, etc.
2139
-
2140
- ## v0.0.16 - 2023-06-13
2141
-
2142
- ### Fixed
2143
-
2144
- 1. Retry is not attempted when you ran out of OpenAI credits.
2145
- 1. Vercel edge function support (switched to nanoid for unique IDs).
2146
-
2147
- ### Changed
2148
-
2149
- 1. Improved OpenAI chat streaming API.
2150
- 1. Changed `asFunction` variants from namespaced functions into stand-alone functions.
2151
-
2152
- ## v0.0.15 - 2023-06-12
2153
-
2154
- ### Changed
2155
-
2156
- 1. Documentation update.
2157
-
2158
- ## v0.0.14 - 2023-06-11
2159
-
2160
- ### Changed
2161
-
2162
- 1. Major rework of embedding APIs.
2163
-
2164
- ## v0.0.13 - 2023-06-10
2165
-
2166
- ### Changed
2167
-
2168
- 1. Major rework of text and image generation APIs.
2169
-
2170
- ## v0.0.12 - 2023-06-06
2171
-
2172
- ## v0.0.11 - 2023-06-05
2173
-
2174
- ### Changed
2175
-
2176
- 1. Various renames.
2177
-
2178
- ## v0.0.10 - 2023-06-04
2179
-
2180
- ### New
2181
-
2182
- 1. Pinecone VectorDB support
2183
- 1. Cohere tokenization support
2184
-
2185
- ## v0.0.9 - 2023-06-03
2186
-
2187
- ### New
2188
-
2189
- 1. OpenAI DALL-E image generation support
2190
- 1. `generateImage` function
2191
- 1. Throttling and retries on model level
2192
-
2193
- ## v0.0.8 - 2023-06-02
2194
-
2195
- ### New
2196
-
2197
- 1. Stability AI image generation support
2198
- 1. Image generation Next.js example
2199
-
2200
- ### Changed
2201
-
2202
- 1. Updated PDF to tweet example with style transfer
2203
-
2204
- ## v0.0.7 - 2023-06-01
2205
-
2206
- ### New
2207
-
2208
- 1. Hugging Face text generation support
2209
- 1. Memory vector DB
2210
-
2211
- ## v0.0.6 - 2023-05-31
2212
-
2213
- ### New
2214
-
2215
- 1. Cohere embedding API support
2216
-
2217
- ### Changed
2218
-
2219
- 1. Restructured retry logic
2220
- 1. `embed` embeds many texts at once
2221
-
2222
- ## v0.0.5 - 2023-05-30
2223
-
2224
- ### New
2225
-
2226
- 1. Cohere text generation support
2227
- 1. OpenAI chat streams can be returned as delta async iterables
2228
- 1. Documentation of integration APIs and models
2229
-
2230
- ## v0.0.4 - 2023-05-29
2231
-
2232
- ### New
2233
-
2234
- 1. OpenAI embedding support
2235
- 1. Text embedding functions
2236
- 1. Chat streams can be returned as ReadableStream or AsyncIterable
2237
- 1. Basic examples under `examples/basic`
2238
- 1. Initial documentation available at [modelfusion.dev](https://modelfusion.dev)
2239
-
2240
- ## v0.0.3 - 2023-05-28
2241
-
2242
- ### New
2243
-
2244
- 1. Voice recording and transcription Next.js app example.
2245
- 1. OpenAI transcription support (Whisper).
2246
-
2247
- ## v0.0.2 - 2023-05-27
2248
-
2249
- ### New
2250
-
2251
- 1. BabyAGI Example in TypeScript
2252
- 1. TikToken for OpenAI: We've added tiktoken to aid in tokenization and token counting, including those for message and prompt overhead tokens in chat.
2253
- 1. Tokenization-based Recursive Splitter: A new splitter that operates recursively using tokenization.
2254
- 1. Prompt Management Utility: An enhancement to fit recent chat messages into the context window.
2255
-
2256
- ## v0.0.1 - 2023-05-26
2257
-
2258
- ### New
2259
-
2260
- 1. AI Chat Example using Next.js: An example demonstrating AI chat implementation using Next.js.
2261
- 1. PDF to Twitter Thread Example: This shows how a PDF can be converted into a Twitter thread.
2262
- 1. OpenAI Chat Completion Streaming Support: A feature providing real-time response capabilities using OpenAI's chat completion streaming.
2263
- 1. OpenAI Chat and Text Completion Support: This addition enables the software to handle both chat and text completions from OpenAI.
2264
- 1. Retry Management: A feature to enhance resilience by managing retry attempts for tasks.
2265
- 1. Task Progress Reporting and Abort Signals: This allows users to track the progress of tasks and gives the ability to abort tasks when needed.
2266
- 1. Recursive Character Splitter: A feature to split text into characters recursively for more detailed text analysis.
2267
- 1. Recursive Text Mapping: This enables recursive mapping of text, beneficial for tasks like summarization or extraction.
2268
- 1. Split-Map-Filter-Reduce for Text Processing: A process chain developed for sophisticated text handling, allowing operations to split, map, filter, and reduce text data.
2269
-
2270
- ```
2271
-
2272
- ```