modelfusion 0.0.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (405)
  1. package/LICENSE +21 -0
  2. package/README.md +429 -0
  3. package/composed-function/index.cjs +22 -0
  4. package/composed-function/index.d.ts +6 -0
  5. package/composed-function/index.js +6 -0
  6. package/composed-function/summarize/SummarizationFunction.cjs +2 -0
  7. package/composed-function/summarize/SummarizationFunction.d.ts +4 -0
  8. package/composed-function/summarize/SummarizationFunction.js +1 -0
  9. package/composed-function/summarize/summarizeRecursively.cjs +19 -0
  10. package/composed-function/summarize/summarizeRecursively.d.ts +11 -0
  11. package/composed-function/summarize/summarizeRecursively.js +15 -0
  12. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +29 -0
  13. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +24 -0
  14. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +25 -0
  15. package/composed-function/use-tool/NoSuchToolError.cjs +17 -0
  16. package/composed-function/use-tool/NoSuchToolError.d.ts +4 -0
  17. package/composed-function/use-tool/NoSuchToolError.js +13 -0
  18. package/composed-function/use-tool/Tool.cjs +43 -0
  19. package/composed-function/use-tool/Tool.d.ts +15 -0
  20. package/composed-function/use-tool/Tool.js +39 -0
  21. package/composed-function/use-tool/useTool.cjs +59 -0
  22. package/composed-function/use-tool/useTool.d.ts +36 -0
  23. package/composed-function/use-tool/useTool.js +54 -0
  24. package/cost/Cost.cjs +38 -0
  25. package/cost/Cost.d.ts +16 -0
  26. package/cost/Cost.js +34 -0
  27. package/cost/CostCalculator.cjs +2 -0
  28. package/cost/CostCalculator.d.ts +8 -0
  29. package/cost/CostCalculator.js +1 -0
  30. package/cost/calculateCost.cjs +28 -0
  31. package/cost/calculateCost.d.ts +7 -0
  32. package/cost/calculateCost.js +24 -0
  33. package/cost/index.cjs +19 -0
  34. package/cost/index.d.ts +3 -0
  35. package/cost/index.js +3 -0
  36. package/index.cjs +25 -0
  37. package/index.d.ts +9 -0
  38. package/index.js +9 -0
  39. package/model-function/AbstractModel.cjs +22 -0
  40. package/model-function/AbstractModel.d.ts +12 -0
  41. package/model-function/AbstractModel.js +18 -0
  42. package/model-function/FunctionOptions.cjs +2 -0
  43. package/model-function/FunctionOptions.d.ts +6 -0
  44. package/model-function/FunctionOptions.js +1 -0
  45. package/model-function/Model.cjs +2 -0
  46. package/model-function/Model.d.ts +23 -0
  47. package/model-function/Model.js +1 -0
  48. package/model-function/ModelCallEvent.cjs +2 -0
  49. package/model-function/ModelCallEvent.d.ts +18 -0
  50. package/model-function/ModelCallEvent.js +1 -0
  51. package/model-function/ModelCallEventSource.cjs +42 -0
  52. package/model-function/ModelCallEventSource.d.ts +13 -0
  53. package/model-function/ModelCallEventSource.js +38 -0
  54. package/model-function/ModelCallObserver.cjs +2 -0
  55. package/model-function/ModelCallObserver.d.ts +5 -0
  56. package/model-function/ModelCallObserver.js +1 -0
  57. package/model-function/ModelInformation.cjs +2 -0
  58. package/model-function/ModelInformation.d.ts +4 -0
  59. package/model-function/ModelInformation.js +1 -0
  60. package/model-function/SuccessfulModelCall.cjs +22 -0
  61. package/model-function/SuccessfulModelCall.d.ts +9 -0
  62. package/model-function/SuccessfulModelCall.js +18 -0
  63. package/model-function/embed-text/TextEmbeddingEvent.cjs +2 -0
  64. package/model-function/embed-text/TextEmbeddingEvent.d.ts +23 -0
  65. package/model-function/embed-text/TextEmbeddingEvent.js +1 -0
  66. package/model-function/embed-text/TextEmbeddingModel.cjs +2 -0
  67. package/model-function/embed-text/TextEmbeddingModel.d.ts +18 -0
  68. package/model-function/embed-text/TextEmbeddingModel.js +1 -0
  69. package/model-function/embed-text/embedText.cjs +90 -0
  70. package/model-function/embed-text/embedText.d.ts +33 -0
  71. package/model-function/embed-text/embedText.js +85 -0
  72. package/model-function/executeCall.cjs +60 -0
  73. package/model-function/executeCall.d.ts +27 -0
  74. package/model-function/executeCall.js +56 -0
  75. package/model-function/generate-image/ImageGenerationEvent.cjs +2 -0
  76. package/model-function/generate-image/ImageGenerationEvent.d.ts +22 -0
  77. package/model-function/generate-image/ImageGenerationEvent.js +1 -0
  78. package/model-function/generate-image/ImageGenerationModel.cjs +2 -0
  79. package/model-function/generate-image/ImageGenerationModel.d.ts +8 -0
  80. package/model-function/generate-image/ImageGenerationModel.js +1 -0
  81. package/model-function/generate-image/generateImage.cjs +63 -0
  82. package/model-function/generate-image/generateImage.d.ts +23 -0
  83. package/model-function/generate-image/generateImage.js +59 -0
  84. package/model-function/generate-json/GenerateJsonModel.cjs +2 -0
  85. package/model-function/generate-json/GenerateJsonModel.d.ts +10 -0
  86. package/model-function/generate-json/GenerateJsonModel.js +1 -0
  87. package/model-function/generate-json/GenerateJsonOrTextModel.cjs +2 -0
  88. package/model-function/generate-json/GenerateJsonOrTextModel.d.ts +18 -0
  89. package/model-function/generate-json/GenerateJsonOrTextModel.js +1 -0
  90. package/model-function/generate-json/JsonGenerationEvent.cjs +2 -0
  91. package/model-function/generate-json/JsonGenerationEvent.d.ts +22 -0
  92. package/model-function/generate-json/JsonGenerationEvent.js +1 -0
  93. package/model-function/generate-json/NoSuchSchemaError.cjs +17 -0
  94. package/model-function/generate-json/NoSuchSchemaError.d.ts +4 -0
  95. package/model-function/generate-json/NoSuchSchemaError.js +13 -0
  96. package/model-function/generate-json/SchemaDefinition.cjs +2 -0
  97. package/model-function/generate-json/SchemaDefinition.d.ts +6 -0
  98. package/model-function/generate-json/SchemaDefinition.js +1 -0
  99. package/model-function/generate-json/SchemaValidationError.cjs +36 -0
  100. package/model-function/generate-json/SchemaValidationError.d.ts +11 -0
  101. package/model-function/generate-json/SchemaValidationError.js +32 -0
  102. package/model-function/generate-json/generateJson.cjs +61 -0
  103. package/model-function/generate-json/generateJson.d.ts +9 -0
  104. package/model-function/generate-json/generateJson.js +57 -0
  105. package/model-function/generate-json/generateJsonOrText.cjs +74 -0
  106. package/model-function/generate-json/generateJsonOrText.d.ts +25 -0
  107. package/model-function/generate-json/generateJsonOrText.js +70 -0
  108. package/model-function/generate-text/AsyncQueue.cjs +66 -0
  109. package/model-function/generate-text/AsyncQueue.d.ts +17 -0
  110. package/model-function/generate-text/AsyncQueue.js +62 -0
  111. package/model-function/generate-text/DeltaEvent.cjs +2 -0
  112. package/model-function/generate-text/DeltaEvent.d.ts +7 -0
  113. package/model-function/generate-text/DeltaEvent.js +1 -0
  114. package/model-function/generate-text/TextDeltaEventSource.cjs +54 -0
  115. package/model-function/generate-text/TextDeltaEventSource.d.ts +5 -0
  116. package/model-function/generate-text/TextDeltaEventSource.js +46 -0
  117. package/model-function/generate-text/TextGenerationEvent.cjs +2 -0
  118. package/model-function/generate-text/TextGenerationEvent.d.ts +22 -0
  119. package/model-function/generate-text/TextGenerationEvent.js +1 -0
  120. package/model-function/generate-text/TextGenerationModel.cjs +2 -0
  121. package/model-function/generate-text/TextGenerationModel.d.ts +42 -0
  122. package/model-function/generate-text/TextGenerationModel.js +1 -0
  123. package/model-function/generate-text/TextStreamingEvent.cjs +2 -0
  124. package/model-function/generate-text/TextStreamingEvent.d.ts +22 -0
  125. package/model-function/generate-text/TextStreamingEvent.js +1 -0
  126. package/model-function/generate-text/extractTextDeltas.cjs +23 -0
  127. package/model-function/generate-text/extractTextDeltas.d.ts +7 -0
  128. package/model-function/generate-text/extractTextDeltas.js +19 -0
  129. package/model-function/generate-text/generateText.cjs +67 -0
  130. package/model-function/generate-text/generateText.d.ts +20 -0
  131. package/model-function/generate-text/generateText.js +63 -0
  132. package/model-function/generate-text/parseEventSourceReadableStream.cjs +30 -0
  133. package/model-function/generate-text/parseEventSourceReadableStream.d.ts +8 -0
  134. package/model-function/generate-text/parseEventSourceReadableStream.js +26 -0
  135. package/model-function/generate-text/streamText.cjs +115 -0
  136. package/model-function/generate-text/streamText.d.ts +11 -0
  137. package/model-function/generate-text/streamText.js +111 -0
  138. package/model-function/index.cjs +47 -0
  139. package/model-function/index.d.ts +31 -0
  140. package/model-function/index.js +31 -0
  141. package/model-function/tokenize-text/Tokenizer.cjs +2 -0
  142. package/model-function/tokenize-text/Tokenizer.d.ts +19 -0
  143. package/model-function/tokenize-text/Tokenizer.js +1 -0
  144. package/model-function/tokenize-text/countTokens.cjs +10 -0
  145. package/model-function/tokenize-text/countTokens.d.ts +5 -0
  146. package/model-function/tokenize-text/countTokens.js +6 -0
  147. package/model-function/transcribe-audio/TranscriptionEvent.cjs +2 -0
  148. package/model-function/transcribe-audio/TranscriptionEvent.d.ts +22 -0
  149. package/model-function/transcribe-audio/TranscriptionEvent.js +1 -0
  150. package/model-function/transcribe-audio/TranscriptionModel.cjs +2 -0
  151. package/model-function/transcribe-audio/TranscriptionModel.d.ts +8 -0
  152. package/model-function/transcribe-audio/TranscriptionModel.js +1 -0
  153. package/model-function/transcribe-audio/transcribe.cjs +62 -0
  154. package/model-function/transcribe-audio/transcribe.d.ts +22 -0
  155. package/model-function/transcribe-audio/transcribe.js +58 -0
  156. package/model-provider/automatic1111/Automatic1111Error.cjs +39 -0
  157. package/model-provider/automatic1111/Automatic1111Error.d.ts +31 -0
  158. package/model-provider/automatic1111/Automatic1111Error.js +31 -0
  159. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +76 -0
  160. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +54 -0
  161. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +72 -0
  162. package/model-provider/automatic1111/index.cjs +20 -0
  163. package/model-provider/automatic1111/index.d.ts +2 -0
  164. package/model-provider/automatic1111/index.js +2 -0
  165. package/model-provider/cohere/CohereError.cjs +36 -0
  166. package/model-provider/cohere/CohereError.d.ts +22 -0
  167. package/model-provider/cohere/CohereError.js +28 -0
  168. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +172 -0
  169. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +119 -0
  170. package/model-provider/cohere/CohereTextEmbeddingModel.js +165 -0
  171. package/model-provider/cohere/CohereTextGenerationModel.cjs +283 -0
  172. package/model-provider/cohere/CohereTextGenerationModel.d.ts +203 -0
  173. package/model-provider/cohere/CohereTextGenerationModel.js +276 -0
  174. package/model-provider/cohere/CohereTokenizer.cjs +136 -0
  175. package/model-provider/cohere/CohereTokenizer.d.ts +118 -0
  176. package/model-provider/cohere/CohereTokenizer.js +129 -0
  177. package/model-provider/cohere/index.cjs +22 -0
  178. package/model-provider/cohere/index.d.ts +4 -0
  179. package/model-provider/cohere/index.js +4 -0
  180. package/model-provider/huggingface/HuggingFaceError.cjs +52 -0
  181. package/model-provider/huggingface/HuggingFaceError.d.ts +22 -0
  182. package/model-provider/huggingface/HuggingFaceError.js +44 -0
  183. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +174 -0
  184. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +75 -0
  185. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +167 -0
  186. package/model-provider/huggingface/index.cjs +20 -0
  187. package/model-provider/huggingface/index.d.ts +2 -0
  188. package/model-provider/huggingface/index.js +2 -0
  189. package/model-provider/index.cjs +22 -0
  190. package/model-provider/index.d.ts +6 -0
  191. package/model-provider/index.js +6 -0
  192. package/model-provider/llamacpp/LlamaCppError.cjs +52 -0
  193. package/model-provider/llamacpp/LlamaCppError.d.ts +22 -0
  194. package/model-provider/llamacpp/LlamaCppError.js +44 -0
  195. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +96 -0
  196. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +40 -0
  197. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +89 -0
  198. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +245 -0
  199. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +399 -0
  200. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +238 -0
  201. package/model-provider/llamacpp/LlamaCppTokenizer.cjs +64 -0
  202. package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +38 -0
  203. package/model-provider/llamacpp/LlamaCppTokenizer.js +57 -0
  204. package/model-provider/llamacpp/index.cjs +22 -0
  205. package/model-provider/llamacpp/index.d.ts +4 -0
  206. package/model-provider/llamacpp/index.js +4 -0
  207. package/model-provider/openai/OpenAICostCalculator.cjs +71 -0
  208. package/model-provider/openai/OpenAICostCalculator.d.ts +6 -0
  209. package/model-provider/openai/OpenAICostCalculator.js +67 -0
  210. package/model-provider/openai/OpenAIError.cjs +50 -0
  211. package/model-provider/openai/OpenAIError.d.ts +47 -0
  212. package/model-provider/openai/OpenAIError.js +42 -0
  213. package/model-provider/openai/OpenAIImageGenerationModel.cjs +124 -0
  214. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +113 -0
  215. package/model-provider/openai/OpenAIImageGenerationModel.js +119 -0
  216. package/model-provider/openai/OpenAIModelSettings.cjs +2 -0
  217. package/model-provider/openai/OpenAIModelSettings.d.ts +8 -0
  218. package/model-provider/openai/OpenAIModelSettings.js +1 -0
  219. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +171 -0
  220. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +122 -0
  221. package/model-provider/openai/OpenAITextEmbeddingModel.js +162 -0
  222. package/model-provider/openai/OpenAITextGenerationModel.cjs +326 -0
  223. package/model-provider/openai/OpenAITextGenerationModel.d.ts +254 -0
  224. package/model-provider/openai/OpenAITextGenerationModel.js +317 -0
  225. package/model-provider/openai/OpenAITranscriptionModel.cjs +195 -0
  226. package/model-provider/openai/OpenAITranscriptionModel.d.ts +196 -0
  227. package/model-provider/openai/OpenAITranscriptionModel.js +187 -0
  228. package/model-provider/openai/TikTokenTokenizer.cjs +86 -0
  229. package/model-provider/openai/TikTokenTokenizer.d.ts +35 -0
  230. package/model-provider/openai/TikTokenTokenizer.js +82 -0
  231. package/model-provider/openai/chat/OpenAIChatMessage.cjs +24 -0
  232. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +26 -0
  233. package/model-provider/openai/chat/OpenAIChatMessage.js +21 -0
  234. package/model-provider/openai/chat/OpenAIChatModel.cjs +288 -0
  235. package/model-provider/openai/chat/OpenAIChatModel.d.ts +344 -0
  236. package/model-provider/openai/chat/OpenAIChatModel.js +279 -0
  237. package/model-provider/openai/chat/OpenAIChatPrompt.cjs +143 -0
  238. package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +108 -0
  239. package/model-provider/openai/chat/OpenAIChatPrompt.js +135 -0
  240. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +112 -0
  241. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +19 -0
  242. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +105 -0
  243. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +28 -0
  244. package/model-provider/openai/chat/countOpenAIChatMessageTokens.d.ts +20 -0
  245. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +23 -0
  246. package/model-provider/openai/index.cjs +31 -0
  247. package/model-provider/openai/index.d.ts +13 -0
  248. package/model-provider/openai/index.js +12 -0
  249. package/model-provider/stability/StabilityError.cjs +36 -0
  250. package/model-provider/stability/StabilityError.d.ts +22 -0
  251. package/model-provider/stability/StabilityError.js +28 -0
  252. package/model-provider/stability/StabilityImageGenerationModel.cjs +133 -0
  253. package/model-provider/stability/StabilityImageGenerationModel.d.ts +95 -0
  254. package/model-provider/stability/StabilityImageGenerationModel.js +129 -0
  255. package/model-provider/stability/index.cjs +20 -0
  256. package/model-provider/stability/index.d.ts +2 -0
  257. package/model-provider/stability/index.js +2 -0
  258. package/package.json +87 -0
  259. package/prompt/InstructionPrompt.cjs +2 -0
  260. package/prompt/InstructionPrompt.d.ts +7 -0
  261. package/prompt/InstructionPrompt.js +1 -0
  262. package/prompt/Llama2PromptMapping.cjs +56 -0
  263. package/prompt/Llama2PromptMapping.d.ts +10 -0
  264. package/prompt/Llama2PromptMapping.js +51 -0
  265. package/prompt/OpenAIChatPromptMapping.cjs +62 -0
  266. package/prompt/OpenAIChatPromptMapping.d.ts +6 -0
  267. package/prompt/OpenAIChatPromptMapping.js +57 -0
  268. package/prompt/PromptMapping.cjs +2 -0
  269. package/prompt/PromptMapping.d.ts +7 -0
  270. package/prompt/PromptMapping.js +1 -0
  271. package/prompt/PromptMappingTextGenerationModel.cjs +88 -0
  272. package/prompt/PromptMappingTextGenerationModel.d.ts +26 -0
  273. package/prompt/PromptMappingTextGenerationModel.js +84 -0
  274. package/prompt/TextPromptMapping.cjs +50 -0
  275. package/prompt/TextPromptMapping.d.ts +14 -0
  276. package/prompt/TextPromptMapping.js +45 -0
  277. package/prompt/chat/ChatPrompt.cjs +2 -0
  278. package/prompt/chat/ChatPrompt.d.ts +33 -0
  279. package/prompt/chat/ChatPrompt.js +1 -0
  280. package/prompt/chat/trimChatPrompt.cjs +50 -0
  281. package/prompt/chat/trimChatPrompt.d.ts +19 -0
  282. package/prompt/chat/trimChatPrompt.js +46 -0
  283. package/prompt/chat/validateChatPrompt.cjs +36 -0
  284. package/prompt/chat/validateChatPrompt.d.ts +8 -0
  285. package/prompt/chat/validateChatPrompt.js +31 -0
  286. package/prompt/index.cjs +25 -0
  287. package/prompt/index.d.ts +9 -0
  288. package/prompt/index.js +9 -0
  289. package/run/ConsoleLogger.cjs +12 -0
  290. package/run/ConsoleLogger.d.ts +6 -0
  291. package/run/ConsoleLogger.js +8 -0
  292. package/run/DefaultRun.cjs +78 -0
  293. package/run/DefaultRun.d.ts +24 -0
  294. package/run/DefaultRun.js +74 -0
  295. package/run/IdMetadata.cjs +2 -0
  296. package/run/IdMetadata.d.ts +7 -0
  297. package/run/IdMetadata.js +1 -0
  298. package/run/Run.cjs +2 -0
  299. package/run/Run.d.ts +27 -0
  300. package/run/Run.js +1 -0
  301. package/run/RunFunction.cjs +2 -0
  302. package/run/RunFunction.d.ts +13 -0
  303. package/run/RunFunction.js +1 -0
  304. package/run/Vector.cjs +2 -0
  305. package/run/Vector.d.ts +5 -0
  306. package/run/Vector.js +1 -0
  307. package/run/index.cjs +22 -0
  308. package/run/index.d.ts +6 -0
  309. package/run/index.js +6 -0
  310. package/text-chunk/TextChunk.cjs +2 -0
  311. package/text-chunk/TextChunk.d.ts +3 -0
  312. package/text-chunk/TextChunk.js +1 -0
  313. package/text-chunk/index.cjs +22 -0
  314. package/text-chunk/index.d.ts +6 -0
  315. package/text-chunk/index.js +6 -0
  316. package/text-chunk/retrieve-text-chunks/TextChunkRetriever.cjs +2 -0
  317. package/text-chunk/retrieve-text-chunks/TextChunkRetriever.d.ts +8 -0
  318. package/text-chunk/retrieve-text-chunks/TextChunkRetriever.js +1 -0
  319. package/text-chunk/retrieve-text-chunks/retrieveTextChunks.cjs +10 -0
  320. package/text-chunk/retrieve-text-chunks/retrieveTextChunks.d.ts +6 -0
  321. package/text-chunk/retrieve-text-chunks/retrieveTextChunks.js +6 -0
  322. package/text-chunk/split/SplitFunction.cjs +2 -0
  323. package/text-chunk/split/SplitFunction.d.ts +4 -0
  324. package/text-chunk/split/SplitFunction.js +1 -0
  325. package/text-chunk/split/splitOnSeparator.cjs +12 -0
  326. package/text-chunk/split/splitOnSeparator.d.ts +8 -0
  327. package/text-chunk/split/splitOnSeparator.js +7 -0
  328. package/text-chunk/split/splitRecursively.cjs +41 -0
  329. package/text-chunk/split/splitRecursively.d.ts +22 -0
  330. package/text-chunk/split/splitRecursively.js +33 -0
  331. package/util/DurationMeasurement.cjs +42 -0
  332. package/util/DurationMeasurement.d.ts +5 -0
  333. package/util/DurationMeasurement.js +38 -0
  334. package/util/ErrorHandler.cjs +2 -0
  335. package/util/ErrorHandler.d.ts +1 -0
  336. package/util/ErrorHandler.js +1 -0
  337. package/util/SafeResult.cjs +2 -0
  338. package/util/SafeResult.d.ts +8 -0
  339. package/util/SafeResult.js +1 -0
  340. package/util/api/AbortError.cjs +9 -0
  341. package/util/api/AbortError.d.ts +3 -0
  342. package/util/api/AbortError.js +5 -0
  343. package/util/api/ApiCallError.cjs +45 -0
  344. package/util/api/ApiCallError.d.ts +15 -0
  345. package/util/api/ApiCallError.js +41 -0
  346. package/util/api/RetryError.cjs +24 -0
  347. package/util/api/RetryError.d.ts +10 -0
  348. package/util/api/RetryError.js +20 -0
  349. package/util/api/RetryFunction.cjs +2 -0
  350. package/util/api/RetryFunction.d.ts +1 -0
  351. package/util/api/RetryFunction.js +1 -0
  352. package/util/api/ThrottleFunction.cjs +2 -0
  353. package/util/api/ThrottleFunction.d.ts +1 -0
  354. package/util/api/ThrottleFunction.js +1 -0
  355. package/util/api/callWithRetryAndThrottle.cjs +7 -0
  356. package/util/api/callWithRetryAndThrottle.d.ts +7 -0
  357. package/util/api/callWithRetryAndThrottle.js +3 -0
  358. package/util/api/postToApi.cjs +103 -0
  359. package/util/api/postToApi.d.ts +29 -0
  360. package/util/api/postToApi.js +96 -0
  361. package/util/api/retryNever.cjs +8 -0
  362. package/util/api/retryNever.d.ts +4 -0
  363. package/util/api/retryNever.js +4 -0
  364. package/util/api/retryWithExponentialBackoff.cjs +48 -0
  365. package/util/api/retryWithExponentialBackoff.d.ts +10 -0
  366. package/util/api/retryWithExponentialBackoff.js +44 -0
  367. package/util/api/throttleMaxConcurrency.cjs +65 -0
  368. package/util/api/throttleMaxConcurrency.d.ts +7 -0
  369. package/util/api/throttleMaxConcurrency.js +61 -0
  370. package/util/api/throttleUnlimitedConcurrency.cjs +8 -0
  371. package/util/api/throttleUnlimitedConcurrency.d.ts +5 -0
  372. package/util/api/throttleUnlimitedConcurrency.js +4 -0
  373. package/util/cosineSimilarity.cjs +26 -0
  374. package/util/cosineSimilarity.d.ts +11 -0
  375. package/util/cosineSimilarity.js +22 -0
  376. package/util/index.cjs +26 -0
  377. package/util/index.d.ts +10 -0
  378. package/util/index.js +10 -0
  379. package/util/never.cjs +6 -0
  380. package/util/never.d.ts +1 -0
  381. package/util/never.js +2 -0
  382. package/util/runSafe.cjs +15 -0
  383. package/util/runSafe.d.ts +2 -0
  384. package/util/runSafe.js +11 -0
  385. package/vector-index/VectorIndex.cjs +2 -0
  386. package/vector-index/VectorIndex.d.ts +18 -0
  387. package/vector-index/VectorIndex.js +1 -0
  388. package/vector-index/VectorIndexSimilarTextChunkRetriever.cjs +57 -0
  389. package/vector-index/VectorIndexSimilarTextChunkRetriever.d.ts +20 -0
  390. package/vector-index/VectorIndexSimilarTextChunkRetriever.js +53 -0
  391. package/vector-index/VectorIndexTextChunkStore.cjs +77 -0
  392. package/vector-index/VectorIndexTextChunkStore.d.ts +35 -0
  393. package/vector-index/VectorIndexTextChunkStore.js +73 -0
  394. package/vector-index/index.cjs +22 -0
  395. package/vector-index/index.d.ts +6 -0
  396. package/vector-index/index.js +6 -0
  397. package/vector-index/memory/MemoryVectorIndex.cjs +63 -0
  398. package/vector-index/memory/MemoryVectorIndex.d.ts +31 -0
  399. package/vector-index/memory/MemoryVectorIndex.js +56 -0
  400. package/vector-index/pinecone/PineconeVectorIndex.cjs +66 -0
  401. package/vector-index/pinecone/PineconeVectorIndex.d.ts +29 -0
  402. package/vector-index/pinecone/PineconeVectorIndex.js +62 -0
  403. package/vector-index/upsertTextChunks.cjs +15 -0
  404. package/vector-index/upsertTextChunks.d.ts +11 -0
  405. package/vector-index/upsertTextChunks.js +11 -0
@@ -0,0 +1,288 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.OPENAI_CHAT_MODELS = void 0;
7
+ const zod_1 = __importDefault(require("zod"));
8
+ const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
9
+ const PromptMappingTextGenerationModel_js_1 = require("../../../prompt/PromptMappingTextGenerationModel.cjs");
10
+ const callWithRetryAndThrottle_js_1 = require("../../../util/api/callWithRetryAndThrottle.cjs");
11
+ const postToApi_js_1 = require("../../../util/api/postToApi.cjs");
12
+ const OpenAIError_js_1 = require("../OpenAIError.cjs");
13
+ const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
14
+ const OpenAIChatStreamIterable_js_1 = require("./OpenAIChatStreamIterable.cjs");
15
+ const countOpenAIChatMessageTokens_js_1 = require("./countOpenAIChatMessageTokens.cjs");
16
/**
 * Available OpenAI chat models, their token limits, and pricing.
 *
 * `contextWindowSize` is the model's maximum context length in tokens.
 * Token costs are expressed in millicents (1/1000 of a US cent) per token.
 *
 * @see https://platform.openai.com/docs/models/
 * @see https://openai.com/pricing
 */
const OPENAI_CHAT_MODELS = {
    "gpt-4": {
        contextWindowSize: 8192,
        promptTokenCostInMillicents: 3,
        completionTokenCostInMillicents: 6,
    },
    "gpt-4-0314": {
        contextWindowSize: 8192,
        promptTokenCostInMillicents: 3,
        completionTokenCostInMillicents: 6,
    },
    "gpt-4-0613": {
        contextWindowSize: 8192,
        promptTokenCostInMillicents: 3,
        completionTokenCostInMillicents: 6,
    },
    "gpt-4-32k": {
        contextWindowSize: 32768,
        promptTokenCostInMillicents: 6,
        completionTokenCostInMillicents: 12,
    },
    "gpt-4-32k-0314": {
        contextWindowSize: 32768,
        promptTokenCostInMillicents: 6,
        completionTokenCostInMillicents: 12,
    },
    "gpt-4-32k-0613": {
        contextWindowSize: 32768,
        promptTokenCostInMillicents: 6,
        completionTokenCostInMillicents: 12,
    },
    "gpt-3.5-turbo": {
        contextWindowSize: 4096,
        promptTokenCostInMillicents: 0.15,
        completionTokenCostInMillicents: 0.2,
    },
    "gpt-3.5-turbo-0301": {
        contextWindowSize: 4096,
        promptTokenCostInMillicents: 0.15,
        completionTokenCostInMillicents: 0.2,
    },
    "gpt-3.5-turbo-0613": {
        contextWindowSize: 4096,
        promptTokenCostInMillicents: 0.15,
        completionTokenCostInMillicents: 0.2,
    },
    "gpt-3.5-turbo-16k": {
        contextWindowSize: 16384,
        promptTokenCostInMillicents: 0.3,
        completionTokenCostInMillicents: 0.4,
    },
    "gpt-3.5-turbo-16k-0613": {
        contextWindowSize: 16384,
        promptTokenCostInMillicents: 0.3,
        completionTokenCostInMillicents: 0.4,
    },
};
exports.OPENAI_CHAT_MODELS = OPENAI_CHAT_MODELS;
79
/**
 * Checks whether the given model name is one of the known OpenAI chat
 * models (i.e. a key of OPENAI_CHAT_MODELS).
 */
function isOpenAIChatModel(model) {
    return model in exports.OPENAI_CHAT_MODELS;
}
exports.isOpenAIChatModel = isOpenAIChatModel;
81
/**
 * Calculates the cost of an OpenAI chat API call in millicents, using the
 * token usage reported in the response and the per-token prices from
 * OPENAI_CHAT_MODELS for the given model.
 */
function calculateOpenAIChatCostInMillicents({ model, response }) {
    const pricing = exports.OPENAI_CHAT_MODELS[model];
    const { prompt_tokens, completion_tokens } = response.usage;
    return (prompt_tokens * pricing.promptTokenCostInMillicents +
        completion_tokens * pricing.completionTokenCostInMillicents);
}
exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicents;
86
/**
 * Create a text generation model that calls the OpenAI chat completion API.
 *
 * @see https://platform.openai.com/docs/api-reference/chat/create
 *
 * @example
 * const model = new OpenAIChatModel({
 *   model: "gpt-3.5-turbo",
 *   temperature: 0.7,
 *   maxTokens: 500,
 * });
 *
 * const { text } = await generateText([
 *   model,
 *   OpenAIChatMessage.system(
 *     "Write a short story about a robot learning to love:"
 *   ),
 * ]);
 */
class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
    constructor(settings) {
        super({ settings });
        // Instance fields are declared with Object.defineProperty because this
        // file is compiled output (class fields lowered for older targets).
        Object.defineProperty(this, "provider", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "openai"
        });
        Object.defineProperty(this, "contextWindowSize", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "tokenizer", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Tokenizer and context window size are derived from the model name.
        this.tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({ model: this.settings.model });
        this.contextWindowSize =
            exports.OPENAI_CHAT_MODELS[this.settings.model].contextWindowSize;
    }
    get modelName() {
        return this.settings.model;
    }
    // Resolves the API key from settings or the OPENAI_API_KEY environment
    // variable; throws when neither is provided.
    get apiKey() {
        const apiKey = this.settings.apiKey ?? process.env.OPENAI_API_KEY;
        if (apiKey == null) {
            throw new Error(`OpenAI API key is missing. Pass it as an argument to the constructor or set it as an environment variable named OPENAI_API_KEY.`);
        }
        return apiKey;
    }
    /**
     * Counts the prompt tokens required for the messages. This includes the message base tokens
     * and the prompt base tokens.
     */
    countPromptTokens(messages) {
        return (0, countOpenAIChatMessageTokens_js_1.countOpenAIChatPromptTokens)({
            messages,
            model: this.modelName,
        });
    }
    // Performs the HTTP call with retry/throttle handling. Merge order means
    // per-call `settings` override instance settings, which in turn override
    // the apiKey/user defaults computed here.
    async callAPI(messages, options) {
        const { run, settings, responseFormat } = options;
        const callSettings = Object.assign({
            apiKey: this.apiKey,
            user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
        }, this.settings, settings, {
            abortSignal: run?.abortSignal,
            messages,
            responseFormat,
        });
        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
            retry: callSettings.retry,
            throttle: callSettings.throttle,
            call: async () => callOpenAIChatCompletionAPI(callSettings),
        });
    }
    // Non-streaming text generation: request the full JSON response.
    generateTextResponse(prompt, options) {
        return this.callAPI(prompt, {
            ...options,
            responseFormat: exports.OpenAIChatResponseFormat.json,
        });
    }
    // Extracts the assistant message content of the first choice.
    extractText(response) {
        return response.choices[0].message.content;
    }
    // Streaming text generation: request an async iterable of deltas.
    generateDeltaStreamResponse(prompt, options) {
        return this.callAPI(prompt, {
            ...options,
            responseFormat: exports.OpenAIChatResponseFormat.deltaIterable,
        });
    }
    // Maps a full delta to the text delta of the first choice (or undefined).
    extractTextDelta(fullDelta) {
        return fullDelta[0]?.delta.content ?? undefined;
    }
    /**
     * JSON generation uses the OpenAI GPT function calling API.
     * It provides a single function specification and instructs the model to provide parameters for calling the function.
     * The result is returned as parsed JSON.
     *
     * @see https://platform.openai.com/docs/guides/gpt/function-calling
     */
    generateJsonResponse(prompt, options) {
        // NOTE(review): the function-call fields are merged into a copy of
        // `options` itself (not `options.settings`) and then passed as the
        // `settings` of the call — confirm this is intentional.
        const settingsWithFunctionCall = Object.assign({}, options, {
            functionCall: prompt.functionCall,
            functions: prompt.functions,
        });
        return this.callAPI(prompt.messages, {
            responseFormat: exports.OpenAIChatResponseFormat.json,
            functionId: options?.functionId,
            settings: settingsWithFunctionCall,
            run: options?.run,
        });
    }
    // Wraps this model so it accepts a different prompt format, applying the
    // mapping's stop tokens to the underlying model.
    mapPrompt(promptMapping) {
        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
            model: this.withStopTokens(promptMapping.stopTokens),
            promptMapping,
        });
    }
    // Returns a new model instance with the additional settings merged in
    // (existing settings are preserved unless overridden).
    withSettings(additionalSettings) {
        return new OpenAIChatModel(Object.assign({}, this.settings, additionalSettings));
    }
    get maxCompletionTokens() {
        return this.settings.maxTokens;
    }
    withMaxCompletionTokens(maxCompletionTokens) {
        return this.withSettings({ maxTokens: maxCompletionTokens });
    }
    withStopTokens(stopTokens) {
        return this.withSettings({ stop: stopTokens });
    }
}
exports.OpenAIChatModel = OpenAIChatModel;
223
const z = zod_1.default;
/**
 * Runtime validation schema for a (non-streaming) OpenAI chat completion
 * response, i.e. an object with `object === "chat.completion"`.
 */
const openAIChatResponseSchema = z.object({
    id: z.string(),
    object: z.literal("chat.completion"),
    created: z.number(),
    model: z.string(),
    choices: z.array(z.object({
        message: z.object({
            role: z.literal("assistant"),
            // content is null when the model responds with a function call.
            content: z.string().nullable(),
            function_call: z
                .object({
                name: z.string(),
                arguments: z.string(),
            })
                .optional(),
        }),
        index: z.number(),
        logprobs: z.nullable(z.any()),
        finish_reason: z.string(),
    })),
    usage: z.object({
        prompt_tokens: z.number(),
        completion_tokens: z.number(),
        total_tokens: z.number(),
    }),
});
249
/**
 * Posts a chat completion request to the OpenAI API and delegates response
 * handling (JSON parsing or delta streaming) to the given response format.
 */
async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
    // Map camelCase option names to the snake_case keys the API expects.
    const body = {
        stream: responseFormat.stream,
        model,
        messages,
        functions,
        function_call: functionCall,
        temperature,
        top_p: topP,
        n,
        stop,
        max_tokens: maxTokens,
        presence_penalty: presencePenalty,
        frequency_penalty: frequencyPenalty,
        user,
    };
    return (0, postToApi_js_1.postJsonToApi)({
        url: `${baseUrl}/chat/completions`,
        apiKey,
        body,
        failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
        successfulResponseHandler: responseFormat.handler,
        abortSignal,
    });
}
273
+ exports.OpenAIChatResponseFormat = {
274
+ /**
275
+ * Returns the response as a JSON object.
276
+ */
277
+ json: {
278
+ stream: false,
279
+ handler: (0, postToApi_js_1.createJsonResponseHandler)(openAIChatResponseSchema),
280
+ },
281
+ /**
282
+ * Returns an async iterable over the text deltas (only the tex different of the first choice).
283
+ */
284
+ deltaIterable: {
285
+ stream: true,
286
+ handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatFullDeltaIterableQueue)(response.body),
287
+ },
288
+ };
@@ -0,0 +1,344 @@
1
+ import z from "zod";
2
+ import { AbstractModel } from "../../../model-function/AbstractModel.js";
3
+ import { FunctionOptions } from "../../../model-function/FunctionOptions.js";
4
+ import { GenerateJsonOrTextModel } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
5
+ import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
6
+ import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
7
+ import { PromptMapping } from "../../../prompt/PromptMapping.js";
8
+ import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
9
+ import { ResponseHandler } from "../../../util/api/postToApi.js";
10
+ import { OpenAIModelSettings } from "../OpenAIModelSettings.js";
11
+ import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
12
+ import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
13
+ import { OpenAIChatAutoFunctionPrompt, OpenAIChatSingleFunctionPrompt, OpenAIFunctionDescription } from "./OpenAIChatPrompt.js";
14
+ import { OpenAIChatDelta } from "./OpenAIChatStreamIterable.js";
15
+ export declare const OPENAI_CHAT_MODELS: {
16
+ "gpt-4": {
17
+ contextWindowSize: number;
18
+ promptTokenCostInMillicents: number;
19
+ completionTokenCostInMillicents: number;
20
+ };
21
+ "gpt-4-0314": {
22
+ contextWindowSize: number;
23
+ promptTokenCostInMillicents: number;
24
+ completionTokenCostInMillicents: number;
25
+ };
26
+ "gpt-4-0613": {
27
+ contextWindowSize: number;
28
+ promptTokenCostInMillicents: number;
29
+ completionTokenCostInMillicents: number;
30
+ };
31
+ "gpt-4-32k": {
32
+ contextWindowSize: number;
33
+ promptTokenCostInMillicents: number;
34
+ completionTokenCostInMillicents: number;
35
+ };
36
+ "gpt-4-32k-0314": {
37
+ contextWindowSize: number;
38
+ promptTokenCostInMillicents: number;
39
+ completionTokenCostInMillicents: number;
40
+ };
41
+ "gpt-4-32k-0613": {
42
+ contextWindowSize: number;
43
+ promptTokenCostInMillicents: number;
44
+ completionTokenCostInMillicents: number;
45
+ };
46
+ "gpt-3.5-turbo": {
47
+ contextWindowSize: number;
48
+ promptTokenCostInMillicents: number;
49
+ completionTokenCostInMillicents: number;
50
+ };
51
+ "gpt-3.5-turbo-0301": {
52
+ contextWindowSize: number;
53
+ promptTokenCostInMillicents: number;
54
+ completionTokenCostInMillicents: number;
55
+ };
56
+ "gpt-3.5-turbo-0613": {
57
+ contextWindowSize: number;
58
+ promptTokenCostInMillicents: number;
59
+ completionTokenCostInMillicents: number;
60
+ };
61
+ "gpt-3.5-turbo-16k": {
62
+ contextWindowSize: number;
63
+ promptTokenCostInMillicents: number;
64
+ completionTokenCostInMillicents: number;
65
+ };
66
+ "gpt-3.5-turbo-16k-0613": {
67
+ contextWindowSize: number;
68
+ promptTokenCostInMillicents: number;
69
+ completionTokenCostInMillicents: number;
70
+ };
71
+ };
72
+ export type OpenAIChatModelType = keyof typeof OPENAI_CHAT_MODELS;
73
+ export declare const isOpenAIChatModel: (model: string) => model is "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613";
74
+ export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }: {
75
+ model: OpenAIChatModelType;
76
+ response: OpenAIChatResponse;
77
+ }) => number;
78
+ export interface OpenAIChatCallSettings {
79
+ model: OpenAIChatModelType;
80
+ functions?: Array<{
81
+ name: string;
82
+ description?: string;
83
+ parameters: unknown;
84
+ }>;
85
+ functionCall?: "none" | "auto" | {
86
+ name: string;
87
+ };
88
+ temperature?: number;
89
+ topP?: number;
90
+ n?: number;
91
+ stop?: string | string[];
92
+ maxTokens?: number;
93
+ presencePenalty?: number;
94
+ frequencyPenalty?: number;
95
+ }
96
+ export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIModelSettings, OpenAIChatCallSettings {
97
+ isUserIdForwardingEnabled?: boolean;
98
+ }
99
+ /**
100
+ * Create a text generation model that calls the OpenAI chat completion API.
101
+ *
102
+ * @see https://platform.openai.com/docs/api-reference/chat/create
103
+ *
104
+ * @example
105
+ * const model = new OpenAIChatModel({
106
+ * model: "gpt-3.5-turbo",
107
+ * temperature: 0.7,
108
+ * maxTokens: 500,
109
+ * });
110
+ *
111
+ * const { text } = await generateText([
112
+ * model,
113
+ * OpenAIChatMessage.system(
114
+ * "Write a short story about a robot learning to love:"
115
+ * ),
116
+ * ]);
117
+ */
118
+ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings>, GenerateJsonOrTextModel<OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, OpenAIChatResponse, OpenAIChatSettings> {
119
+ constructor(settings: OpenAIChatSettings);
120
+ readonly provider: "openai";
121
+ get modelName(): "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613";
122
+ readonly contextWindowSize: number;
123
+ readonly tokenizer: TikTokenTokenizer;
124
+ private get apiKey();
125
+ /**
126
+ * Counts the prompt tokens required for the messages. This includes the message base tokens
127
+ * and the prompt base tokens.
128
+ */
129
+ countPromptTokens(messages: OpenAIChatMessage[]): Promise<number>;
130
+ callAPI<RESULT>(messages: Array<OpenAIChatMessage>, options: {
131
+ responseFormat: OpenAIChatResponseFormatType<RESULT>;
132
+ } & FunctionOptions<Partial<OpenAIChatCallSettings & OpenAIModelSettings & {
133
+ user?: string;
134
+ }>>): Promise<RESULT>;
135
+ generateTextResponse(prompt: OpenAIChatMessage[], options?: FunctionOptions<OpenAIChatSettings>): Promise<{
136
+ object: "chat.completion";
137
+ model: string;
138
+ id: string;
139
+ created: number;
140
+ usage: {
141
+ prompt_tokens: number;
142
+ total_tokens: number;
143
+ completion_tokens: number;
144
+ };
145
+ choices: {
146
+ message: {
147
+ content: string | null;
148
+ role: "assistant";
149
+ function_call?: {
150
+ name: string;
151
+ arguments: string;
152
+ } | undefined;
153
+ };
154
+ finish_reason: string;
155
+ index: number;
156
+ logprobs?: any;
157
+ }[];
158
+ }>;
159
+ extractText(response: OpenAIChatResponse): string;
160
+ generateDeltaStreamResponse(prompt: OpenAIChatMessage[], options?: FunctionOptions<OpenAIChatSettings>): Promise<AsyncIterable<DeltaEvent<OpenAIChatDelta>>>;
161
+ extractTextDelta(fullDelta: OpenAIChatDelta): string | undefined;
162
+ /**
163
+ * JSON generation uses the OpenAI GPT function calling API.
164
+ * It provides a single function specification and instructs the model to provide parameters for calling the function.
165
+ * The result is returned as parsed JSON.
166
+ *
167
+ * @see https://platform.openai.com/docs/guides/gpt/function-calling
168
+ */
169
+ generateJsonResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: FunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
170
+ mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, OpenAIChatMessage[]>): PromptMappingTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
171
+ withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
172
+ get maxCompletionTokens(): number | undefined;
173
+ withMaxCompletionTokens(maxCompletionTokens: number): this;
174
+ withStopTokens(stopTokens: string[]): this;
175
+ }
176
+ declare const openAIChatResponseSchema: z.ZodObject<{
177
+ id: z.ZodString;
178
+ object: z.ZodLiteral<"chat.completion">;
179
+ created: z.ZodNumber;
180
+ model: z.ZodString;
181
+ choices: z.ZodArray<z.ZodObject<{
182
+ message: z.ZodObject<{
183
+ role: z.ZodLiteral<"assistant">;
184
+ content: z.ZodNullable<z.ZodString>;
185
+ function_call: z.ZodOptional<z.ZodObject<{
186
+ name: z.ZodString;
187
+ arguments: z.ZodString;
188
+ }, "strip", z.ZodTypeAny, {
189
+ name: string;
190
+ arguments: string;
191
+ }, {
192
+ name: string;
193
+ arguments: string;
194
+ }>>;
195
+ }, "strip", z.ZodTypeAny, {
196
+ content: string | null;
197
+ role: "assistant";
198
+ function_call?: {
199
+ name: string;
200
+ arguments: string;
201
+ } | undefined;
202
+ }, {
203
+ content: string | null;
204
+ role: "assistant";
205
+ function_call?: {
206
+ name: string;
207
+ arguments: string;
208
+ } | undefined;
209
+ }>;
210
+ index: z.ZodNumber;
211
+ logprobs: z.ZodNullable<z.ZodAny>;
212
+ finish_reason: z.ZodString;
213
+ }, "strip", z.ZodTypeAny, {
214
+ message: {
215
+ content: string | null;
216
+ role: "assistant";
217
+ function_call?: {
218
+ name: string;
219
+ arguments: string;
220
+ } | undefined;
221
+ };
222
+ finish_reason: string;
223
+ index: number;
224
+ logprobs?: any;
225
+ }, {
226
+ message: {
227
+ content: string | null;
228
+ role: "assistant";
229
+ function_call?: {
230
+ name: string;
231
+ arguments: string;
232
+ } | undefined;
233
+ };
234
+ finish_reason: string;
235
+ index: number;
236
+ logprobs?: any;
237
+ }>, "many">;
238
+ usage: z.ZodObject<{
239
+ prompt_tokens: z.ZodNumber;
240
+ completion_tokens: z.ZodNumber;
241
+ total_tokens: z.ZodNumber;
242
+ }, "strip", z.ZodTypeAny, {
243
+ prompt_tokens: number;
244
+ total_tokens: number;
245
+ completion_tokens: number;
246
+ }, {
247
+ prompt_tokens: number;
248
+ total_tokens: number;
249
+ completion_tokens: number;
250
+ }>;
251
+ }, "strip", z.ZodTypeAny, {
252
+ object: "chat.completion";
253
+ model: string;
254
+ id: string;
255
+ created: number;
256
+ usage: {
257
+ prompt_tokens: number;
258
+ total_tokens: number;
259
+ completion_tokens: number;
260
+ };
261
+ choices: {
262
+ message: {
263
+ content: string | null;
264
+ role: "assistant";
265
+ function_call?: {
266
+ name: string;
267
+ arguments: string;
268
+ } | undefined;
269
+ };
270
+ finish_reason: string;
271
+ index: number;
272
+ logprobs?: any;
273
+ }[];
274
+ }, {
275
+ object: "chat.completion";
276
+ model: string;
277
+ id: string;
278
+ created: number;
279
+ usage: {
280
+ prompt_tokens: number;
281
+ total_tokens: number;
282
+ completion_tokens: number;
283
+ };
284
+ choices: {
285
+ message: {
286
+ content: string | null;
287
+ role: "assistant";
288
+ function_call?: {
289
+ name: string;
290
+ arguments: string;
291
+ } | undefined;
292
+ };
293
+ finish_reason: string;
294
+ index: number;
295
+ logprobs?: any;
296
+ }[];
297
+ }>;
298
+ export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
299
+ export type OpenAIChatResponseFormatType<T> = {
300
+ stream: boolean;
301
+ handler: ResponseHandler<T>;
302
+ };
303
+ export declare const OpenAIChatResponseFormat: {
304
+ /**
305
+ * Returns the response as a JSON object.
306
+ */
307
+ json: {
308
+ stream: false;
309
+ handler: ResponseHandler<{
310
+ object: "chat.completion";
311
+ model: string;
312
+ id: string;
313
+ created: number;
314
+ usage: {
315
+ prompt_tokens: number;
316
+ total_tokens: number;
317
+ completion_tokens: number;
318
+ };
319
+ choices: {
320
+ message: {
321
+ content: string | null;
322
+ role: "assistant";
323
+ function_call?: {
324
+ name: string;
325
+ arguments: string;
326
+ } | undefined;
327
+ };
328
+ finish_reason: string;
329
+ index: number;
330
+ logprobs?: any;
331
+ }[];
332
+ }>;
333
+ };
334
+ /**
335
+ * Returns an async iterable over the text deltas (only the tex different of the first choice).
336
+ */
337
+ deltaIterable: {
338
+ stream: true;
339
+ handler: ({ response }: {
340
+ response: Response;
341
+ }) => Promise<AsyncIterable<DeltaEvent<OpenAIChatDelta>>>;
342
+ };
343
+ };
344
+ export {};