modelfusion 0.104.0 → 0.106.0

This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
Files changed (292)
  1. package/CHANGELOG.md +60 -0
  2. package/README.md +8 -10
  3. package/core/DefaultRun.cjs +0 -4
  4. package/core/DefaultRun.d.ts +0 -2
  5. package/core/DefaultRun.js +0 -4
  6. package/core/ExtensionFunctionEvent.d.ts +11 -0
  7. package/core/FunctionEvent.d.ts +2 -2
  8. package/extension/index.cjs +22 -3
  9. package/extension/index.d.ts +5 -1
  10. package/extension/index.js +4 -1
  11. package/index.cjs +0 -3
  12. package/index.d.ts +0 -3
  13. package/index.js +0 -3
  14. package/model-function/Delta.d.ts +1 -2
  15. package/model-function/executeStreamCall.cjs +6 -4
  16. package/model-function/executeStreamCall.d.ts +2 -2
  17. package/model-function/executeStreamCall.js +6 -4
  18. package/model-function/generate-speech/streamSpeech.cjs +1 -2
  19. package/model-function/generate-speech/streamSpeech.js +1 -2
  20. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
  21. package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
  22. package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
  23. package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
  24. package/model-function/generate-structure/jsonStructurePrompt.cjs +42 -6
  25. package/model-function/generate-structure/jsonStructurePrompt.d.ts +12 -1
  26. package/model-function/generate-structure/jsonStructurePrompt.js +42 -5
  27. package/model-function/generate-structure/streamStructure.cjs +7 -8
  28. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  29. package/model-function/generate-structure/streamStructure.js +7 -8
  30. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
  31. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
  32. package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
  33. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -1
  34. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +1 -1
  35. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
  36. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
  37. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
  38. package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
  39. package/model-function/generate-text/index.cjs +1 -0
  40. package/model-function/generate-text/index.d.ts +1 -0
  41. package/model-function/generate-text/index.js +1 -0
  42. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -2
  43. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +1 -1
  44. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +8 -5
  45. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +7 -4
  46. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +42 -0
  47. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +27 -5
  48. package/model-function/generate-text/prompt-template/ChatPrompt.js +41 -1
  49. package/model-function/generate-text/prompt-template/{Content.cjs → ContentPart.cjs} +1 -1
  50. package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
  51. package/model-function/generate-text/prompt-template/{Content.js → ContentPart.js} +1 -1
  52. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +3 -2
  53. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +8 -5
  54. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +1 -1
  55. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +6 -3
  56. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +8 -4
  57. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +6 -2
  58. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -4
  59. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +6 -2
  60. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +7 -3
  61. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +6 -2
  62. package/model-function/generate-text/prompt-template/index.cjs +1 -1
  63. package/model-function/generate-text/prompt-template/index.d.ts +1 -1
  64. package/model-function/generate-text/prompt-template/index.js +1 -1
  65. package/model-function/generate-text/streamText.cjs +27 -28
  66. package/model-function/generate-text/streamText.d.ts +1 -0
  67. package/model-function/generate-text/streamText.js +27 -28
  68. package/model-function/index.cjs +0 -1
  69. package/model-function/index.d.ts +0 -1
  70. package/model-function/index.js +0 -1
  71. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +7 -3
  72. package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -1
  73. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
  74. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +11 -2
  75. package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
  76. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
  77. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
  78. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  79. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
  80. package/model-provider/cohere/CohereTextGenerationModel.d.ts +49 -15
  81. package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
  82. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
  83. package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
  84. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
  85. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
  86. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +6 -1
  87. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +6 -1
  88. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
  89. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +171 -20
  90. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
  91. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
  92. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
  93. package/model-provider/mistral/MistralChatModel.cjs +30 -104
  94. package/model-provider/mistral/MistralChatModel.d.ts +47 -14
  95. package/model-provider/mistral/MistralChatModel.js +30 -104
  96. package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
  97. package/model-provider/mistral/MistralChatModel.test.js +49 -0
  98. package/model-provider/mistral/MistralPromptTemplate.cjs +11 -4
  99. package/model-provider/mistral/MistralPromptTemplate.js +9 -2
  100. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +13 -13
  101. package/model-provider/ollama/OllamaChatModel.cjs +7 -43
  102. package/model-provider/ollama/OllamaChatModel.d.ts +67 -14
  103. package/model-provider/ollama/OllamaChatModel.js +8 -44
  104. package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
  105. package/model-provider/ollama/OllamaChatModel.test.js +25 -0
  106. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +34 -4
  107. package/model-provider/ollama/OllamaChatPromptTemplate.js +34 -4
  108. package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
  109. package/model-provider/ollama/OllamaCompletionModel.d.ts +67 -10
  110. package/model-provider/ollama/OllamaCompletionModel.js +24 -45
  111. package/model-provider/ollama/OllamaCompletionModel.test.cjs +95 -13
  112. package/model-provider/ollama/OllamaCompletionModel.test.js +72 -13
  113. package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
  114. package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
  115. package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
  116. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
  117. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
  118. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
  119. package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
  120. package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
  121. package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
  122. package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
  123. package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
  124. package/model-provider/openai/OpenAIChatModel.test.js +92 -0
  125. package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
  126. package/model-provider/openai/{chat/OpenAIChatPromptTemplate.d.ts → OpenAIChatPromptTemplate.d.ts} +3 -3
  127. package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
  128. package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
  129. package/model-provider/openai/OpenAICompletionModel.d.ts +27 -10
  130. package/model-provider/openai/OpenAICompletionModel.js +33 -85
  131. package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
  132. package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
  133. package/model-provider/openai/OpenAIFacade.cjs +2 -2
  134. package/model-provider/openai/OpenAIFacade.d.ts +3 -3
  135. package/model-provider/openai/OpenAIFacade.js +2 -2
  136. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -12
  137. package/model-provider/openai/OpenAITranscriptionModel.d.ts +17 -17
  138. package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
  139. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
  140. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
  141. package/model-provider/openai/index.cjs +6 -7
  142. package/model-provider/openai/index.d.ts +5 -7
  143. package/model-provider/openai/index.js +5 -6
  144. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
  145. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
  146. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
  147. package/model-provider/stability/StabilityImageGenerationModel.d.ts +5 -5
  148. package/package.json +13 -24
  149. package/test/JsonTestServer.cjs +33 -0
  150. package/test/JsonTestServer.d.ts +7 -0
  151. package/test/JsonTestServer.js +29 -0
  152. package/test/StreamingTestServer.cjs +55 -0
  153. package/test/StreamingTestServer.d.ts +7 -0
  154. package/test/StreamingTestServer.js +51 -0
  155. package/test/arrayFromAsync.cjs +13 -0
  156. package/test/arrayFromAsync.d.ts +1 -0
  157. package/test/arrayFromAsync.js +9 -0
  158. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +1 -1
  159. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
  160. package/tool/generate-tool-call/TextGenerationToolCallModel.js +1 -1
  161. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.d.ts +1 -11
  162. package/tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.d.ts +12 -0
  163. package/tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.js +1 -0
  164. package/tool/generate-tool-calls-or-text/index.cjs +1 -0
  165. package/tool/generate-tool-calls-or-text/index.d.ts +1 -0
  166. package/tool/generate-tool-calls-or-text/index.js +1 -0
  167. package/util/index.cjs +0 -1
  168. package/util/index.d.ts +0 -1
  169. package/util/index.js +0 -1
  170. package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
  171. package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
  172. package/util/streaming/createEventSourceResponseHandler.js +5 -0
  173. package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
  174. package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
  175. package/util/streaming/createJsonStreamResponseHandler.js +5 -0
  176. package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
  177. package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
  178. package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
  179. package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
  180. package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
  181. package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
  182. package/browser/MediaSourceAppender.cjs +0 -54
  183. package/browser/MediaSourceAppender.d.ts +0 -11
  184. package/browser/MediaSourceAppender.js +0 -50
  185. package/browser/convertAudioChunksToBase64.cjs +0 -8
  186. package/browser/convertAudioChunksToBase64.d.ts +0 -4
  187. package/browser/convertAudioChunksToBase64.js +0 -4
  188. package/browser/convertBlobToBase64.cjs +0 -23
  189. package/browser/convertBlobToBase64.d.ts +0 -1
  190. package/browser/convertBlobToBase64.js +0 -19
  191. package/browser/index.cjs +0 -22
  192. package/browser/index.d.ts +0 -6
  193. package/browser/index.js +0 -6
  194. package/browser/invokeFlow.cjs +0 -23
  195. package/browser/invokeFlow.d.ts +0 -8
  196. package/browser/invokeFlow.js +0 -19
  197. package/browser/readEventSource.cjs +0 -29
  198. package/browser/readEventSource.d.ts +0 -9
  199. package/browser/readEventSource.js +0 -25
  200. package/browser/readEventSourceStream.cjs +0 -35
  201. package/browser/readEventSourceStream.d.ts +0 -7
  202. package/browser/readEventSourceStream.js +0 -31
  203. package/composed-function/index.cjs +0 -19
  204. package/composed-function/index.d.ts +0 -3
  205. package/composed-function/index.js +0 -3
  206. package/composed-function/summarize/SummarizationFunction.d.ts +0 -4
  207. package/composed-function/summarize/summarizeRecursively.cjs +0 -19
  208. package/composed-function/summarize/summarizeRecursively.d.ts +0 -11
  209. package/composed-function/summarize/summarizeRecursively.js +0 -15
  210. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +0 -25
  211. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +0 -24
  212. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +0 -21
  213. package/cost/Cost.cjs +0 -38
  214. package/cost/Cost.d.ts +0 -16
  215. package/cost/Cost.js +0 -34
  216. package/cost/CostCalculator.d.ts +0 -8
  217. package/cost/calculateCost.cjs +0 -28
  218. package/cost/calculateCost.d.ts +0 -7
  219. package/cost/calculateCost.js +0 -24
  220. package/cost/index.cjs +0 -19
  221. package/cost/index.d.ts +0 -3
  222. package/cost/index.js +0 -3
  223. package/guard/GuardEvent.cjs +0 -2
  224. package/guard/GuardEvent.d.ts +0 -7
  225. package/guard/fixStructure.cjs +0 -75
  226. package/guard/fixStructure.d.ts +0 -64
  227. package/guard/fixStructure.js +0 -71
  228. package/guard/guard.cjs +0 -79
  229. package/guard/guard.d.ts +0 -29
  230. package/guard/guard.js +0 -75
  231. package/guard/index.cjs +0 -19
  232. package/guard/index.d.ts +0 -3
  233. package/guard/index.js +0 -3
  234. package/model-function/SuccessfulModelCall.cjs +0 -10
  235. package/model-function/SuccessfulModelCall.d.ts +0 -12
  236. package/model-function/SuccessfulModelCall.js +0 -6
  237. package/model-function/generate-text/prompt-template/Content.d.ts +0 -25
  238. package/model-provider/openai/OpenAICostCalculator.cjs +0 -89
  239. package/model-provider/openai/OpenAICostCalculator.d.ts +0 -6
  240. package/model-provider/openai/OpenAICostCalculator.js +0 -85
  241. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
  242. package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
  243. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -70
  244. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -63
  245. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
  246. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
  247. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
  248. package/server/fastify/AssetStorage.cjs +0 -2
  249. package/server/fastify/AssetStorage.d.ts +0 -17
  250. package/server/fastify/DefaultFlow.cjs +0 -22
  251. package/server/fastify/DefaultFlow.d.ts +0 -16
  252. package/server/fastify/DefaultFlow.js +0 -18
  253. package/server/fastify/FileSystemAssetStorage.cjs +0 -60
  254. package/server/fastify/FileSystemAssetStorage.d.ts +0 -19
  255. package/server/fastify/FileSystemAssetStorage.js +0 -56
  256. package/server/fastify/FileSystemLogger.cjs +0 -49
  257. package/server/fastify/FileSystemLogger.d.ts +0 -18
  258. package/server/fastify/FileSystemLogger.js +0 -45
  259. package/server/fastify/Flow.cjs +0 -2
  260. package/server/fastify/Flow.d.ts +0 -9
  261. package/server/fastify/FlowRun.cjs +0 -71
  262. package/server/fastify/FlowRun.d.ts +0 -28
  263. package/server/fastify/FlowRun.js +0 -67
  264. package/server/fastify/FlowSchema.cjs +0 -2
  265. package/server/fastify/FlowSchema.d.ts +0 -5
  266. package/server/fastify/Logger.cjs +0 -2
  267. package/server/fastify/Logger.d.ts +0 -13
  268. package/server/fastify/PathProvider.cjs +0 -34
  269. package/server/fastify/PathProvider.d.ts +0 -12
  270. package/server/fastify/PathProvider.js +0 -30
  271. package/server/fastify/index.cjs +0 -24
  272. package/server/fastify/index.d.ts +0 -8
  273. package/server/fastify/index.js +0 -8
  274. package/server/fastify/modelFusionFlowPlugin.cjs +0 -103
  275. package/server/fastify/modelFusionFlowPlugin.d.ts +0 -12
  276. package/server/fastify/modelFusionFlowPlugin.js +0 -99
  277. package/util/getAudioFileExtension.cjs +0 -29
  278. package/util/getAudioFileExtension.d.ts +0 -1
  279. package/util/getAudioFileExtension.js +0 -25
  280. /package/{composed-function/summarize/SummarizationFunction.cjs → core/ExtensionFunctionEvent.cjs} +0 -0
  281. /package/{composed-function/summarize/SummarizationFunction.js → core/ExtensionFunctionEvent.js} +0 -0
  282. /package/{cost/CostCalculator.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
  283. /package/{guard/GuardEvent.js → model-provider/cohere/CohereTextGenerationModel.test.d.ts} +0 -0
  284. /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → llamacpp/LlamaCppTextGenerationModel.test.d.ts} +0 -0
  285. /package/{server/fastify/AssetStorage.js → model-provider/mistral/MistralChatModel.test.d.ts} +0 -0
  286. /package/{server/fastify/Flow.js → model-provider/ollama/OllamaChatModel.test.d.ts} +0 -0
  287. /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
  288. /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
  289. /package/{server/fastify/FlowSchema.js → model-provider/openai/OpenAIChatModel.test.d.ts} +0 -0
  290. /package/{server/fastify/Logger.js → model-provider/openai/OpenAICompletionModel.test.d.ts} +0 -0
  291. /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
  292. /package/{cost/CostCalculator.cjs → tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.cjs} +0 -0
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs
@@ -1,6 +1,8 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat = exports.instruction = void 0;
+ const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
+ const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
  // default Vicuna 1 system message
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
      "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -78,9 +80,12 @@ function chat() {
                  break;
              }
              case "assistant": {
-                 text += `ASSISTANT: ${content}`;
+                 text += `ASSISTANT: ${(0, ContentPart_js_1.validateContentIsString)(content, prompt)}`;
                  break;
              }
+             case "tool": {
+                 throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+             }
              default: {
                  const _exhaustiveCheck = role;
                  throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js
@@ -1,3 +1,5 @@
+ import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
+ import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
  // default Vicuna 1 system message
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
      "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -74,9 +76,12 @@ export function chat() {
                  break;
              }
              case "assistant": {
-                 text += `ASSISTANT: ${content}`;
+                 text += `ASSISTANT: ${validateContentIsString(content, prompt)}`;
                  break;
              }
+             case "tool": {
+                 throw new InvalidPromptError("Tool messages are not supported.", prompt);
+             }
              default: {
                  const _exhaustiveCheck = role;
                  throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
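Both hunks apply the same pattern: assistant content is now validated as a plain string, the new `tool` branch fails fast, and the `default` branch keeps a compile-time exhaustiveness check. A standalone TypeScript sketch of that technique (the `Role` union and error class here are simplified stand-ins, not the library's own types):

type Role = "user" | "assistant" | "tool";

// stand-in for modelfusion's InvalidPromptError
class InvalidPromptError extends Error {}

function appendChatMessage(text: string, role: Role, content: string): string {
    switch (role) {
        case "user":
            return text + `USER: ${content}\n`;
        case "assistant":
            return text + `ASSISTANT: ${content}\n`;
        case "tool":
            // mirrors the new behavior above: reject tool messages outright
            throw new InvalidPromptError("Tool messages are not supported.");
        default: {
            // only compiles while every Role variant is handled above
            const _exhaustiveCheck: never = role;
            throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
        }
    }
}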
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs
@@ -5,11 +5,11 @@ const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
  const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
  const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
  const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
@@ -138,6 +138,9 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
              responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
          });
      }
+     extractTextDelta(delta) {
+         return delta.content;
+     }
      withTextPrompt() {
          return this.withPromptTemplate({
              format(prompt) {
@@ -228,7 +231,7 @@ const llamaCppTextGenerationResponseSchema = zod_1.z.object({
      tokens_predicted: zod_1.z.number(),
      truncated: zod_1.z.boolean(),
  });
- const llamaCppTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.discriminatedUnion("stop", [
+ const llamaCppTextStreamChunkSchema = (0, ZodSchema_js_1.zodSchema)(zod_1.z.discriminatedUnion("stop", [
      zod_1.z.object({
          content: zod_1.z.string(),
          stop: zod_1.z.literal(false),
@@ -237,7 +240,6 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z
  ]));
  async function createLlamaCppFullDeltaIterableQueue(stream) {
      const queue = new AsyncQueue_js_1.AsyncQueue();
-     let content = "";
      // process the stream asynchonously (no 'await' on purpose):
      (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
          .then(async (events) => {
@@ -246,18 +248,9 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
              const data = event.data;
              const eventData = (0, parseJSON_js_1.parseJSON)({
                  text: data,
-                 schema: llamaCppTextStreamingResponseSchema,
-             });
-             content += eventData.content;
-             queue.push({
-                 type: "delta",
-                 fullDelta: {
-                     content,
-                     isComplete: eventData.stop,
-                     delta: eventData.content,
-                 },
-                 valueDelta: eventData.content,
+                 schema: llamaCppTextStreamChunkSchema,
              });
+             queue.push({ type: "delta", deltaValue: eventData });
              if (eventData.stop) {
                  queue.close();
              }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts
@@ -58,14 +58,14 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
      countPromptTokens(prompt: LlamaCppTextGenerationPrompt): Promise<number>;
      doGenerateTexts(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<{
          response: {
-             stop: true;
              model: string;
-             prompt: string;
+             stop: true;
              content: string;
+             prompt: string;
              generation_settings: {
-                 stop: string[];
                  model: string;
                  stream: boolean;
+                 stop: string[];
                  seed: number;
                  mirostat: number;
                  frequency_penalty: number;
@@ -116,7 +116,59 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
              totalTokens: number;
          };
      }>;
-     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<{
+         model: string;
+         stop: true;
+         content: string;
+         prompt: string;
+         generation_settings: {
+             model: string;
+             stream: boolean;
+             stop: string[];
+             seed: number;
+             mirostat: number;
+             frequency_penalty: number;
+             ignore_eos: boolean;
+             logit_bias: number[];
+             mirostat_eta: number;
+             mirostat_tau: number;
+             n_ctx: number;
+             n_keep: number;
+             n_predict: number;
+             n_probs: number;
+             penalize_nl: boolean;
+             presence_penalty: number;
+             repeat_last_n: number;
+             repeat_penalty: number;
+             temp: number;
+             tfs_z: number;
+             top_k: number;
+             top_p: number;
+             typical_p: number;
+         };
+         stopped_eos: boolean;
+         stopped_limit: boolean;
+         stopped_word: boolean;
+         stopping_word: string;
+         timings: {
+             predicted_ms: number;
+             predicted_n: number;
+             predicted_per_second: number | null;
+             predicted_per_token_ms: number | null;
+             prompt_ms: number | null;
+             prompt_n: number;
+             prompt_per_second: number | null;
+             prompt_per_token_ms: number | null;
+         };
+         tokens_cached: number;
+         tokens_evaluated: number;
+         tokens_predicted: number;
+         truncated: boolean;
+     } | {
+         stop: false;
+         content: string;
+     }>>>;
+     extractTextDelta(delta: unknown): string;
      withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
      /**
       * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
@@ -156,9 +208,9 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
          top_p: z.ZodNumber;
          typical_p: z.ZodNumber;
      }, "strip", z.ZodTypeAny, {
-         stop: string[];
          model: string;
          stream: boolean;
+         stop: string[];
          seed: number;
          mirostat: number;
          frequency_penalty: number;
@@ -180,9 +232,9 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
          top_p: number;
          typical_p: number;
      }, {
-         stop: string[];
          model: string;
          stream: boolean;
+         stop: string[];
          seed: number;
          mirostat: number;
          frequency_penalty: number;
@@ -243,14 +295,14 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
      tokens_predicted: z.ZodNumber;
      truncated: z.ZodBoolean;
  }, "strip", z.ZodTypeAny, {
-     stop: true;
      model: string;
-     prompt: string;
+     stop: true;
      content: string;
+     prompt: string;
      generation_settings: {
-         stop: string[];
          model: string;
          stream: boolean;
+         stop: string[];
          seed: number;
          mirostat: number;
          frequency_penalty: number;
@@ -291,14 +343,14 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
      tokens_predicted: number;
      truncated: boolean;
  }, {
-     stop: true;
      model: string;
-     prompt: string;
+     stop: true;
      content: string;
+     prompt: string;
      generation_settings: {
-         stop: string[];
          model: string;
          stream: boolean;
+         stop: string[];
          seed: number;
          mirostat: number;
          frequency_penalty: number;
@@ -340,11 +392,59 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
      truncated: boolean;
  }>;
  export type LlamaCppTextGenerationResponse = z.infer<typeof llamaCppTextGenerationResponseSchema>;
- export type LlamaCppTextGenerationDelta = {
+ declare const llamaCppTextStreamChunkSchema: import("../../core/schema/ZodSchema.js").ZodSchema<{
+     model: string;
+     stop: true;
      content: string;
-     isComplete: boolean;
-     delta: string;
- };
+     prompt: string;
+     generation_settings: {
+         model: string;
+         stream: boolean;
+         stop: string[];
+         seed: number;
+         mirostat: number;
+         frequency_penalty: number;
+         ignore_eos: boolean;
+         logit_bias: number[];
+         mirostat_eta: number;
+         mirostat_tau: number;
+         n_ctx: number;
+         n_keep: number;
+         n_predict: number;
+         n_probs: number;
+         penalize_nl: boolean;
+         presence_penalty: number;
+         repeat_last_n: number;
+         repeat_penalty: number;
+         temp: number;
+         tfs_z: number;
+         top_k: number;
+         top_p: number;
+         typical_p: number;
+     };
+     stopped_eos: boolean;
+     stopped_limit: boolean;
+     stopped_word: boolean;
+     stopping_word: string;
+     timings: {
+         predicted_ms: number;
+         predicted_n: number;
+         predicted_per_second: number | null;
+         predicted_per_token_ms: number | null;
+         prompt_ms: number | null;
+         prompt_n: number;
+         prompt_per_second: number | null;
+         prompt_per_token_ms: number | null;
+     };
+     tokens_cached: number;
+     tokens_evaluated: number;
+     tokens_predicted: number;
+     truncated: boolean;
+ } | {
+     stop: false;
+     content: string;
+ }>;
+ export type LlamaCppTextStreamChunk = (typeof llamaCppTextStreamChunkSchema)["_type"];
  export type LlamaCppTextGenerationResponseFormatType<T> = {
      stream: boolean;
      handler: ResponseHandler<T>;
@@ -356,14 +456,14 @@ export declare const LlamaCppTextGenerationResponseFormat: {
      json: {
          stream: false;
          handler: ResponseHandler<{
-             stop: true;
              model: string;
-             prompt: string;
+             stop: true;
              content: string;
+             prompt: string;
              generation_settings: {
-                 stop: string[];
                  model: string;
                  stream: boolean;
+                 stop: string[];
                  seed: number;
                  mirostat: number;
                  frequency_penalty: number;
@@ -413,7 +513,58 @@ export declare const LlamaCppTextGenerationResponseFormat: {
          stream: true;
          handler: ({ response }: {
              response: Response;
-         }) => Promise<AsyncIterable<Delta<string>>>;
+         }) => Promise<AsyncIterable<Delta<{
+             model: string;
+             stop: true;
+             content: string;
+             prompt: string;
+             generation_settings: {
+                 model: string;
+                 stream: boolean;
+                 stop: string[];
+                 seed: number;
+                 mirostat: number;
+                 frequency_penalty: number;
+                 ignore_eos: boolean;
+                 logit_bias: number[];
+                 mirostat_eta: number;
+                 mirostat_tau: number;
+                 n_ctx: number;
+                 n_keep: number;
+                 n_predict: number;
+                 n_probs: number;
+                 penalize_nl: boolean;
+                 presence_penalty: number;
+                 repeat_last_n: number;
+                 repeat_penalty: number;
+                 temp: number;
+                 tfs_z: number;
+                 top_k: number;
+                 top_p: number;
+                 typical_p: number;
+             };
+             stopped_eos: boolean;
+             stopped_limit: boolean;
+             stopped_word: boolean;
+             stopping_word: string;
+             timings: {
+                 predicted_ms: number;
+                 predicted_n: number;
+                 predicted_per_second: number | null;
+                 predicted_per_token_ms: number | null;
+                 prompt_ms: number | null;
+                 prompt_n: number;
+                 prompt_per_second: number | null;
+                 prompt_per_token_ms: number | null;
+             };
+             tokens_cached: number;
+             tokens_evaluated: number;
+             tokens_predicted: number;
+             truncated: boolean;
+         } | {
+             stop: false;
+             content: string;
+         }>>>;
      };
  };
  export {};
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
@@ -1,12 +1,12 @@
  import { z } from "zod";
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
- import { ZodSchema } from "../../core/schema/ZodSchema.js";
+ import { zodSchema } from "../../core/schema/ZodSchema.js";
+ import { parseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
- import { parseJSON } from "../../core/schema/parseJSON.js";
  import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
  import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
  import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
@@ -135,6 +135,9 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
              responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
          });
      }
+     extractTextDelta(delta) {
+         return delta.content;
+     }
      withTextPrompt() {
          return this.withPromptTemplate({
              format(prompt) {
@@ -224,7 +227,7 @@ const llamaCppTextGenerationResponseSchema = z.object({
      tokens_predicted: z.number(),
      truncated: z.boolean(),
  });
- const llamaCppTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("stop", [
+ const llamaCppTextStreamChunkSchema = zodSchema(z.discriminatedUnion("stop", [
      z.object({
          content: z.string(),
          stop: z.literal(false),
@@ -233,7 +236,6 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("
  ]));
  async function createLlamaCppFullDeltaIterableQueue(stream) {
      const queue = new AsyncQueue();
-     let content = "";
      // process the stream asynchonously (no 'await' on purpose):
      parseEventSourceStream({ stream })
          .then(async (events) => {
@@ -242,18 +244,9 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
              const data = event.data;
              const eventData = parseJSON({
                  text: data,
-                 schema: llamaCppTextStreamingResponseSchema,
-             });
-             content += eventData.content;
-             queue.push({
-                 type: "delta",
-                 fullDelta: {
-                     content,
-                     isComplete: eventData.stop,
-                     delta: eventData.content,
-                 },
-                 valueDelta: eventData.content,
+                 schema: llamaCppTextStreamChunkSchema,
              });
+             queue.push({ type: "delta", deltaValue: eventData });
              if (eventData.stop) {
                  queue.close();
              }
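The streaming rework above no longer accumulates a `fullDelta` inside the event-source parser: each raw chunk is pushed as `deltaValue`, and the model's new `extractTextDelta` turns a chunk back into text. A minimal consumer-side sketch of that contract (TypeScript; the chunk type is reduced to the two variants of `llamaCppTextStreamChunkSchema`, with the extra fields of the final chunk omitted):

// reduced mirror of the schema's discriminated union on `stop`
type LlamaCppChunk =
    | { content: string; stop: false }
    | { content: string; stop: true };

type Delta<T> = { type: "delta"; deltaValue: T };

// what extractTextDelta(delta) does in the diff: pick the text out of a raw chunk
function extractTextDelta(delta: LlamaCppChunk): string {
    return delta.content;
}

// consumer loop in the style of streamText: raw deltas in, text fragments out
async function* toTextStream(deltas: AsyncIterable<Delta<LlamaCppChunk>>): AsyncGenerator<string> {
    for await (const event of deltas) {
        const text = extractTextDelta(event.deltaValue);
        if (text.length > 0) yield text;
        if (event.deltaValue.stop) return;
    }
}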
package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs
@@ -0,0 +1,37 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
+ const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
+ const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
+ const LlamaCppTextGenerationModel_js_1 = require("./LlamaCppTextGenerationModel.cjs");
+ describe("streamText", () => {
+     const server = new StreamingTestServer_js_1.StreamingTestServer("http://127.0.0.1:8080/completion");
+     server.setupTestEnvironment();
+     it("should return a text stream", async () => {
+         server.responseChunks = [
+             `data: {"content":"Hello","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":", ","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":"world!","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":"","generation_settings":{"frequency_penalty":0.0,"grammar":"",` +
+                 `"ignore_eos":false,"logit_bias":[],"min_p":0.05000000074505806,"mirostat":0,` +
+                 `"mirostat_eta":0.10000000149011612,"mirostat_tau":5.0,"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
+                 `"n_ctx":4096,"n_keep":0,"n_predict":-1,"n_probs":0,"penalize_nl":true,"penalty_prompt_tokens":[],` +
+                 `"presence_penalty":0.0,"repeat_last_n":64,"repeat_penalty":1.100000023841858,"seed":4294967295,` +
+                 `"stop":[],"stream":true,"temp":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
+                 `"typical_p":1.0,"use_penalty_prompt_tokens":false},"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
+                 `"prompt":"hello","slot_id":0,"stop":true,"stopped_eos":true,"stopped_limit":false,` +
+                 `"stopped_word":false,"stopping_word":"","timings":{"predicted_ms":1054.704,"predicted_n":69,` +
+                 `"predicted_per_second":65.421198743913,"predicted_per_token_ms":15.285565217391303,` +
+                 `"prompt_ms":244.228,"prompt_n":5,"prompt_per_second":20.472673075978186,` +
+                 `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
+                 `"tokens_predicted":69,"truncated":false}\n\n`,
+         ];
+         const stream = await (0, streamText_js_1.streamText)(new LlamaCppTextGenerationModel_js_1.LlamaCppTextGenerationModel().withTextPrompt(), "hello");
+         // note: space moved to last chunk bc of trimming
+         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
+             "Hello",
+             ",",
+             " world!",
+         ]);
+     });
+ });
package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js
@@ -0,0 +1,35 @@
+ import { streamText } from "../../model-function/generate-text/streamText.js";
+ import { StreamingTestServer } from "../../test/StreamingTestServer.js";
+ import { arrayFromAsync } from "../../test/arrayFromAsync.js";
+ import { LlamaCppTextGenerationModel } from "./LlamaCppTextGenerationModel.js";
+ describe("streamText", () => {
+     const server = new StreamingTestServer("http://127.0.0.1:8080/completion");
+     server.setupTestEnvironment();
+     it("should return a text stream", async () => {
+         server.responseChunks = [
+             `data: {"content":"Hello","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":", ","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":"world!","multimodal":false,"slot_id":0,"stop":false}\n\n`,
+             `data: {"content":"","generation_settings":{"frequency_penalty":0.0,"grammar":"",` +
+                 `"ignore_eos":false,"logit_bias":[],"min_p":0.05000000074505806,"mirostat":0,` +
+                 `"mirostat_eta":0.10000000149011612,"mirostat_tau":5.0,"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
+                 `"n_ctx":4096,"n_keep":0,"n_predict":-1,"n_probs":0,"penalize_nl":true,"penalty_prompt_tokens":[],` +
+                 `"presence_penalty":0.0,"repeat_last_n":64,"repeat_penalty":1.100000023841858,"seed":4294967295,` +
+                 `"stop":[],"stream":true,"temp":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
+                 `"typical_p":1.0,"use_penalty_prompt_tokens":false},"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
+                 `"prompt":"hello","slot_id":0,"stop":true,"stopped_eos":true,"stopped_limit":false,` +
+                 `"stopped_word":false,"stopping_word":"","timings":{"predicted_ms":1054.704,"predicted_n":69,` +
+                 `"predicted_per_second":65.421198743913,"predicted_per_token_ms":15.285565217391303,` +
+                 `"prompt_ms":244.228,"prompt_n":5,"prompt_per_second":20.472673075978186,` +
+                 `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
+                 `"tokens_predicted":69,"truncated":false}\n\n`,
+         ];
+         const stream = await streamText(new LlamaCppTextGenerationModel().withTextPrompt(), "hello");
+         // note: space moved to last chunk bc of trimming
+         expect(await arrayFromAsync(stream)).toStrictEqual([
+             "Hello",
+             ",",
+             " world!",
+         ]);
+     });
+ });
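Both tests collect their streams with the new `test/arrayFromAsync` helper shipped in this release (files 155-157 in the list above). A plausible implementation, not the package's verbatim source, is just:

// collect an async iterable into an array
export async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
    const result: T[] = [];
    for await (const item of iterable) {
        result.push(item);
    }
    return result;
}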