@alpic80/rivet-core 1.19.1-aidon.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +7 -0
- package/README.md +176 -0
- package/dist/cjs/bundle.cjs +18915 -0
- package/dist/cjs/bundle.cjs.map +7 -0
- package/dist/esm/api/createProcessor.js +131 -0
- package/dist/esm/api/streaming.js +116 -0
- package/dist/esm/exports.js +32 -0
- package/dist/esm/index.js +2 -0
- package/dist/esm/integrations/AudioProvider.js +1 -0
- package/dist/esm/integrations/DatasetProvider.js +92 -0
- package/dist/esm/integrations/EmbeddingGenerator.js +1 -0
- package/dist/esm/integrations/GptTokenizerTokenizer.js +65 -0
- package/dist/esm/integrations/LLMProvider.js +1 -0
- package/dist/esm/integrations/Tokenizer.js +1 -0
- package/dist/esm/integrations/VectorDatabase.js +1 -0
- package/dist/esm/integrations/enableIntegrations.js +3 -0
- package/dist/esm/integrations/integrations.js +19 -0
- package/dist/esm/integrations/openai/OpenAIEmbeddingGenerator.js +23 -0
- package/dist/esm/model/DataValue.js +176 -0
- package/dist/esm/model/Dataset.js +1 -0
- package/dist/esm/model/EditorDefinition.js +1 -0
- package/dist/esm/model/GraphProcessor.js +1198 -0
- package/dist/esm/model/NodeBase.js +1 -0
- package/dist/esm/model/NodeBodySpec.js +1 -0
- package/dist/esm/model/NodeDefinition.js +12 -0
- package/dist/esm/model/NodeGraph.js +14 -0
- package/dist/esm/model/NodeImpl.js +49 -0
- package/dist/esm/model/NodeRegistration.js +144 -0
- package/dist/esm/model/Nodes.js +227 -0
- package/dist/esm/model/PluginLoadSpec.js +1 -0
- package/dist/esm/model/ProcessContext.js +1 -0
- package/dist/esm/model/Project.js +2 -0
- package/dist/esm/model/RivetPlugin.js +1 -0
- package/dist/esm/model/RivetUIContext.js +1 -0
- package/dist/esm/model/Settings.js +1 -0
- package/dist/esm/model/nodes/AbortGraphNode.js +100 -0
- package/dist/esm/model/nodes/AppendToDatasetNode.js +115 -0
- package/dist/esm/model/nodes/ArrayNode.js +144 -0
- package/dist/esm/model/nodes/AssembleMessageNode.js +199 -0
- package/dist/esm/model/nodes/AssemblePromptNode.js +129 -0
- package/dist/esm/model/nodes/AudioNode.js +101 -0
- package/dist/esm/model/nodes/BooleanNode.js +74 -0
- package/dist/esm/model/nodes/CallGraphNode.js +136 -0
- package/dist/esm/model/nodes/ChatNode.js +964 -0
- package/dist/esm/model/nodes/ChunkNode.js +166 -0
- package/dist/esm/model/nodes/CoalesceNode.js +104 -0
- package/dist/esm/model/nodes/CodeNode.js +136 -0
- package/dist/esm/model/nodes/CommentNode.js +69 -0
- package/dist/esm/model/nodes/CompareNode.js +138 -0
- package/dist/esm/model/nodes/ContextNode.js +99 -0
- package/dist/esm/model/nodes/CreateDatasetNode.js +71 -0
- package/dist/esm/model/nodes/DatasetNearestNeigborsNode.js +97 -0
- package/dist/esm/model/nodes/DelayNode.js +105 -0
- package/dist/esm/model/nodes/DelegateFunctionCallNode.js +136 -0
- package/dist/esm/model/nodes/DestructureNode.js +86 -0
- package/dist/esm/model/nodes/EvaluateNode.js +141 -0
- package/dist/esm/model/nodes/ExternalCallNode.js +162 -0
- package/dist/esm/model/nodes/ExtractJsonNode.js +122 -0
- package/dist/esm/model/nodes/ExtractMarkdownCodeBlocksNode.js +100 -0
- package/dist/esm/model/nodes/ExtractObjectPathNode.js +128 -0
- package/dist/esm/model/nodes/ExtractRegexNode.js +201 -0
- package/dist/esm/model/nodes/ExtractYamlNode.js +214 -0
- package/dist/esm/model/nodes/FilterNode.js +73 -0
- package/dist/esm/model/nodes/GetAllDatasetsNode.js +53 -0
- package/dist/esm/model/nodes/GetDatasetRowNode.js +99 -0
- package/dist/esm/model/nodes/GetEmbeddingNode.js +130 -0
- package/dist/esm/model/nodes/GetGlobalNode.js +139 -0
- package/dist/esm/model/nodes/GptFunctionNode.js +169 -0
- package/dist/esm/model/nodes/GraphInputNode.js +130 -0
- package/dist/esm/model/nodes/GraphOutputNode.js +104 -0
- package/dist/esm/model/nodes/GraphReferenceNode.js +128 -0
- package/dist/esm/model/nodes/HashNode.js +97 -0
- package/dist/esm/model/nodes/HttpCallNode.js +257 -0
- package/dist/esm/model/nodes/IfElseNode.js +138 -0
- package/dist/esm/model/nodes/IfNode.js +124 -0
- package/dist/esm/model/nodes/ImageNode.js +107 -0
- package/dist/esm/model/nodes/JoinNode.js +135 -0
- package/dist/esm/model/nodes/ListGraphsNode.js +61 -0
- package/dist/esm/model/nodes/LoadDatasetNode.js +83 -0
- package/dist/esm/model/nodes/LoopControllerNode.js +206 -0
- package/dist/esm/model/nodes/MatchNode.js +137 -0
- package/dist/esm/model/nodes/NumberNode.js +86 -0
- package/dist/esm/model/nodes/ObjectNode.js +121 -0
- package/dist/esm/model/nodes/PassthroughNode.js +78 -0
- package/dist/esm/model/nodes/PlayAudioNode.js +61 -0
- package/dist/esm/model/nodes/PopNode.js +89 -0
- package/dist/esm/model/nodes/PromptNode.js +227 -0
- package/dist/esm/model/nodes/RaceInputsNode.js +86 -0
- package/dist/esm/model/nodes/RaiseEventNode.js +84 -0
- package/dist/esm/model/nodes/RandomNumberNode.js +106 -0
- package/dist/esm/model/nodes/ReadDirectoryNode.js +165 -0
- package/dist/esm/model/nodes/ReadFileNode.js +114 -0
- package/dist/esm/model/nodes/ReplaceDatasetNode.js +118 -0
- package/dist/esm/model/nodes/SetGlobalNode.js +124 -0
- package/dist/esm/model/nodes/ShuffleNode.js +64 -0
- package/dist/esm/model/nodes/SliceNode.js +100 -0
- package/dist/esm/model/nodes/SplitNode.js +101 -0
- package/dist/esm/model/nodes/SubGraphNode.js +181 -0
- package/dist/esm/model/nodes/TextNode.js +97 -0
- package/dist/esm/model/nodes/ToJsonNode.js +78 -0
- package/dist/esm/model/nodes/ToYamlNode.js +68 -0
- package/dist/esm/model/nodes/TrimChatMessagesNode.js +120 -0
- package/dist/esm/model/nodes/URLReferenceNode.js +79 -0
- package/dist/esm/model/nodes/UserInputNode.js +111 -0
- package/dist/esm/model/nodes/VectorNearestNeighborsNode.js +127 -0
- package/dist/esm/model/nodes/VectorStoreNode.js +124 -0
- package/dist/esm/model/nodes/WaitForEventNode.js +88 -0
- package/dist/esm/native/BaseDir.js +32 -0
- package/dist/esm/native/BrowserNativeApi.js +19 -0
- package/dist/esm/native/NativeApi.js +1 -0
- package/dist/esm/plugins/aidon/index.js +2 -0
- package/dist/esm/plugins/aidon/nodes/ChatAidonNode.js +215 -0
- package/dist/esm/plugins/aidon/plugin.js +9 -0
- package/dist/esm/plugins/anthropic/anthropic.js +187 -0
- package/dist/esm/plugins/anthropic/fetchEventSource.js +106 -0
- package/dist/esm/plugins/anthropic/index.js +2 -0
- package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js +652 -0
- package/dist/esm/plugins/anthropic/plugin.js +18 -0
- package/dist/esm/plugins/assemblyAi/LemurActionItemsNode.js +75 -0
- package/dist/esm/plugins/assemblyAi/LemurQaNode.js +155 -0
- package/dist/esm/plugins/assemblyAi/LemurSummaryNode.js +79 -0
- package/dist/esm/plugins/assemblyAi/LemurTaskNode.js +82 -0
- package/dist/esm/plugins/assemblyAi/TranscribeAudioNode.js +125 -0
- package/dist/esm/plugins/assemblyAi/index.js +2 -0
- package/dist/esm/plugins/assemblyAi/lemurHelpers.js +114 -0
- package/dist/esm/plugins/assemblyAi/plugin.js +32 -0
- package/dist/esm/plugins/autoevals/AutoEvalsNode.js +223 -0
- package/dist/esm/plugins/autoevals/index.js +2 -0
- package/dist/esm/plugins/autoevals/plugin.js +8 -0
- package/dist/esm/plugins/gentrace/index.js +2 -0
- package/dist/esm/plugins/gentrace/plugin.js +192 -0
- package/dist/esm/plugins/google/google.js +60 -0
- package/dist/esm/plugins/google/index.js +2 -0
- package/dist/esm/plugins/google/nodes/ChatGoogleNode.js +364 -0
- package/dist/esm/plugins/google/plugin.js +32 -0
- package/dist/esm/plugins/huggingface/index.js +2 -0
- package/dist/esm/plugins/huggingface/nodes/ChatHuggingFace.js +243 -0
- package/dist/esm/plugins/huggingface/nodes/TextToImageHuggingFace.js +189 -0
- package/dist/esm/plugins/huggingface/plugin.js +26 -0
- package/dist/esm/plugins/openai/handleOpenaiError.js +17 -0
- package/dist/esm/plugins/openai/index.js +2 -0
- package/dist/esm/plugins/openai/nodes/AttachAssistantFileNode.js +123 -0
- package/dist/esm/plugins/openai/nodes/CreateAssistantNode.js +289 -0
- package/dist/esm/plugins/openai/nodes/CreateThreadMessageNode.js +176 -0
- package/dist/esm/plugins/openai/nodes/CreateThreadNode.js +157 -0
- package/dist/esm/plugins/openai/nodes/DeleteAssistantNode.js +104 -0
- package/dist/esm/plugins/openai/nodes/DeleteThreadNode.js +97 -0
- package/dist/esm/plugins/openai/nodes/GetAssistantNode.js +118 -0
- package/dist/esm/plugins/openai/nodes/GetOpenAIFileNode.js +100 -0
- package/dist/esm/plugins/openai/nodes/GetThreadNode.js +108 -0
- package/dist/esm/plugins/openai/nodes/ListAssistantsNode.js +202 -0
- package/dist/esm/plugins/openai/nodes/ListOpenAIFilesNode.js +94 -0
- package/dist/esm/plugins/openai/nodes/ListThreadMessagesNode.js +224 -0
- package/dist/esm/plugins/openai/nodes/RunThreadNode.js +630 -0
- package/dist/esm/plugins/openai/nodes/ThreadMessageNode.js +145 -0
- package/dist/esm/plugins/openai/nodes/UploadFileNode.js +121 -0
- package/dist/esm/plugins/openai/plugin.js +44 -0
- package/dist/esm/plugins/pinecone/PineconeVectorDatabase.js +88 -0
- package/dist/esm/plugins/pinecone/index.js +2 -0
- package/dist/esm/plugins/pinecone/plugin.js +19 -0
- package/dist/esm/plugins.js +21 -0
- package/dist/esm/recording/ExecutionRecorder.js +177 -0
- package/dist/esm/recording/RecordedEvents.js +1 -0
- package/dist/esm/utils/assertNever.js +3 -0
- package/dist/esm/utils/base64.js +25 -0
- package/dist/esm/utils/chatMessageToOpenAIChatCompletionMessage.js +60 -0
- package/dist/esm/utils/coerceType.js +322 -0
- package/dist/esm/utils/compatibility.js +27 -0
- package/dist/esm/utils/copyToClipboard.js +10 -0
- package/dist/esm/utils/defaults.js +2 -0
- package/dist/esm/utils/errors.js +7 -0
- package/dist/esm/utils/expectType.js +34 -0
- package/dist/esm/utils/fetchEventSource.js +120 -0
- package/dist/esm/utils/genericUtilFunctions.js +25 -0
- package/dist/esm/utils/getPluginConfig.js +23 -0
- package/dist/esm/utils/handleEscapeCharacters.js +11 -0
- package/dist/esm/utils/index.js +14 -0
- package/dist/esm/utils/inputs.js +16 -0
- package/dist/esm/utils/interpolation.js +6 -0
- package/dist/esm/utils/misc.js +1 -0
- package/dist/esm/utils/newId.js +4 -0
- package/dist/esm/utils/openai.js +219 -0
- package/dist/esm/utils/outputs.js +14 -0
- package/dist/esm/utils/serialization/serialization.js +86 -0
- package/dist/esm/utils/serialization/serializationUtils.js +13 -0
- package/dist/esm/utils/serialization/serialization_v1.js +19 -0
- package/dist/esm/utils/serialization/serialization_v2.js +24 -0
- package/dist/esm/utils/serialization/serialization_v3.js +145 -0
- package/dist/esm/utils/serialization/serialization_v4.js +200 -0
- package/dist/esm/utils/symbols.js +2 -0
- package/dist/esm/utils/time.js +14 -0
- package/dist/esm/utils/typeSafety.js +42 -0
- package/dist/types/api/createProcessor.d.ts +37 -0
- package/dist/types/api/streaming.d.ts +56 -0
- package/dist/types/exports.d.ts +33 -0
- package/dist/types/index.d.ts +4 -0
- package/dist/types/integrations/AudioProvider.d.ts +4 -0
- package/dist/types/integrations/DatasetProvider.d.ts +33 -0
- package/dist/types/integrations/EmbeddingGenerator.d.ts +3 -0
- package/dist/types/integrations/GptTokenizerTokenizer.d.ts +20 -0
- package/dist/types/integrations/LLMProvider.d.ts +7 -0
- package/dist/types/integrations/Tokenizer.d.ts +11 -0
- package/dist/types/integrations/VectorDatabase.d.ts +7 -0
- package/dist/types/integrations/enableIntegrations.d.ts +1 -0
- package/dist/types/integrations/integrations.d.ts +12 -0
- package/dist/types/integrations/openai/OpenAIEmbeddingGenerator.d.ts +10 -0
- package/dist/types/model/DataValue.d.ts +138 -0
- package/dist/types/model/Dataset.d.ts +19 -0
- package/dist/types/model/EditorDefinition.d.ts +142 -0
- package/dist/types/model/GraphProcessor.d.ts +192 -0
- package/dist/types/model/NodeBase.d.ts +110 -0
- package/dist/types/model/NodeBodySpec.d.ts +19 -0
- package/dist/types/model/NodeDefinition.d.ts +13 -0
- package/dist/types/model/NodeGraph.d.ts +15 -0
- package/dist/types/model/NodeImpl.d.ts +55 -0
- package/dist/types/model/NodeRegistration.d.ts +24 -0
- package/dist/types/model/Nodes.d.ts +84 -0
- package/dist/types/model/PluginLoadSpec.d.ts +17 -0
- package/dist/types/model/ProcessContext.d.ts +69 -0
- package/dist/types/model/Project.d.ts +17 -0
- package/dist/types/model/RivetPlugin.d.ts +45 -0
- package/dist/types/model/RivetUIContext.d.ts +18 -0
- package/dist/types/model/Settings.d.ts +15 -0
- package/dist/types/model/nodes/AbortGraphNode.d.ts +22 -0
- package/dist/types/model/nodes/AppendToDatasetNode.d.ts +21 -0
- package/dist/types/model/nodes/ArrayNode.d.ts +20 -0
- package/dist/types/model/nodes/AssembleMessageNode.d.ts +23 -0
- package/dist/types/model/nodes/AssemblePromptNode.d.ts +20 -0
- package/dist/types/model/nodes/AudioNode.d.ts +20 -0
- package/dist/types/model/nodes/BooleanNode.d.ts +19 -0
- package/dist/types/model/nodes/CallGraphNode.d.ts +16 -0
- package/dist/types/model/nodes/ChatNode.d.ts +77 -0
- package/dist/types/model/nodes/ChunkNode.d.ts +22 -0
- package/dist/types/model/nodes/CoalesceNode.d.ts +14 -0
- package/dist/types/model/nodes/CodeNode.d.ts +21 -0
- package/dist/types/model/nodes/CommentNode.d.ts +20 -0
- package/dist/types/model/nodes/CompareNode.d.ts +19 -0
- package/dist/types/model/nodes/ContextNode.d.ts +24 -0
- package/dist/types/model/nodes/CreateDatasetNode.d.ts +13 -0
- package/dist/types/model/nodes/DatasetNearestNeigborsNode.d.ts +19 -0
- package/dist/types/model/nodes/DelayNode.d.ts +20 -0
- package/dist/types/model/nodes/DelegateFunctionCallNode.d.ts +25 -0
- package/dist/types/model/nodes/DestructureNode.d.ts +18 -0
- package/dist/types/model/nodes/EvaluateNode.d.ts +19 -0
- package/dist/types/model/nodes/ExternalCallNode.d.ts +22 -0
- package/dist/types/model/nodes/ExtractJsonNode.d.ts +13 -0
- package/dist/types/model/nodes/ExtractMarkdownCodeBlocksNode.d.ts +12 -0
- package/dist/types/model/nodes/ExtractObjectPathNode.d.ts +19 -0
- package/dist/types/model/nodes/ExtractRegexNode.d.ts +22 -0
- package/dist/types/model/nodes/ExtractYamlNode.d.ts +21 -0
- package/dist/types/model/nodes/FilterNode.d.ts +13 -0
- package/dist/types/model/nodes/GetAllDatasetsNode.d.ts +14 -0
- package/dist/types/model/nodes/GetDatasetRowNode.d.ts +19 -0
- package/dist/types/model/nodes/GetEmbeddingNode.d.ts +24 -0
- package/dist/types/model/nodes/GetGlobalNode.d.ts +29 -0
- package/dist/types/model/nodes/GptFunctionNode.d.ts +25 -0
- package/dist/types/model/nodes/GraphInputNode.d.ts +24 -0
- package/dist/types/model/nodes/GraphOutputNode.d.ts +22 -0
- package/dist/types/model/nodes/GraphReferenceNode.d.ts +22 -0
- package/dist/types/model/nodes/HashNode.d.ts +17 -0
- package/dist/types/model/nodes/HttpCallNode.d.ts +27 -0
- package/dist/types/model/nodes/IfElseNode.d.ts +18 -0
- package/dist/types/model/nodes/IfNode.d.ts +17 -0
- package/dist/types/model/nodes/ImageNode.d.ts +20 -0
- package/dist/types/model/nodes/JoinNode.d.ts +21 -0
- package/dist/types/model/nodes/ListGraphsNode.d.ts +14 -0
- package/dist/types/model/nodes/LoadDatasetNode.d.ts +17 -0
- package/dist/types/model/nodes/LoopControllerNode.d.ts +20 -0
- package/dist/types/model/nodes/MatchNode.d.ts +19 -0
- package/dist/types/model/nodes/NumberNode.d.ts +21 -0
- package/dist/types/model/nodes/ObjectNode.d.ts +18 -0
- package/dist/types/model/nodes/PassthroughNode.d.ts +14 -0
- package/dist/types/model/nodes/PlayAudioNode.d.ts +15 -0
- package/dist/types/model/nodes/PopNode.d.ts +19 -0
- package/dist/types/model/nodes/PromptNode.d.ts +23 -0
- package/dist/types/model/nodes/RaceInputsNode.d.ts +17 -0
- package/dist/types/model/nodes/RaiseEventNode.d.ts +22 -0
- package/dist/types/model/nodes/RandomNumberNode.d.ts +23 -0
- package/dist/types/model/nodes/ReadDirectoryNode.d.ts +30 -0
- package/dist/types/model/nodes/ReadFileNode.d.ts +23 -0
- package/dist/types/model/nodes/ReplaceDatasetNode.d.ts +21 -0
- package/dist/types/model/nodes/SetGlobalNode.d.ts +23 -0
- package/dist/types/model/nodes/ShuffleNode.d.ts +12 -0
- package/dist/types/model/nodes/SliceNode.d.ts +22 -0
- package/dist/types/model/nodes/SplitNode.d.ts +19 -0
- package/dist/types/model/nodes/SubGraphNode.d.ts +29 -0
- package/dist/types/model/nodes/TextNode.d.ts +18 -0
- package/dist/types/model/nodes/ToJsonNode.d.ts +18 -0
- package/dist/types/model/nodes/ToYamlNode.d.ts +13 -0
- package/dist/types/model/nodes/TrimChatMessagesNode.d.ts +20 -0
- package/dist/types/model/nodes/URLReferenceNode.d.ts +19 -0
- package/dist/types/model/nodes/UserInputNode.d.ts +21 -0
- package/dist/types/model/nodes/VectorNearestNeighborsNode.d.ts +24 -0
- package/dist/types/model/nodes/VectorStoreNode.d.ts +22 -0
- package/dist/types/model/nodes/WaitForEventNode.d.ts +21 -0
- package/dist/types/native/BaseDir.d.ts +29 -0
- package/dist/types/native/BrowserNativeApi.d.ts +11 -0
- package/dist/types/native/NativeApi.d.ts +17 -0
- package/dist/types/plugins/aidon/index.d.ts +2 -0
- package/dist/types/plugins/aidon/nodes/ChatAidonNode.d.ts +3 -0
- package/dist/types/plugins/aidon/plugin.d.ts +2 -0
- package/dist/types/plugins/anthropic/anthropic.d.ts +216 -0
- package/dist/types/plugins/anthropic/fetchEventSource.d.ts +11 -0
- package/dist/types/plugins/anthropic/index.d.ts +2 -0
- package/dist/types/plugins/anthropic/nodes/ChatAnthropicNode.d.ts +30 -0
- package/dist/types/plugins/anthropic/plugin.d.ts +2 -0
- package/dist/types/plugins/assemblyAi/LemurActionItemsNode.d.ts +6 -0
- package/dist/types/plugins/assemblyAi/LemurQaNode.d.ts +22 -0
- package/dist/types/plugins/assemblyAi/LemurSummaryNode.d.ts +8 -0
- package/dist/types/plugins/assemblyAi/LemurTaskNode.d.ts +8 -0
- package/dist/types/plugins/assemblyAi/TranscribeAudioNode.d.ts +7 -0
- package/dist/types/plugins/assemblyAi/index.d.ts +2 -0
- package/dist/types/plugins/assemblyAi/lemurHelpers.d.ts +67 -0
- package/dist/types/plugins/assemblyAi/plugin.d.ts +2 -0
- package/dist/types/plugins/autoevals/AutoEvalsNode.d.ts +8 -0
- package/dist/types/plugins/autoevals/index.d.ts +2 -0
- package/dist/types/plugins/autoevals/plugin.d.ts +2 -0
- package/dist/types/plugins/gentrace/index.d.ts +2 -0
- package/dist/types/plugins/gentrace/plugin.d.ts +5 -0
- package/dist/types/plugins/google/google.d.ts +60 -0
- package/dist/types/plugins/google/index.d.ts +2 -0
- package/dist/types/plugins/google/nodes/ChatGoogleNode.d.ts +27 -0
- package/dist/types/plugins/google/plugin.d.ts +2 -0
- package/dist/types/plugins/huggingface/index.d.ts +2 -0
- package/dist/types/plugins/huggingface/nodes/ChatHuggingFace.d.ts +24 -0
- package/dist/types/plugins/huggingface/nodes/TextToImageHuggingFace.d.ts +20 -0
- package/dist/types/plugins/huggingface/plugin.d.ts +2 -0
- package/dist/types/plugins/openai/handleOpenaiError.d.ts +1 -0
- package/dist/types/plugins/openai/index.d.ts +2 -0
- package/dist/types/plugins/openai/nodes/AttachAssistantFileNode.d.ts +10 -0
- package/dist/types/plugins/openai/nodes/CreateAssistantNode.d.ts +26 -0
- package/dist/types/plugins/openai/nodes/CreateThreadMessageNode.d.ts +15 -0
- package/dist/types/plugins/openai/nodes/CreateThreadNode.d.ts +13 -0
- package/dist/types/plugins/openai/nodes/DeleteAssistantNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/DeleteThreadNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/GetAssistantNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/GetOpenAIFileNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/GetThreadNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/ListAssistantsNode.d.ts +14 -0
- package/dist/types/plugins/openai/nodes/ListOpenAIFilesNode.d.ts +8 -0
- package/dist/types/plugins/openai/nodes/ListThreadMessagesNode.d.ts +16 -0
- package/dist/types/plugins/openai/nodes/RunThreadNode.d.ts +28 -0
- package/dist/types/plugins/openai/nodes/ThreadMessageNode.d.ts +14 -0
- package/dist/types/plugins/openai/nodes/UploadFileNode.d.ts +7 -0
- package/dist/types/plugins/openai/plugin.d.ts +2 -0
- package/dist/types/plugins/pinecone/PineconeVectorDatabase.d.ts +9 -0
- package/dist/types/plugins/pinecone/index.d.ts +2 -0
- package/dist/types/plugins/pinecone/plugin.d.ts +2 -0
- package/dist/types/plugins.d.ts +20 -0
- package/dist/types/recording/ExecutionRecorder.d.ts +25 -0
- package/dist/types/recording/RecordedEvents.d.ts +100 -0
- package/dist/types/utils/assertNever.d.ts +1 -0
- package/dist/types/utils/base64.d.ts +2 -0
- package/dist/types/utils/chatMessageToOpenAIChatCompletionMessage.d.ts +3 -0
- package/dist/types/utils/coerceType.d.ts +6 -0
- package/dist/types/utils/compatibility.d.ts +3 -0
- package/dist/types/utils/copyToClipboard.d.ts +1 -0
- package/dist/types/utils/defaults.d.ts +2 -0
- package/dist/types/utils/errors.d.ts +2 -0
- package/dist/types/utils/expectType.d.ts +3 -0
- package/dist/types/utils/fetchEventSource.d.ts +12 -0
- package/dist/types/utils/genericUtilFunctions.d.ts +21 -0
- package/dist/types/utils/getPluginConfig.d.ts +2 -0
- package/dist/types/utils/handleEscapeCharacters.d.ts +2 -0
- package/dist/types/utils/index.d.ts +14 -0
- package/dist/types/utils/inputs.d.ts +3 -0
- package/dist/types/utils/interpolation.d.ts +1 -0
- package/dist/types/utils/misc.d.ts +1 -0
- package/dist/types/utils/newId.d.ts +1 -0
- package/dist/types/utils/openai.d.ts +739 -0
- package/dist/types/utils/outputs.d.ts +3 -0
- package/dist/types/utils/serialization/serialization.d.ts +12 -0
- package/dist/types/utils/serialization/serializationUtils.d.ts +6 -0
- package/dist/types/utils/serialization/serialization_v1.d.ts +3 -0
- package/dist/types/utils/serialization/serialization_v2.d.ts +3 -0
- package/dist/types/utils/serialization/serialization_v3.d.ts +19 -0
- package/dist/types/utils/serialization/serialization_v4.d.ts +9 -0
- package/dist/types/utils/symbols.d.ts +3 -0
- package/dist/types/utils/time.d.ts +22 -0
- package/dist/types/utils/typeSafety.d.ts +37 -0
- package/package.json +97 -0
--- /dev/null
+++ package/dist/esm/model/nodes/ChatNode.js
@@ -0,0 +1,964 @@
+import {} from '../NodeBase.js';
+import { nanoid } from 'nanoid/non-secure';
+import { NodeImpl } from '../NodeImpl.js';
+import { getScalarTypeOf, isArrayDataValue } from '../DataValue.js';
+import { addWarning } from '../../utils/outputs.js';
+import { OpenAIError, openAiModelOptions, openaiModels, streamChatCompletions, } from '../../utils/openai.js';
+import retry from 'p-retry';
+import { match } from 'ts-pattern';
+import { coerceType, coerceTypeOptional } from '../../utils/coerceType.js';
+import {} from '../ProcessContext.js';
+import {} from '../../index.js';
+import { dedent } from 'ts-dedent';
+import { getInputOrData, cleanHeaders } from '../../utils/inputs.js';
+import { getError } from '../../utils/errors.js';
+import { nodeDefinition } from '../NodeDefinition.js';
+import { DEFAULT_CHAT_ENDPOINT } from '../../utils/defaults.js';
+import { chatMessageToOpenAIChatCompletionMessage } from '../../utils/chatMessageToOpenAIChatCompletionMessage.js';
+// Temporary
+const cache = new Map();
+export class ChatNodeImpl extends NodeImpl {
+    static create() {
+        const chartNode = {
+            type: 'chat',
+            title: 'Chat',
+            id: nanoid(),
+            visualData: {
+                x: 0,
+                y: 0,
+                width: 200,
+            },
+            data: {
+                model: 'gpt-4o-mini',
+                useModelInput: false,
+                temperature: 0.5,
+                useTemperatureInput: false,
+                top_p: 1,
+                useTopPInput: false,
+                useTopP: false,
+                useUseTopPInput: false,
+                maxTokens: 1024,
+                useMaxTokensInput: false,
+                useStop: false,
+                stop: '',
+                useStopInput: false,
+                presencePenalty: undefined,
+                usePresencePenaltyInput: false,
+                frequencyPenalty: undefined,
+                useFrequencyPenaltyInput: false,
+                user: undefined,
+                useUserInput: false,
+                enableFunctionUse: false,
+                cache: false,
+                useAsGraphPartialOutput: true,
+                parallelFunctionCalling: true,
+                additionalParameters: [],
+                useAdditionalParametersInput: false,
+            },
+        };
+        return chartNode;
+    }
+    getInputDefinitions() {
+        const inputs = [];
+        if (this.data.useEndpointInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'endpoint',
+                title: 'Endpoint',
+                description: 'The endpoint to use for the OpenAI API. You can use this to replace with any OpenAI-compatible API. Leave blank for the default: https://api.openai.com/api/v1/chat/completions',
+            });
+        }
+        inputs.push({
+            id: 'systemPrompt',
+            title: 'System Prompt',
+            dataType: 'string',
+            required: false,
+            description: 'The system prompt to send to the model.',
+            coerced: true,
+        });
+        if (this.data.useModelInput) {
+            inputs.push({
+                id: 'model',
+                title: 'Model',
+                dataType: 'string',
+                required: false,
+                description: 'The model to use for the chat.',
+            });
+        }
+        if (this.data.useTemperatureInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'temperature',
+                title: 'Temperature',
+                description: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
+            });
+        }
+        if (this.data.useTopPInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'top_p',
+                title: 'Top P',
+                description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
+            });
+        }
+        if (this.data.useUseTopPInput) {
+            inputs.push({
+                dataType: 'boolean',
+                id: 'useTopP',
+                title: 'Use Top P',
+                description: 'Whether to use top p sampling, or temperature sampling.',
+            });
+        }
+        if (this.data.useMaxTokensInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'maxTokens',
+                title: 'Max Tokens',
+                description: 'The maximum number of tokens to generate in the chat completion.',
+            });
+        }
+        if (this.data.useStopInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'stop',
+                title: 'Stop',
+                description: 'A sequence where the API will stop generating further tokens.',
+            });
+        }
+        if (this.data.usePresencePenaltyInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'presencePenalty',
+                title: 'Presence Penalty',
+                description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`,
+            });
+        }
+        if (this.data.useFrequencyPenaltyInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'frequencyPenalty',
+                title: 'Frequency Penalty',
+                description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
+            });
+        }
+        if (this.data.useUserInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'user',
+                title: 'User',
+                description: 'A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.',
+            });
+        }
+        if (this.data.useNumberOfChoicesInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'numberOfChoices',
+                title: 'Number of Choices',
+                description: 'If greater than 1, the model will return multiple choices and the response will be an array.',
+            });
+        }
+        if (this.data.useHeadersInput) {
+            inputs.push({
+                dataType: 'object',
+                id: 'headers',
+                title: 'Headers',
+                description: 'Additional headers to send to the API.',
+            });
+        }
+        inputs.push({
+            dataType: ['chat-message', 'chat-message[]'],
+            id: 'prompt',
+            title: 'Prompt',
+            description: 'The prompt message or messages to send to the model.',
+            coerced: true,
+        });
+        if (this.data.enableFunctionUse) {
+            inputs.push({
+                dataType: ['gpt-function', 'gpt-function[]'],
+                id: 'functions',
+                title: 'Functions',
+                description: 'Functions to use in the model. To connect multiple functions, use an Array node.',
+                coerced: false,
+            });
+        }
+        if (this.data.useSeedInput) {
+            inputs.push({
+                dataType: 'number',
+                id: 'seed',
+                title: 'Seed',
+                coerced: true,
+                description: 'If specified, OpenAI will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.',
+            });
+        }
+        if (this.data.useToolChoiceInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'toolChoice',
+                title: 'Tool Choice',
+                coerced: true,
+                description: 'Controls which (if any) function is called by the model. `none` is the default when no functions are present. `auto` is the default if functions are present. `function` forces the model to call a function.',
+            });
+        }
+        if (this.data.useToolChoiceInput || this.data.useToolChoiceFunctionInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'toolChoiceFunction',
+                title: 'Tool Choice Function',
+                coerced: true,
+                description: 'The name of the function to force the model to call.',
+            });
+        }
+        if (this.data.useResponseFormatInput) {
+            inputs.push({
+                dataType: 'string',
+                id: 'responseFormat',
+                title: 'Response Format',
+                coerced: true,
+                description: 'The format to force the model to reply in.',
+            });
+        }
+        if (this.data.useAdditionalParametersInput) {
+            inputs.push({
+                dataType: 'object',
+                id: 'additionalParameters',
+                title: 'Additional Parameters',
+                description: 'Additional chat completion parameters to send to the API.',
+            });
+        }
+        if (this.data.responseFormat === 'json_schema') {
+            inputs.push({
+                dataType: 'object',
+                id: 'responseSchema',
+                title: 'Response Schema',
+                description: 'The JSON schema that the response will adhere to (Structured Outputs).',
+                required: true,
+            });
+            if (this.data.useResponseSchemaNameInput) {
+                inputs.push({
+                    dataType: 'string',
+                    id: 'responseSchemaName',
+                    title: 'Response Schema Name',
+                    description: 'The name of the JSON schema that the response will adhere to (Structured Outputs).',
+                    required: false,
+                });
+            }
+        }
+        return inputs;
+    }
+    getOutputDefinitions() {
+        const outputs = [];
+        if (this.data.useNumberOfChoicesInput || (this.data.numberOfChoices ?? 1) > 1) {
+            outputs.push({
+                dataType: 'string[]',
+                id: 'response',
+                title: 'Responses',
+                description: 'All responses from the model.',
+            });
+        }
+        else {
+            outputs.push({
+                dataType: 'string',
+                id: 'response',
+                title: 'Response',
+                description: 'The textual response from the model.',
+            });
+        }
+        if (this.data.enableFunctionUse) {
+            if (this.data.parallelFunctionCalling) {
+                outputs.push({
+                    dataType: 'object[]',
+                    id: 'function-calls',
+                    title: 'Function Calls',
+                    description: 'The function calls that were made, if any.',
+                });
+            }
+            else {
+                outputs.push({
+                    dataType: 'object',
+                    id: 'function-call',
+                    title: 'Function Call',
+                    description: 'The function call that was made, if any.',
+                });
+            }
+        }
+        outputs.push({
+            dataType: 'chat-message[]',
+            id: 'in-messages',
+            title: 'Messages Sent',
+            description: 'All messages sent to the model.',
+        });
+        if (!(this.data.useNumberOfChoicesInput || (this.data.numberOfChoices ?? 1) > 1)) {
+            outputs.push({
+                dataType: 'chat-message[]',
+                id: 'all-messages',
+                title: 'All Messages',
+                description: 'All messages, with the response appended.',
+            });
+        }
+        outputs.push({
+            dataType: 'number',
+            id: 'responseTokens',
+            title: 'Response Tokens',
+            description: 'The number of tokens in the response from the LLM. For a multi-response, this is the sum.',
+        });
+        return outputs;
+    }
+    static getUIData() {
+        return {
+            infoBoxBody: dedent `
+        Makes a call to an LLM chat model. Supports GPT and any OpenAI-compatible API. The settings contains many options for tweaking the model's behavior.
+
+        The \`System Prompt\` input specifies a system prompt as the first message to the model. This is useful for providing context to the model.
+
+        The \`Prompt\` input takes one or more strings or chat-messages (from a Prompt node) to send to the model.
+      `,
+            contextMenuTitle: 'Chat',
+            infoBoxTitle: 'Chat Node',
+            group: ['Common', 'AI'],
+        };
+    }
+    getEditors() {
+        return [
+            {
+                type: 'dropdown',
+                label: 'GPT Model',
+                dataKey: 'model',
+                useInputToggleDataKey: 'useModelInput',
+                options: openAiModelOptions,
+                disableIf: (data) => {
+                    return !!data.overrideModel?.trim();
+                },
+                helperMessage: (data) => {
+                    if (data.overrideModel?.trim()) {
+                        return `Model overridden to: ${data.overrideModel}`;
+                    }
+                    if (data.model === 'local-model') {
+                        return 'Local model is an indicator for your own convenience, it does not affect the local LLM used.';
+                    }
+                },
+            },
+            {
+                type: 'group',
+                label: 'Parameters',
+                editors: [
+                    {
+                        type: 'number',
+                        label: 'Temperature',
+                        dataKey: 'temperature',
+                        useInputToggleDataKey: 'useTemperatureInput',
+                        min: 0,
+                        max: 2,
+                        step: 0.1,
+                        helperMessage: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Top P',
+                        dataKey: 'top_p',
+                        useInputToggleDataKey: 'useTopPInput',
+                        min: 0,
+                        max: 1,
+                        step: 0.1,
+                        helperMessage: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
+                    },
+                    {
+                        type: 'toggle',
+                        label: 'Use Top P',
+                        dataKey: 'useTopP',
+                        useInputToggleDataKey: 'useUseTopPInput',
+                        helperMessage: 'Whether to use top p sampling, or temperature sampling.',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Max Tokens',
+                        dataKey: 'maxTokens',
+                        useInputToggleDataKey: 'useMaxTokensInput',
+                        min: 0,
+                        max: Number.MAX_SAFE_INTEGER,
+                        step: 1,
+                        helperMessage: 'The maximum number of tokens to generate in the chat completion.',
+                    },
+                    {
+                        type: 'string',
+                        label: 'Stop',
+                        dataKey: 'stop',
+                        useInputToggleDataKey: 'useStopInput',
+                        helperMessage: 'A sequence where the API will stop generating further tokens.',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Presence Penalty',
+                        dataKey: 'presencePenalty',
+                        useInputToggleDataKey: 'usePresencePenaltyInput',
+                        min: 0,
+                        max: 2,
+                        step: 0.1,
+                        allowEmpty: true,
+                        helperMessage: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`,
+                    },
+                    {
+                        type: 'number',
+                        label: 'Frequency Penalty',
+                        dataKey: 'frequencyPenalty',
+                        useInputToggleDataKey: 'useFrequencyPenaltyInput',
+                        min: 0,
+                        max: 2,
+                        step: 0.1,
+                        allowEmpty: true,
+                        helperMessage: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`,
+                    },
+                    {
+                        type: 'dropdown',
+                        label: 'Response Format',
+                        dataKey: 'responseFormat',
+                        useInputToggleDataKey: 'useResponseFormatInput',
+                        options: [
+                            { value: '', label: 'Default' },
+                            { value: 'text', label: 'Text' },
+                            { value: 'json', label: 'JSON Object' },
+                            { value: 'json_schema', label: 'JSON Schema' },
+                        ],
+                        defaultValue: '',
+                        helperMessage: 'The format to force the model to reply in.',
+                    },
+                    {
+                        type: 'string',
+                        label: 'Response Schema Name',
+                        dataKey: 'responseSchemaName',
+                        useInputToggleDataKey: 'useResponseSchemaNameInput',
+                        helperMessage: 'The name of the JSON schema that the response will adhere to (Structured Outputs). Defaults to response_schema',
+                        hideIf: (data) => data.responseFormat !== 'json_schema',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Seed',
+                        dataKey: 'seed',
+                        useInputToggleDataKey: 'useSeedInput',
+                        step: 1,
+                        allowEmpty: true,
+                        helperMessage: 'If specified, OpenAI will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.',
+                    },
+                ],
+            },
+            {
+                type: 'group',
+                label: 'GPT Tools',
+                editors: [
+                    {
+                        type: 'toggle',
+                        label: 'Enable Function Use',
+                        dataKey: 'enableFunctionUse',
+                    },
+                    {
+                        type: 'toggle',
+                        label: 'Enable Parallel Function Calling',
+                        dataKey: 'parallelFunctionCalling',
+                        hideIf: (data) => !data.enableFunctionUse,
+                    },
+                    {
+                        type: 'dropdown',
+                        label: 'Tool Choice',
+                        dataKey: 'toolChoice',
+                        useInputToggleDataKey: 'useToolChoiceInput',
+                        options: [
+                            { value: '', label: 'Default' },
+                            { value: 'none', label: 'None' },
+                            { value: 'auto', label: 'Auto' },
+                            { value: 'function', label: 'Function' },
+                            { value: 'required', label: 'Required' },
+                        ],
+                        defaultValue: '',
+                        helperMessage: 'Controls which (if any) function is called by the model. None is the default when no functions are present. Auto is the default if functions are present.',
+                        hideIf: (data) => !data.enableFunctionUse,
+                    },
+                    {
+                        type: 'string',
+                        label: 'Tool Choice Function',
+                        dataKey: 'toolChoiceFunction',
+                        useInputToggleDataKey: 'useToolChoiceFunctionInput',
+                        helperMessage: 'The name of the function to force the model to call.',
+                        hideIf: (data) => data.toolChoice !== 'function' || !data.enableFunctionUse,
+                    },
+                ],
+            },
+            {
+                type: 'group',
+                label: 'Advanced',
+                editors: [
+                    {
+                        type: 'string',
+                        label: 'User',
+                        dataKey: 'user',
+                        useInputToggleDataKey: 'useUserInput',
+                        helperMessage: 'A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Number of Choices',
+                        dataKey: 'numberOfChoices',
+                        useInputToggleDataKey: 'useNumberOfChoicesInput',
+                        min: 1,
+                        max: 10,
+                        step: 1,
+                        defaultValue: 1,
+                        helperMessage: 'If greater than 1, the model will return multiple choices and the response will be an array.',
+                    },
+                    {
+                        type: 'string',
+                        label: 'Endpoint',
+                        dataKey: 'endpoint',
+                        useInputToggleDataKey: 'useEndpointInput',
+                        helperMessage: 'The endpoint to use for the OpenAI API. You can use this to replace with any OpenAI-compatible API. Leave blank for the default: https://api.openai.com/api/v1/chat/completions',
+                    },
+                    {
+                        type: 'string',
+                        label: 'Custom Model',
+                        dataKey: 'overrideModel',
+                        helperMessage: 'Overrides the model selected above with a custom string for the model.',
+                    },
+                    {
+                        type: 'number',
+                        label: 'Custom Max Tokens',
+                        dataKey: 'overrideMaxTokens',
+                        allowEmpty: true,
+                        helperMessage: 'Overrides the max number of tokens a model can support. Leave blank for preconfigured token limits.',
+                    },
+                    {
+                        type: 'keyValuePair',
+                        label: 'Headers',
+                        dataKey: 'headers',
+                        useInputToggleDataKey: 'useHeadersInput',
+                        keyPlaceholder: 'Header',
+                        helperMessage: 'Additional headers to send to the API.',
+                    },
+                    {
+                        type: 'toggle',
+                        label: 'Cache In Rivet',
+                        dataKey: 'cache',
+                        helperMessage: 'If on, requests with the same parameters and messages will be cached in Rivet, for immediate responses without an API call.',
+                    },
+                    {
+                        type: 'toggle',
+                        label: 'Use for subgraph partial output',
+                        dataKey: 'useAsGraphPartialOutput',
+                        helperMessage: 'If on, streaming responses from this node will be shown in Subgraph nodes that call this graph.',
+                    },
+                    {
+                        type: 'keyValuePair',
+                        label: 'Additional Parameters',
+                        dataKey: 'additionalParameters',
+                        useInputToggleDataKey: 'useAdditionalParametersInput',
+                        keyPlaceholder: 'Parameter',
+                        valuePlaceholder: 'Value',
+                        helperMessage: 'Additional chat completion parameters to send to the API. If the value appears to be a number, it will be sent as a number.',
+                    },
+                ],
+            },
+        ];
+    }
+    getBody() {
+        return dedent `
+      ${this.data.endpoint ? `${this.data.endpoint}` : ''}
+      ${this.data.useMaxTokensInput ? 'Max Tokens: (Using Input)' : `${this.data.maxTokens} tokens`}
+      Model: ${this.data.useModelInput ? '(Using Input)' : this.data.overrideModel || this.data.model}
+      ${this.data.useTopP ? 'Top P' : 'Temperature'}:
+      ${this.data.useTopP
+            ? this.data.useTopPInput
+                ? '(Using Input)'
+                : this.data.top_p
+            : this.data.useTemperatureInput
+                ? '(Using Input)'
+                : this.data.temperature}
+      ${this.data.useStop ? `Stop: ${this.data.useStopInput ? '(Using Input)' : this.data.stop}` : ''}
+      ${(this.data.frequencyPenalty ?? 0) !== 0
+            ? `Frequency Penalty: ${this.data.useFrequencyPenaltyInput ? '(Using Input)' : this.data.frequencyPenalty}`
+            : ''}
+      ${(this.data.presencePenalty ?? 0) !== 0
+            ? `Presence Penalty: ${this.data.usePresencePenaltyInput ? '(Using Input)' : this.data.presencePenalty}`
+            : ''}
+    `.trim();
+    }
+    async process(inputs, context) {
+        const output = {};
+        const model = getInputOrData(this.data, inputs, 'model');
+        const temperature = getInputOrData(this.data, inputs, 'temperature', 'number');
+        const topP = this.data.useTopPInput
+            ? coerceTypeOptional(inputs['top_p'], 'number') ?? this.data.top_p
+            : this.data.top_p;
+        const useTopP = getInputOrData(this.data, inputs, 'useTopP', 'boolean');
+        const stop = this.data.useStopInput
+            ? this.data.useStop
+                ? coerceTypeOptional(inputs['stop'], 'string') ?? this.data.stop
+                : undefined
+            : this.data.stop;
+        const presencePenalty = getInputOrData(this.data, inputs, 'presencePenalty', 'number');
+        const frequencyPenalty = getInputOrData(this.data, inputs, 'frequencyPenalty', 'number');
+        const numberOfChoices = getInputOrData(this.data, inputs, 'numberOfChoices', 'number');
+        const endpoint = getInputOrData(this.data, inputs, 'endpoint');
+        const overrideModel = getInputOrData(this.data, inputs, 'overrideModel');
+        const seed = getInputOrData(this.data, inputs, 'seed', 'number');
+        const responseFormat = getInputOrData(this.data, inputs, 'responseFormat');
+        const toolChoiceMode = getInputOrData(this.data, inputs, 'toolChoice', 'string');
+        const toolChoice = !toolChoiceMode || !this.data.enableFunctionUse
+            ? undefined
+            : toolChoiceMode === 'function'
+                ? {
+                    type: 'function',
+                    function: {
+                        name: getInputOrData(this.data, inputs, 'toolChoiceFunction', 'string'),
+                    },
+                }
+                : toolChoiceMode;
+        let responseSchema;
+        const responseSchemaInput = inputs['responseSchema'];
+        if (responseSchemaInput?.type === 'gpt-function') {
+            responseSchema = responseSchemaInput.value.parameters;
+        }
+        else if (responseSchemaInput != null) {
+            responseSchema = coerceType(responseSchemaInput, 'object');
+        }
+        const openaiResponseFormat = !responseFormat?.trim()
+            ? undefined
+            : responseFormat === 'json'
+                ? {
+                    type: 'json_object',
+                }
+                : responseFormat === 'json_schema'
+                    ? {
+                        type: 'json_schema',
+                        json_schema: {
+                            name: getInputOrData(this.data, inputs, 'responseSchemaName', 'string') || 'response_schema',
+                            strict: true,
+                            schema: responseSchema ?? {},
+                        },
+                    }
+                    : {
+                        type: 'text',
+                    };
+        const headersFromData = (this.data.headers ?? []).reduce((acc, header) => {
+            acc[header.key] = header.value;
+            return acc;
+        }, {});
+        const additionalHeaders = this.data.useHeadersInput
+            ? coerceTypeOptional(inputs['headers'], 'object') ??
+                headersFromData
+            : headersFromData;
+        const additionalParametersFromData = (this.data.additionalParameters ?? []).reduce((acc, param) => {
+            acc[param.key] = Number.isNaN(parseFloat(param.value)) ? param.value : parseFloat(param.value);
+            return acc;
+        }, {});
+        const additionalParameters = this.data.useAdditionalParametersInput
+            ? coerceTypeOptional(inputs['additionalParameters'], 'object') ?? additionalParametersFromData
+            : additionalParametersFromData;
+        // If using a model input, that's priority, otherwise override > main
+        const finalModel = this.data.useModelInput && inputs['model'] != null ? model : overrideModel || model;
+        const functions = coerceTypeOptional(inputs['functions'], 'gpt-function[]');
+        const tools = (functions ?? []).map((fn) => ({
+            function: fn,
+            type: 'function',
+        }));
+        const { messages } = getChatNodeMessages(inputs);
+        const completionMessages = await Promise.all(messages.map((message) => chatMessageToOpenAIChatCompletionMessage(message)));
+        let { maxTokens } = this.data;
+        const openaiModel = {
+            ...(openaiModels[model] ?? {
+                maxTokens: this.data.overrideMaxTokens ?? 8192,
+                cost: {
+                    completion: 0,
+                    prompt: 0,
+                },
+                displayName: 'Custom Model',
+            }),
+        };
+        if (this.data.overrideMaxTokens) {
+            openaiModel.maxTokens = this.data.overrideMaxTokens;
+        }
+        const isMultiResponse = this.data.useNumberOfChoicesInput || (this.data.numberOfChoices ?? 1) > 1;
+        // Resolve to final endpoint if configured in ProcessContext
+        const configuredEndpoint = endpoint || context.settings.openAiEndpoint || DEFAULT_CHAT_ENDPOINT;
+        const resolvedEndpointAndHeaders = context.getChatNodeEndpoint
+            ? await context.getChatNodeEndpoint(configuredEndpoint, finalModel)
+            : {
+                endpoint: configuredEndpoint,
+                headers: {},
+            };
+        const allAdditionalHeaders = cleanHeaders({
+            ...context.settings.chatNodeHeaders,
+            ...additionalHeaders,
+            ...resolvedEndpointAndHeaders.headers,
+        });
+        const tokenizerInfo = {
+            node: this.chartNode,
+            model: finalModel,
+            endpoint: resolvedEndpointAndHeaders.endpoint,
+        };
+        const tokenCount = await context.tokenizer.getTokenCountForMessages(messages, functions, tokenizerInfo);
+        if (tokenCount >= openaiModel.maxTokens) {
+            throw new Error(`The model ${model} can only handle ${openaiModel.maxTokens} tokens, but ${tokenCount} were provided in the prompts alone.`);
+        }
+        if (tokenCount + maxTokens > openaiModel.maxTokens) {
+            const message = `The model can only handle a maximum of ${openaiModel.maxTokens} tokens, but the prompts and max tokens together exceed this limit. The max tokens has been reduced to ${openaiModel.maxTokens - tokenCount}.`;
+            addWarning(output, message);
+            maxTokens = Math.floor((openaiModel.maxTokens - tokenCount) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
+        }
+        try {
+            return await retry(async () => {
+                const options = {
+                    messages: completionMessages,
+                    model: finalModel,
+                    temperature: useTopP ? undefined : temperature,
+                    top_p: useTopP ? topP : undefined,
+                    max_tokens: maxTokens,
+                    n: numberOfChoices,
+                    frequency_penalty: frequencyPenalty,
+                    presence_penalty: presencePenalty,
+                    stop: stop || undefined,
+                    tools: tools.length > 0 ? tools : undefined,
+                    endpoint: resolvedEndpointAndHeaders.endpoint,
+                    seed,
+                    response_format: openaiResponseFormat,
+                    tool_choice: toolChoice,
+                    ...additionalParameters,
+                };
+                const cacheKey = JSON.stringify(options);
+                if (this.data.cache) {
+                    const cached = cache.get(cacheKey);
+                    if (cached) {
+                        return cached;
+                    }
+                }
+                const startTime = Date.now();
+                const chunks = streamChatCompletions({
+                    auth: {
+                        apiKey: context.settings.openAiKey ?? '',
+                        organization: context.settings.openAiOrganization,
+                    },
+                    headers: allAdditionalHeaders,
+                    signal: context.signal,
+                    timeout: context.settings.chatNodeTimeout,
+                    ...options,
+                });
+                const responseChoicesParts = [];
+                // First array is the function calls per choice, inner array is the functions calls inside the choice
+                const functionCalls = [];
+                for await (const chunk of chunks) {
+                    if (!chunk.choices) {
+                        // Could be error for some reason 🤷‍♂️ but ignoring has worked for me so far.
+                        continue;
+                    }
+                    for (const { delta, index } of chunk.choices) {
+                        if (delta.content != null) {
+                            responseChoicesParts[index] ??= [];
+                            responseChoicesParts[index].push(delta.content);
+                        }
+                        if (delta.tool_calls) {
+                            // Are we sure that tool_calls will always be full and not a bunch of deltas?
+                            functionCalls[index] ??= [];
+                            for (const toolCall of delta.tool_calls) {
+                                functionCalls[index][toolCall.index] ??= {
+                                    type: 'function',
+                                    arguments: '',
+                                    lastParsedArguments: undefined,
+                                    name: '',
+                                    id: '',
+                                };
+                                if (toolCall.id) {
+                                    functionCalls[index][toolCall.index].id = toolCall.id;
+                                }
+                                if (toolCall.function.name) {
+                                    functionCalls[index][toolCall.index].name += toolCall.function.name;
+                                }
+                                if (toolCall.function.arguments) {
+                                    functionCalls[index][toolCall.index].arguments += toolCall.function.arguments;
+                                    try {
+                                        functionCalls[index][toolCall.index].lastParsedArguments = JSON.parse(functionCalls[index][toolCall.index].arguments);
+                                    }
+                                    catch (error) {
+                                        // Ignore
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    if (isMultiResponse) {
+                        output['response'] = {
+                            type: 'string[]',
+                            value: responseChoicesParts.map((parts) => parts.join('')),
+                        };
+                    }
+                    else {
+                        output['response'] = {
+                            type: 'string',
+                            value: responseChoicesParts[0]?.join('') ?? '',
+                        };
+                    }
+                    if (functionCalls.length > 0) {
+                        if (isMultiResponse) {
+                            output['function-call'] = {
+                                type: 'object[]',
+                                value: functionCalls.flat().map((functionCall) => ({
+                                    name: functionCall.name,
+                                    arguments: functionCall.lastParsedArguments,
+                                    id: functionCall.id,
+                                })),
+                            };
+                        }
+                        else {
+                            output['function-call'] = {
+                                type: 'object[]',
+                                value: functionCalls[0].map((functionCall) => ({
+                                    name: functionCall.name,
+                                    arguments: functionCall.lastParsedArguments,
+                                    id: functionCall.id,
+                                })),
+                            };
+                        }
+                    }
+                    context.onPartialOutputs?.(output);
+                }
+                if (!isMultiResponse) {
+                    output['all-messages'] = {
+                        type: 'chat-message[]',
+                        value: [
+                            ...messages,
+                            {
+                                type: 'assistant',
+                                message: responseChoicesParts[0]?.join('') ?? '',
+                                function_call: functionCalls[0]
+                                    ? {
+                                        name: functionCalls[0][0].name,
+                                        arguments: functionCalls[0][0].arguments, // Needs the stringified one here in chat list
+                                        id: functionCalls[0][0].id,
+                                    }
+                                    : undefined,
+                                function_calls: functionCalls[0]
+                                    ? functionCalls[0].map((fc) => ({
+                                        name: fc.name,
+                                        arguments: fc.arguments,
+                                        id: fc.id,
+                                    }))
+                                    : undefined,
+                            },
+                        ],
+                    };
+                }
+                const endTime = Date.now();
+                if (responseChoicesParts.length === 0 && functionCalls.length === 0) {
+                    throw new Error('No response from OpenAI');
+                }
+                output['in-messages'] = { type: 'chat-message[]', value: messages };
+                output['requestTokens'] = { type: 'number', value: tokenCount * (numberOfChoices ?? 1) };
+                let responseTokenCount = 0;
+                for (const choiceParts of responseChoicesParts) {
+                    responseTokenCount += await context.tokenizer.getTokenCountForString(choiceParts.join(), tokenizerInfo);
+                }
+                output['responseTokens'] = { type: 'number', value: responseTokenCount };
+                const promptCostPerThousand = model in openaiModels ? openaiModels[model].cost.prompt : 0;
+                const completionCostPerThousand = model in openaiModels ? openaiModels[model].cost.completion : 0;
+                const promptCost = getCostForTokens(tokenCount, 'prompt', promptCostPerThousand);
+                const completionCost = getCostForTokens(responseTokenCount, 'completion', completionCostPerThousand);
+                const cost = promptCost + completionCost;
+                output['cost'] = { type: 'number', value: cost };
+                output['__hidden_token_count'] = { type: 'number', value: tokenCount + responseTokenCount };
+                const duration = endTime - startTime;
+                output['duration'] = { type: 'number', value: duration };
+                Object.freeze(output);
+                cache.set(cacheKey, output);
+                return output;
+            }, {
+                forever: true,
+                retries: 10000,
+                maxRetryTime: 1000 * 60 * 5,
+                factor: 2.5,
+                minTimeout: 500,
+                maxTimeout: 5000,
+                randomize: true,
+                signal: context.signal,
+                onFailedAttempt(err) {
+                    if (err.toString().includes('fetch failed') && err.cause) {
+                        const cause = getError(err.cause) instanceof AggregateError
+                            ? err.cause.errors[0]
+                            : getError(err.cause);
+                        err = cause;
+                    }
+                    context.trace(`ChatNode failed, retrying: ${err.toString()}`);
+                    if (context.signal.aborted) {
+                        throw new Error('Aborted');
+                    }
+                    const { retriesLeft } = err;
+                    if (!(err instanceof OpenAIError)) {
+                        if ('code' in err) {
+                            throw err;
+                        }
+                        return; // Just retry?
+                    }
+                    if (err.status === 429) {
+                        if (retriesLeft) {
+                            context.onPartialOutputs?.({
+                                ['response']: {
+                                    type: 'string',
+                                    value: 'OpenAI API rate limit exceeded, retrying...',
+                                },
+                            });
+                            return;
+                        }
+                    }
+                    if (err.status === 408) {
+                        if (retriesLeft) {
+                            context.onPartialOutputs?.({
+                                ['response']: {
+                                    type: 'string',
+                                    value: 'OpenAI API timed out, retrying...',
+                                },
+                            });
+                            return;
+                        }
+                    }
+                    // We did something wrong (besides rate limit)
+                    if (err.status >= 400 && err.status < 500) {
+                        throw new Error(err.message);
+                    }
+                },
+            });
+        }
+        catch (error) {
+            context.trace(getError(error).stack ?? 'Missing stack');
+            throw new Error(`Error processing ChatNode: ${error.message}`);
+        }
+    }
+}
+export const chatNode = nodeDefinition(ChatNodeImpl, 'Chat');
+export function getChatNodeMessages(inputs) {
+    const prompt = inputs['prompt'];
+    let messages = match(prompt)
+        .with({ type: 'chat-message' }, (p) => [p.value])
+        .with({ type: 'chat-message[]' }, (p) => p.value)
+        .with({ type: 'string' }, (p) => [{ type: 'user', message: p.value }])
+        .with({ type: 'string[]' }, (p) => p.value.map((v) => ({ type: 'user', message: v })))
+        .otherwise((p) => {
+        if (!p) {
+            return [];
+        }
+        if (isArrayDataValue(p)) {
+            const stringValues = p.value.map((v) => coerceType({
+                type: getScalarTypeOf(p.type),
+                value: v,
+            }, 'string'));
+            return stringValues.filter((v) => v != null).map((v) => ({ type: 'user', message: v }));
+        }
+        const coercedMessage = coerceTypeOptional(p, 'chat-message');
+        if (coercedMessage != null) {
+            return [coercedMessage];
+        }
+        const coercedString = coerceTypeOptional(p, 'string');
+        return coercedString != null ? [{ type: 'user', message: coerceType(p, 'string') }] : [];
+    });
+    const systemPrompt = inputs['systemPrompt'];
+    if (systemPrompt) {
+        messages = [{ type: 'system', message: coerceType(systemPrompt, 'string') }, ...messages];
+    }
+    return { messages, systemPrompt };
+}
+export function getCostForTokens(tokenCount, type, costPerThousand) {
+    return (tokenCount / 1000) * costPerThousand;
+}
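
For reference, a minimal sketch of the cost arithmetic at the end of `ChatNodeImpl.process`. The `getCostForTokens` body is copied verbatim from the diff above; the token counts and per-thousand-token rates are illustrative placeholders, not real pricing (unknown models fall back to a rate of 0 in the published code):

```js
// Sketch only, not part of the package. In the published implementation the
// `type` argument ('prompt' or 'completion') is accepted but unused; cost is
// straight per-thousand-token pricing.
function getCostForTokens(tokenCount, type, costPerThousand) {
    return (tokenCount / 1000) * costPerThousand;
}

// Hypothetical counts and rates for illustration only.
const promptCost = getCostForTokens(1200, 'prompt', 0.03); // 0.036
const completionCost = getCostForTokens(300, 'completion', 0.06); // 0.018
console.log((promptCost + completionCost).toFixed(3)); // "0.054"
```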