@smythos/sre 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +62 -0
- package/LICENSE +18 -0
- package/package.json +127 -115
- package/src/Components/APICall/APICall.class.ts +155 -0
- package/src/Components/APICall/AccessTokenManager.ts +130 -0
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -0
- package/src/Components/APICall/OAuth.helper.ts +294 -0
- package/src/Components/APICall/mimeTypeCategories.ts +46 -0
- package/src/Components/APICall/parseData.ts +167 -0
- package/src/Components/APICall/parseHeaders.ts +41 -0
- package/src/Components/APICall/parseProxy.ts +68 -0
- package/src/Components/APICall/parseUrl.ts +91 -0
- package/src/Components/APIEndpoint.class.ts +234 -0
- package/src/Components/APIOutput.class.ts +58 -0
- package/src/Components/AgentPlugin.class.ts +102 -0
- package/src/Components/Async.class.ts +155 -0
- package/src/Components/Await.class.ts +90 -0
- package/src/Components/Classifier.class.ts +158 -0
- package/src/Components/Component.class.ts +94 -0
- package/src/Components/ComponentHost.class.ts +38 -0
- package/src/Components/DataSourceCleaner.class.ts +92 -0
- package/src/Components/DataSourceIndexer.class.ts +181 -0
- package/src/Components/DataSourceLookup.class.ts +141 -0
- package/src/Components/FEncDec.class.ts +29 -0
- package/src/Components/FHash.class.ts +33 -0
- package/src/Components/FSign.class.ts +80 -0
- package/src/Components/FSleep.class.ts +25 -0
- package/src/Components/FTimestamp.class.ts +25 -0
- package/src/Components/FileStore.class.ts +75 -0
- package/src/Components/ForEach.class.ts +97 -0
- package/src/Components/GPTPlugin.class.ts +70 -0
- package/src/Components/GenAILLM.class.ts +395 -0
- package/src/Components/HuggingFace.class.ts +314 -0
- package/src/Components/Image/imageSettings.config.ts +70 -0
- package/src/Components/ImageGenerator.class.ts +407 -0
- package/src/Components/JSONFilter.class.ts +54 -0
- package/src/Components/LLMAssistant.class.ts +213 -0
- package/src/Components/LogicAND.class.ts +28 -0
- package/src/Components/LogicAtLeast.class.ts +85 -0
- package/src/Components/LogicAtMost.class.ts +86 -0
- package/src/Components/LogicOR.class.ts +29 -0
- package/src/Components/LogicXOR.class.ts +34 -0
- package/src/Components/MCPClient.class.ts +112 -0
- package/src/Components/PromptGenerator.class.ts +122 -0
- package/src/Components/ScrapflyWebScrape.class.ts +159 -0
- package/src/Components/TavilyWebSearch.class.ts +98 -0
- package/src/Components/index.ts +77 -0
- package/src/Core/AgentProcess.helper.ts +240 -0
- package/src/Core/Connector.class.ts +123 -0
- package/src/Core/ConnectorsService.ts +192 -0
- package/src/Core/DummyConnector.ts +49 -0
- package/src/Core/HookService.ts +105 -0
- package/src/Core/SmythRuntime.class.ts +292 -0
- package/src/Core/SystemEvents.ts +15 -0
- package/src/Core/boot.ts +55 -0
- package/src/config.ts +15 -0
- package/src/constants.ts +125 -0
- package/src/data/hugging-face.params.json +580 -0
- package/src/helpers/BinaryInput.helper.ts +324 -0
- package/src/helpers/Conversation.helper.ts +1094 -0
- package/src/helpers/JsonContent.helper.ts +97 -0
- package/src/helpers/LocalCache.helper.ts +97 -0
- package/src/helpers/Log.helper.ts +234 -0
- package/src/helpers/OpenApiParser.helper.ts +150 -0
- package/src/helpers/S3Cache.helper.ts +129 -0
- package/src/helpers/SmythURI.helper.ts +5 -0
- package/src/helpers/TemplateString.helper.ts +243 -0
- package/src/helpers/TypeChecker.helper.ts +329 -0
- package/src/index.ts +179 -0
- package/src/index.ts.bak +179 -0
- package/src/subsystems/AgentManager/Agent.class.ts +1108 -0
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -0
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -0
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -0
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -0
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -0
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -0
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -0
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -0
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -0
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +59 -0
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -0
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -0
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +153 -0
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -0
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +99 -0
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +63 -0
- package/src/subsystems/ComputeManager/Code.service/index.ts +11 -0
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -0
- package/src/subsystems/IO/CLI.service/index.ts +9 -0
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -0
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -0
- package/src/subsystems/IO/Log.service/index.ts +13 -0
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +41 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -0
- package/src/subsystems/IO/NKV.service/index.ts +12 -0
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -0
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -0
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -0
- package/src/subsystems/IO/Router.service/index.ts +11 -0
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +472 -0
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -0
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +305 -0
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +418 -0
- package/src/subsystems/IO/Storage.service/index.ts +13 -0
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -0
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +450 -0
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +373 -0
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +420 -0
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +106 -0
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -0
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -0
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -0
- package/src/subsystems/LLMManager/LLM.helper.ts +221 -0
- package/src/subsystems/LLMManager/LLM.inference.ts +335 -0
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +374 -0
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +145 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +632 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +405 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +81 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +689 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +257 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +848 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +255 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +193 -0
- package/src/subsystems/LLMManager/LLM.service/index.ts +43 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +281 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.ts +229 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -0
- package/src/subsystems/LLMManager/custom-models.ts +854 -0
- package/src/subsystems/LLMManager/models.ts +2539 -0
- package/src/subsystems/LLMManager/paramMappings.ts +69 -0
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -0
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -0
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -0
- package/src/subsystems/MemoryManager/LLMContext.ts +125 -0
- package/src/subsystems/MemoryManager/RuntimeContext.ts +249 -0
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -0
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +76 -0
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -0
- package/src/subsystems/Security/Account.service/AccountConnector.ts +41 -0
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -0
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -0
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -0
- package/src/subsystems/Security/Account.service/index.ts +14 -0
- package/src/subsystems/Security/Credentials.helper.ts +62 -0
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +34 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +57 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -0
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -0
- package/src/subsystems/Security/SecureConnector.class.ts +110 -0
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -0
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +26 -0
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -0
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +166 -0
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -0
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -0
- package/src/subsystems/Security/Vault.service/index.ts +12 -0
- package/src/types/ACL.types.ts +104 -0
- package/src/types/AWS.types.ts +9 -0
- package/src/types/Agent.types.ts +61 -0
- package/src/types/AgentLogger.types.ts +17 -0
- package/src/types/Cache.types.ts +1 -0
- package/src/types/Common.types.ts +3 -0
- package/src/types/LLM.types.ts +419 -0
- package/src/types/Redis.types.ts +8 -0
- package/src/types/SRE.types.ts +64 -0
- package/src/types/Security.types.ts +18 -0
- package/src/types/Storage.types.ts +5 -0
- package/src/types/VectorDB.types.ts +78 -0
- package/src/utils/base64.utils.ts +275 -0
- package/src/utils/cli.utils.ts +68 -0
- package/src/utils/data.utils.ts +263 -0
- package/src/utils/date-time.utils.ts +22 -0
- package/src/utils/general.utils.ts +238 -0
- package/src/utils/index.ts +12 -0
- package/src/utils/numbers.utils.ts +13 -0
- package/src/utils/oauth.utils.ts +35 -0
- package/src/utils/string.utils.ts +414 -0
- package/src/utils/url.utils.ts +19 -0
- package/src/utils/validation.utils.ts +74 -0
|
@@ -0,0 +1,689 @@
|
|
|
1
|
+
import os from 'os';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import EventEmitter from 'events';
|
|
4
|
+
import fs from 'fs';
|
|
5
|
+
|
|
6
|
+
import { GoogleGenerativeAI, ModelParams, GenerationConfig, GenerateContentRequest, UsageMetadata } from '@google/generative-ai';
|
|
7
|
+
import { GoogleAIFileManager, FileState } from '@google/generative-ai/server';
|
|
8
|
+
|
|
9
|
+
import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
|
|
10
|
+
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
|
|
11
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
12
|
+
import { uid } from '@sre/utils';
|
|
13
|
+
|
|
14
|
+
import { processWithConcurrencyLimit } from '@sre/utils';
|
|
15
|
+
|
|
16
|
+
import {
|
|
17
|
+
TLLMMessageBlock,
|
|
18
|
+
ToolData,
|
|
19
|
+
TLLMMessageRole,
|
|
20
|
+
TLLMToolResultMessageBlock,
|
|
21
|
+
APIKeySource,
|
|
22
|
+
TLLMEvent,
|
|
23
|
+
TLLMParams,
|
|
24
|
+
BasicCredentials,
|
|
25
|
+
ILLMRequestFuncParams,
|
|
26
|
+
TLLMChatResponse,
|
|
27
|
+
TGoogleAIRequestBody,
|
|
28
|
+
TLLMConnectorParams,
|
|
29
|
+
ILLMRequestContext,
|
|
30
|
+
} from '@sre/types/LLM.types';
|
|
31
|
+
import { LLMHelper } from '@sre/LLMManager/LLM.helper';
|
|
32
|
+
|
|
33
|
+
import { SystemEvents } from '@sre/Core/SystemEvents';
|
|
34
|
+
import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
|
|
35
|
+
|
|
36
|
+
import { LLMConnector } from '../LLMConnector';
|
|
37
|
+
|
|
38
|
+
const MODELS_SUPPORT_SYSTEM_INSTRUCTION = [
|
|
39
|
+
'gemini-1.5-pro-exp-0801',
|
|
40
|
+
'gemini-1.5-pro-latest',
|
|
41
|
+
'gemini-1.5-pro-latest',
|
|
42
|
+
'gemini-1.5-pro',
|
|
43
|
+
'gemini-1.5-pro-001',
|
|
44
|
+
'gemini-1.5-flash-latest',
|
|
45
|
+
'gemini-1.5-flash-001',
|
|
46
|
+
'gemini-1.5-flash',
|
|
47
|
+
];
|
|
48
|
+
const MODELS_SUPPORT_JSON_RESPONSE = MODELS_SUPPORT_SYSTEM_INSTRUCTION;
|
|
49
|
+
|
|
50
|
+
// Supported file MIME types for Google AI's Gemini models
|
|
51
|
+
const VALID_MIME_TYPES = [
|
|
52
|
+
...SUPPORTED_MIME_TYPES_MAP.GoogleAI.image,
|
|
53
|
+
...SUPPORTED_MIME_TYPES_MAP.GoogleAI.audio,
|
|
54
|
+
...SUPPORTED_MIME_TYPES_MAP.GoogleAI.video,
|
|
55
|
+
...SUPPORTED_MIME_TYPES_MAP.GoogleAI.document,
|
|
56
|
+
];
|
|
57
|
+
|
|
58
|
+
// will be removed after updating the SDK
|
|
59
|
+
type UsageMetadataWithThoughtsToken = UsageMetadata & { thoughtsTokenCount: number };
|
|
60
|
+
|
|
61
|
+
export class GoogleAIConnector extends LLMConnector {
    // Connector identifier used by the LLM service registry and logging.
    public name = 'LLM:GoogleAI';

    // MIME-type whitelists used when filtering user-supplied files:
    // 'all' spans every modality Gemini accepts (image/audio/video/document),
    // 'image' is the image-only subset.
    private validMimeTypes = {
        all: VALID_MIME_TYPES,
        image: SUPPORTED_MIME_TYPES_MAP.GoogleAI.image,
    };
|
|
68
|
+
|
|
69
|
+
private async getClient(params: ILLMRequestContext): Promise<GoogleGenerativeAI> {
|
|
70
|
+
const apiKey = (params.credentials as BasicCredentials)?.apiKey;
|
|
71
|
+
|
|
72
|
+
if (!apiKey) throw new Error('Please provide an API key for Google AI');
|
|
73
|
+
|
|
74
|
+
return new GoogleGenerativeAI(apiKey);
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
|
|
78
|
+
try {
|
|
79
|
+
const prompt = body.messages;
|
|
80
|
+
delete body.messages;
|
|
81
|
+
|
|
82
|
+
const genAI = await this.getClient(context);
|
|
83
|
+
const $model = genAI.getGenerativeModel(body);
|
|
84
|
+
|
|
85
|
+
const result = await $model.generateContent(prompt);
|
|
86
|
+
|
|
87
|
+
const response = await result.response;
|
|
88
|
+
const content = response.text();
|
|
89
|
+
const finishReason = response.candidates[0].finishReason || 'stop';
|
|
90
|
+
const usage = response?.usageMetadata as UsageMetadataWithThoughtsToken;
|
|
91
|
+
this.reportUsage(usage, {
|
|
92
|
+
modelEntryName: context.modelEntryName,
|
|
93
|
+
keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
|
|
94
|
+
agentId: context.agentId,
|
|
95
|
+
teamId: context.teamId,
|
|
96
|
+
});
|
|
97
|
+
|
|
98
|
+
const toolCalls = response.candidates[0]?.content?.parts?.filter((part) => part.functionCall);
|
|
99
|
+
|
|
100
|
+
let toolsData: ToolData[] = [];
|
|
101
|
+
let useTool = false;
|
|
102
|
+
|
|
103
|
+
if (toolCalls && toolCalls.length > 0) {
|
|
104
|
+
toolsData = toolCalls.map((toolCall, index) => ({
|
|
105
|
+
index,
|
|
106
|
+
id: `tool-${index}`,
|
|
107
|
+
type: 'function',
|
|
108
|
+
name: toolCall.functionCall.name,
|
|
109
|
+
arguments: JSON.stringify(toolCall.functionCall.args),
|
|
110
|
+
role: TLLMMessageRole.Assistant,
|
|
111
|
+
}));
|
|
112
|
+
useTool = true;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
return {
|
|
116
|
+
content,
|
|
117
|
+
finishReason,
|
|
118
|
+
useTool,
|
|
119
|
+
toolsData,
|
|
120
|
+
message: { content, role: 'assistant' },
|
|
121
|
+
usage,
|
|
122
|
+
};
|
|
123
|
+
} catch (error: any) {
|
|
124
|
+
throw error;
|
|
125
|
+
}
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
|
|
129
|
+
const emitter = new EventEmitter();
|
|
130
|
+
|
|
131
|
+
const prompt = body.messages;
|
|
132
|
+
delete body.messages;
|
|
133
|
+
|
|
134
|
+
const genAI = await this.getClient(context);
|
|
135
|
+
const $model = genAI.getGenerativeModel(body);
|
|
136
|
+
|
|
137
|
+
try {
|
|
138
|
+
const result = await $model.generateContentStream(prompt);
|
|
139
|
+
|
|
140
|
+
let toolsData: ToolData[] = [];
|
|
141
|
+
let usage: UsageMetadataWithThoughtsToken;
|
|
142
|
+
|
|
143
|
+
// Process stream asynchronously while as we need to return emitter immediately
|
|
144
|
+
(async () => {
|
|
145
|
+
for await (const chunk of result.stream) {
|
|
146
|
+
const chunkText = chunk.text();
|
|
147
|
+
emitter.emit('content', chunkText);
|
|
148
|
+
|
|
149
|
+
if (chunk.candidates[0]?.content?.parts) {
|
|
150
|
+
const toolCalls = chunk.candidates[0].content.parts.filter((part) => part.functionCall);
|
|
151
|
+
if (toolCalls.length > 0) {
|
|
152
|
+
toolsData = toolCalls.map((toolCall, index) => ({
|
|
153
|
+
index,
|
|
154
|
+
id: `tool-${index}`,
|
|
155
|
+
type: 'function',
|
|
156
|
+
name: toolCall.functionCall.name,
|
|
157
|
+
arguments: JSON.stringify(toolCall.functionCall.args),
|
|
158
|
+
role: TLLMMessageRole.Assistant,
|
|
159
|
+
}));
|
|
160
|
+
emitter.emit(TLLMEvent.ToolInfo, toolsData);
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
// the same usage is sent on each emit. IMPORTANT: google does not send usage for each chunk but
|
|
165
|
+
// rather just sends the same usage for the entire request.
|
|
166
|
+
// notice that the output tokens are only sent in the last chunk usage metadata.
|
|
167
|
+
// so we will just update a var to hold the latest usage and report it when the stream ends.
|
|
168
|
+
// e.g emit1: { input_tokens: 500, output_tokens: undefined } -> same input_tokens
|
|
169
|
+
// e.g emit2: { input_tokens: 500, output_tokens: undefined } -> same input_tokens
|
|
170
|
+
// e.g emit3: { input_tokens: 500, output_tokens: 10 } -> same input_tokens, new output_tokens in the last chunk
|
|
171
|
+
if (chunk?.usageMetadata) {
|
|
172
|
+
usage = chunk.usageMetadata as UsageMetadataWithThoughtsToken;
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
if (usage) {
|
|
177
|
+
this.reportUsage(usage, {
|
|
178
|
+
modelEntryName: context.modelEntryName,
|
|
179
|
+
keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
|
|
180
|
+
agentId: context.agentId,
|
|
181
|
+
teamId: context.teamId,
|
|
182
|
+
});
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
setTimeout(() => {
|
|
186
|
+
emitter.emit('end', toolsData);
|
|
187
|
+
}, 100);
|
|
188
|
+
})();
|
|
189
|
+
|
|
190
|
+
return emitter;
|
|
191
|
+
} catch (error: any) {
|
|
192
|
+
throw error;
|
|
193
|
+
}
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
protected async webSearchRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<any> {
|
|
197
|
+
throw new Error('Web search is not supported for Google AI');
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
protected async reqBodyAdapter(params: TLLMParams): Promise<TGoogleAIRequestBody> {
|
|
201
|
+
const model = params?.model;
|
|
202
|
+
|
|
203
|
+
const messages = await this.prepareMessages(params);
|
|
204
|
+
|
|
205
|
+
let body: ModelParams & { messages: string | TLLMMessageBlock[] | GenerateContentRequest } = {
|
|
206
|
+
model: model as string,
|
|
207
|
+
messages,
|
|
208
|
+
};
|
|
209
|
+
|
|
210
|
+
const responseFormat = params?.responseFormat || '';
|
|
211
|
+
let responseMimeType = '';
|
|
212
|
+
let systemInstruction = '';
|
|
213
|
+
|
|
214
|
+
if (responseFormat === 'json') {
|
|
215
|
+
systemInstruction += JSON_RESPONSE_INSTRUCTION;
|
|
216
|
+
|
|
217
|
+
if (MODELS_SUPPORT_JSON_RESPONSE.includes(model as string)) {
|
|
218
|
+
responseMimeType = 'application/json';
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
const config: GenerationConfig = {};
|
|
223
|
+
|
|
224
|
+
if (params.maxTokens !== undefined) config.maxOutputTokens = params.maxTokens;
|
|
225
|
+
if (params.temperature !== undefined) config.temperature = params.temperature;
|
|
226
|
+
if (params.topP !== undefined) config.topP = params.topP;
|
|
227
|
+
if (params.topK !== undefined) config.topK = params.topK;
|
|
228
|
+
if (params.stopSequences?.length) config.stopSequences = params.stopSequences;
|
|
229
|
+
if (responseMimeType) config.responseMimeType = responseMimeType;
|
|
230
|
+
|
|
231
|
+
if (systemInstruction) body.systemInstruction = systemInstruction;
|
|
232
|
+
if (Object.keys(config).length > 0) {
|
|
233
|
+
body.generationConfig = config;
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
return body;
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
protected reportUsage(
|
|
240
|
+
usage: UsageMetadataWithThoughtsToken,
|
|
241
|
+
metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
|
|
242
|
+
) {
|
|
243
|
+
const modelEntryName = metadata.modelEntryName;
|
|
244
|
+
let tier = '';
|
|
245
|
+
|
|
246
|
+
const tierThresholds = {
|
|
247
|
+
'gemini-1.5-pro': 128_000,
|
|
248
|
+
'gemini-2.5-pro': 200_000,
|
|
249
|
+
};
|
|
250
|
+
|
|
251
|
+
const textInputTokens =
|
|
252
|
+
usage?.['promptTokensDetails']?.find((detail) => detail.modality === 'TEXT')?.tokenCount || usage?.promptTokenCount || 0;
|
|
253
|
+
const audioInputTokens = usage?.['promptTokensDetails']?.find((detail) => detail.modality === 'AUDIO')?.tokenCount || 0;
|
|
254
|
+
|
|
255
|
+
// Find matching model and set tier based on threshold
|
|
256
|
+
const modelWithTier = Object.keys(tierThresholds).find((model) => modelEntryName.includes(model));
|
|
257
|
+
if (modelWithTier) {
|
|
258
|
+
tier = textInputTokens < tierThresholds[modelWithTier] ? 'tier1' : 'tier2';
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
// #endregion
|
|
262
|
+
|
|
263
|
+
// SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
|
|
264
|
+
const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
|
|
265
|
+
|
|
266
|
+
const usageData = {
|
|
267
|
+
sourceId: `llm:${modelName}`,
|
|
268
|
+
input_tokens: textInputTokens,
|
|
269
|
+
output_tokens: usage.candidatesTokenCount,
|
|
270
|
+
input_tokens_audio: audioInputTokens,
|
|
271
|
+
input_tokens_cache_read: usage.cachedContentTokenCount || 0,
|
|
272
|
+
input_tokens_cache_write: 0,
|
|
273
|
+
reasoning_tokens: usage.thoughtsTokenCount,
|
|
274
|
+
keySource: metadata.keySource,
|
|
275
|
+
agentId: metadata.agentId,
|
|
276
|
+
teamId: metadata.teamId,
|
|
277
|
+
tier,
|
|
278
|
+
};
|
|
279
|
+
SystemEvents.emit('USAGE:LLM', usageData);
|
|
280
|
+
|
|
281
|
+
return usageData;
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
public formatToolsConfig({ toolDefinitions, toolChoice = 'auto' }) {
|
|
285
|
+
const tools = toolDefinitions.map((tool) => {
|
|
286
|
+
const { name, description, properties, requiredFields } = tool;
|
|
287
|
+
|
|
288
|
+
// Ensure the function name is valid
|
|
289
|
+
const validName = this.sanitizeFunctionName(name);
|
|
290
|
+
|
|
291
|
+
// Ensure properties are non-empty for OBJECT type
|
|
292
|
+
const validProperties = properties && Object.keys(properties).length > 0 ? properties : { dummy: { type: 'string' } };
|
|
293
|
+
|
|
294
|
+
return {
|
|
295
|
+
functionDeclarations: [
|
|
296
|
+
{
|
|
297
|
+
name: validName,
|
|
298
|
+
description: description || '',
|
|
299
|
+
parameters: {
|
|
300
|
+
type: 'OBJECT',
|
|
301
|
+
properties: validProperties,
|
|
302
|
+
required: requiredFields || [],
|
|
303
|
+
},
|
|
304
|
+
},
|
|
305
|
+
],
|
|
306
|
+
};
|
|
307
|
+
});
|
|
308
|
+
|
|
309
|
+
return {
|
|
310
|
+
tools,
|
|
311
|
+
toolChoice: {
|
|
312
|
+
type: toolChoice,
|
|
313
|
+
},
|
|
314
|
+
};
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
public transformToolMessageBlocks({
|
|
318
|
+
messageBlock,
|
|
319
|
+
toolsData,
|
|
320
|
+
}: {
|
|
321
|
+
messageBlock: TLLMMessageBlock;
|
|
322
|
+
toolsData: ToolData[];
|
|
323
|
+
}): TLLMToolResultMessageBlock[] {
|
|
324
|
+
const messageBlocks: TLLMToolResultMessageBlock[] = [];
|
|
325
|
+
|
|
326
|
+
if (messageBlock) {
|
|
327
|
+
const content = [];
|
|
328
|
+
if (typeof messageBlock.content === 'string') {
|
|
329
|
+
content.push({ text: messageBlock.content });
|
|
330
|
+
} else if (Array.isArray(messageBlock.content)) {
|
|
331
|
+
content.push(...messageBlock.content);
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
if (messageBlock.parts) {
|
|
335
|
+
const functionCalls = messageBlock.parts.filter((part) => part.functionCall);
|
|
336
|
+
if (functionCalls.length > 0) {
|
|
337
|
+
content.push(
|
|
338
|
+
...functionCalls.map((call) => ({
|
|
339
|
+
functionCall: {
|
|
340
|
+
name: call.functionCall.name,
|
|
341
|
+
args: JSON.parse(call.functionCall.args),
|
|
342
|
+
},
|
|
343
|
+
}))
|
|
344
|
+
);
|
|
345
|
+
}
|
|
346
|
+
}
|
|
347
|
+
|
|
348
|
+
messageBlocks.push({
|
|
349
|
+
role: messageBlock.role,
|
|
350
|
+
parts: content,
|
|
351
|
+
});
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
const transformedToolsData = toolsData.map(
|
|
355
|
+
(toolData): TLLMToolResultMessageBlock => ({
|
|
356
|
+
role: TLLMMessageRole.Function,
|
|
357
|
+
parts: [
|
|
358
|
+
{
|
|
359
|
+
functionResponse: {
|
|
360
|
+
name: toolData.name,
|
|
361
|
+
response: {
|
|
362
|
+
name: toolData.name,
|
|
363
|
+
content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result),
|
|
364
|
+
},
|
|
365
|
+
},
|
|
366
|
+
},
|
|
367
|
+
],
|
|
368
|
+
})
|
|
369
|
+
);
|
|
370
|
+
|
|
371
|
+
return [...messageBlocks, ...transformedToolsData];
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
public getConsistentMessages(messages: TLLMMessageBlock[]): TLLMMessageBlock[] {
|
|
375
|
+
const _messages = LLMHelper.removeDuplicateUserMessages(messages);
|
|
376
|
+
|
|
377
|
+
return _messages.map((message) => {
|
|
378
|
+
const _message = { ...message };
|
|
379
|
+
let textContent = '';
|
|
380
|
+
|
|
381
|
+
// Map roles to valid Google AI roles
|
|
382
|
+
switch (_message.role) {
|
|
383
|
+
case TLLMMessageRole.Assistant:
|
|
384
|
+
case TLLMMessageRole.System:
|
|
385
|
+
_message.role = TLLMMessageRole.Model;
|
|
386
|
+
break;
|
|
387
|
+
case TLLMMessageRole.User:
|
|
388
|
+
// User role is already valid
|
|
389
|
+
break;
|
|
390
|
+
default:
|
|
391
|
+
_message.role = TLLMMessageRole.User; // Default to user for unknown roles
|
|
392
|
+
}
|
|
393
|
+
|
|
394
|
+
// * empty text causes error that's why we added '...'
|
|
395
|
+
|
|
396
|
+
if (_message?.parts) {
|
|
397
|
+
textContent = _message.parts.map((textBlock) => textBlock?.text || '...').join(' ');
|
|
398
|
+
} else if (Array.isArray(_message?.content)) {
|
|
399
|
+
textContent = _message.content.map((textBlock) => textBlock?.text || '...').join(' ');
|
|
400
|
+
} else if (_message?.content) {
|
|
401
|
+
textContent = (_message.content as string) || '...';
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
_message.parts = [{ text: textContent || '...' }];
|
|
405
|
+
|
|
406
|
+
delete _message.content; // Remove content to avoid error
|
|
407
|
+
|
|
408
|
+
return _message;
|
|
409
|
+
});
|
|
410
|
+
}
|
|
411
|
+
|
|
412
|
+
private async prepareMessages(params: TLLMParams): Promise<string | TLLMMessageBlock[] | GenerateContentRequest> {
|
|
413
|
+
let messages: string | TLLMMessageBlock[] | GenerateContentRequest = params?.messages || '';
|
|
414
|
+
|
|
415
|
+
const files: BinaryInput[] = params?.files || [];
|
|
416
|
+
|
|
417
|
+
if (files.length > 0) {
|
|
418
|
+
messages = await this.prepareMessagesWithFiles(params);
|
|
419
|
+
} else if (params?.toolsConfig?.tools?.length > 0) {
|
|
420
|
+
messages = await this.prepareMessagesWithTools(params);
|
|
421
|
+
} else {
|
|
422
|
+
messages = await this.prepareMessagesWithTextQuery(params);
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
return messages;
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
private async prepareMessagesWithFiles(params: TLLMParams): Promise<string> {
|
|
429
|
+
const model = params.model;
|
|
430
|
+
|
|
431
|
+
let messages: string | TLLMMessageBlock[] = params?.messages || '';
|
|
432
|
+
let systemInstruction = '';
|
|
433
|
+
const files: BinaryInput[] = params?.files || [];
|
|
434
|
+
|
|
435
|
+
// #region Upload files
|
|
436
|
+
const promises = [];
|
|
437
|
+
const _files = [];
|
|
438
|
+
|
|
439
|
+
for (let image of files) {
|
|
440
|
+
const binaryInput = BinaryInput.from(image);
|
|
441
|
+
promises.push(binaryInput.upload(AccessCandidate.agent(params.agentId)));
|
|
442
|
+
|
|
443
|
+
_files.push(binaryInput);
|
|
444
|
+
}
|
|
445
|
+
|
|
446
|
+
await Promise.all(promises);
|
|
447
|
+
// #endregion Upload files
|
|
448
|
+
|
|
449
|
+
// If user provide mix of valid and invalid files, we will only process the valid files
|
|
450
|
+
const validFiles = this.getValidFiles(_files, 'all');
|
|
451
|
+
|
|
452
|
+
const hasVideo = validFiles.some((file) => file?.mimetype?.includes('video'));
|
|
453
|
+
|
|
454
|
+
// GoogleAI only supports one video file at a time
|
|
455
|
+
if (hasVideo && validFiles.length > 1) {
|
|
456
|
+
throw new Error('Only one video file is supported at a time.');
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
const fileUploadingTasks = validFiles.map((file) => async () => {
|
|
460
|
+
try {
|
|
461
|
+
const uploadedFile = await this.uploadFile({
|
|
462
|
+
file,
|
|
463
|
+
apiKey: (params.credentials as BasicCredentials).apiKey,
|
|
464
|
+
agentId: params.agentId,
|
|
465
|
+
});
|
|
466
|
+
|
|
467
|
+
return { url: uploadedFile.url, mimetype: file.mimetype };
|
|
468
|
+
} catch {
|
|
469
|
+
return null;
|
|
470
|
+
}
|
|
471
|
+
});
|
|
472
|
+
|
|
473
|
+
const uploadedFiles = await processWithConcurrencyLimit(fileUploadingTasks);
|
|
474
|
+
|
|
475
|
+
// We throw error when there are no valid uploaded files,
|
|
476
|
+
if (uploadedFiles && uploadedFiles?.length === 0) {
|
|
477
|
+
throw new Error(`There is an issue during upload file in Google AI Server!`);
|
|
478
|
+
}
|
|
479
|
+
|
|
480
|
+
const fileData = this.getFileData(uploadedFiles);
|
|
481
|
+
|
|
482
|
+
const userMessage: TLLMMessageBlock = Array.isArray(messages) ? messages.pop() : { role: TLLMMessageRole.User, content: '' };
|
|
483
|
+
let prompt = userMessage?.content || '';
|
|
484
|
+
|
|
485
|
+
// if the the model does not support system instruction, we will add it to the prompt
|
|
486
|
+
if (!MODELS_SUPPORT_SYSTEM_INSTRUCTION.includes(model as string)) {
|
|
487
|
+
prompt = `${prompt}\n${systemInstruction}`;
|
|
488
|
+
}
|
|
489
|
+
//#endregion Separate system message and add JSON response instruction if needed
|
|
490
|
+
|
|
491
|
+
// Adjust input structure handling for multiple image files to accommodate variations.
|
|
492
|
+
messages = fileData.length === 1 ? ([...fileData, { text: prompt }] as any) : ([prompt, ...fileData] as any);
|
|
493
|
+
|
|
494
|
+
return messages as string;
|
|
495
|
+
}
|
|
496
|
+
|
|
497
|
+
private async prepareMessagesWithTools(params: TLLMParams): Promise<GenerateContentRequest> {
|
|
498
|
+
let formattedMessages: TLLMMessageBlock[];
|
|
499
|
+
let systemInstruction = '';
|
|
500
|
+
|
|
501
|
+
let messages = params?.messages || [];
|
|
502
|
+
|
|
503
|
+
const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
|
|
504
|
+
|
|
505
|
+
if (hasSystemMessage) {
|
|
506
|
+
const separateMessages = LLMHelper.separateSystemMessages(messages);
|
|
507
|
+
const systemMessageContent = (separateMessages.systemMessage as TLLMMessageBlock)?.content;
|
|
508
|
+
systemInstruction = typeof systemMessageContent === 'string' ? systemMessageContent : '';
|
|
509
|
+
formattedMessages = separateMessages.otherMessages;
|
|
510
|
+
} else {
|
|
511
|
+
formattedMessages = messages;
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
const toolsPrompt: GenerateContentRequest = {
|
|
515
|
+
contents: formattedMessages as any,
|
|
516
|
+
};
|
|
517
|
+
|
|
518
|
+
if (systemInstruction) {
|
|
519
|
+
toolsPrompt.systemInstruction = systemInstruction;
|
|
520
|
+
}
|
|
521
|
+
|
|
522
|
+
if (params?.toolsConfig?.tools) toolsPrompt.tools = params?.toolsConfig?.tools as any;
|
|
523
|
+
if (params?.toolsConfig?.tool_choice) {
|
|
524
|
+
toolsPrompt.toolConfig = {
|
|
525
|
+
functionCallingConfig: { mode: (params?.toolsConfig?.tool_choice as any) || 'auto' },
|
|
526
|
+
};
|
|
527
|
+
}
|
|
528
|
+
|
|
529
|
+
return toolsPrompt;
|
|
530
|
+
}
|
|
531
|
+
|
|
532
|
+
private async prepareMessagesWithTextQuery(params: TLLMParams): Promise<string> {
|
|
533
|
+
const model = params.model;
|
|
534
|
+
let systemInstruction = '';
|
|
535
|
+
let prompt = '';
|
|
536
|
+
|
|
537
|
+
const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(params?.messages as TLLMMessageBlock[]);
|
|
538
|
+
|
|
539
|
+
if ('content' in systemMessage) {
|
|
540
|
+
systemInstruction = systemMessage.content as string;
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
const responseFormat = params?.responseFormat || '';
|
|
544
|
+
let responseMimeType = '';
|
|
545
|
+
|
|
546
|
+
if (responseFormat === 'json') {
|
|
547
|
+
systemInstruction += JSON_RESPONSE_INSTRUCTION;
|
|
548
|
+
|
|
549
|
+
if (MODELS_SUPPORT_JSON_RESPONSE.includes(model as string)) {
|
|
550
|
+
responseMimeType = 'application/json';
|
|
551
|
+
}
|
|
552
|
+
}
|
|
553
|
+
|
|
554
|
+
if (otherMessages?.length > 0) {
|
|
555
|
+
// Concatenate messages with prompt and remove messages from params as it's not supported
|
|
556
|
+
prompt += otherMessages.map((message) => message?.parts?.[0]?.text || '').join('\n');
|
|
557
|
+
}
|
|
558
|
+
|
|
559
|
+
// if the the model does not support system instruction, we will add it to the prompt
|
|
560
|
+
if (!MODELS_SUPPORT_SYSTEM_INSTRUCTION.includes(model as string)) {
|
|
561
|
+
prompt = `${prompt}\n${systemInstruction}`;
|
|
562
|
+
}
|
|
563
|
+
//#endregion Separate system message and add JSON response instruction if needed
|
|
564
|
+
|
|
565
|
+
return prompt;
|
|
566
|
+
}
|
|
567
|
+
|
|
568
|
+
// Add this helper method to sanitize function names
|
|
569
|
+
private sanitizeFunctionName(name: string): string {
|
|
570
|
+
// Check if name is undefined or null
|
|
571
|
+
if (name == null) {
|
|
572
|
+
return '_unnamed_function';
|
|
573
|
+
}
|
|
574
|
+
|
|
575
|
+
// Remove any characters that are not alphanumeric, underscore, dot, or dash
|
|
576
|
+
let sanitized = name.replace(/[^a-zA-Z0-9_.-]/g, '');
|
|
577
|
+
|
|
578
|
+
// Ensure the name starts with a letter or underscore
|
|
579
|
+
if (!/^[a-zA-Z_]/.test(sanitized)) {
|
|
580
|
+
sanitized = '_' + sanitized;
|
|
581
|
+
}
|
|
582
|
+
|
|
583
|
+
// If sanitized is empty after removing invalid characters, use a default name
|
|
584
|
+
if (sanitized === '') {
|
|
585
|
+
sanitized = '_unnamed_function';
|
|
586
|
+
}
|
|
587
|
+
|
|
588
|
+
// Truncate to 64 characters if longer
|
|
589
|
+
sanitized = sanitized.slice(0, 64);
|
|
590
|
+
|
|
591
|
+
return sanitized;
|
|
592
|
+
}
|
|
593
|
+
|
|
594
|
+
private async uploadFile({ file, apiKey, agentId }: { file: BinaryInput; apiKey: string; agentId: string }): Promise<{ url: string }> {
|
|
595
|
+
try {
|
|
596
|
+
if (!apiKey || !file?.mimetype) {
|
|
597
|
+
throw new Error('Missing required parameters to save file for Google AI!');
|
|
598
|
+
}
|
|
599
|
+
|
|
600
|
+
// Create a temporary directory
|
|
601
|
+
const tempDir = os.tmpdir();
|
|
602
|
+
const fileName = uid();
|
|
603
|
+
const tempFilePath = path.join(tempDir, fileName);
|
|
604
|
+
|
|
605
|
+
const bufferData = await file.readData(AccessCandidate.agent(agentId));
|
|
606
|
+
|
|
607
|
+
// Write buffer data to temp file
|
|
608
|
+
await fs.promises.writeFile(tempFilePath, new Uint8Array(bufferData));
|
|
609
|
+
|
|
610
|
+
// Upload the file to the Google File Manager
|
|
611
|
+
const fileManager = new GoogleAIFileManager(apiKey);
|
|
612
|
+
|
|
613
|
+
const uploadResponse = await fileManager.uploadFile(tempFilePath, {
|
|
614
|
+
mimeType: file.mimetype,
|
|
615
|
+
displayName: fileName,
|
|
616
|
+
});
|
|
617
|
+
|
|
618
|
+
const name = uploadResponse.file.name;
|
|
619
|
+
|
|
620
|
+
// Poll getFile() on a set interval (10 seconds here) to check file state.
|
|
621
|
+
let uploadedFile = await fileManager.getFile(name);
|
|
622
|
+
while (uploadedFile.state === FileState.PROCESSING) {
|
|
623
|
+
process.stdout.write('.');
|
|
624
|
+
// Sleep for 10 seconds
|
|
625
|
+
await new Promise((resolve) => setTimeout(resolve, 10_000));
|
|
626
|
+
// Fetch the file from the API again
|
|
627
|
+
uploadedFile = await fileManager.getFile(name);
|
|
628
|
+
}
|
|
629
|
+
|
|
630
|
+
if (uploadedFile.state === FileState.FAILED) {
|
|
631
|
+
throw new Error('File processing failed.');
|
|
632
|
+
}
|
|
633
|
+
|
|
634
|
+
// Clean up temp file
|
|
635
|
+
await fs.promises.unlink(tempFilePath);
|
|
636
|
+
|
|
637
|
+
return {
|
|
638
|
+
url: uploadResponse.file.uri || '',
|
|
639
|
+
};
|
|
640
|
+
} catch (error) {
|
|
641
|
+
throw new Error(`Error uploading file for Google AI: ${error.message}`);
|
|
642
|
+
}
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
/**
 * Filters `files` down to those whose mimetype appears in the allow-list for
 * the given category ('image' or 'all').
 *
 * @throws when not a single file passes the filter
 */
private getValidFiles(files: BinaryInput[], type: 'image' | 'all') {
    const allowedTypes = this.validMimeTypes[type];
    const accepted = files.filter((file) => allowedTypes.includes(file?.mimetype));

    if (accepted.length === 0) {
        throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validMimeTypes[type].join(', ')}`);
    }

    return accepted;
}
|
|
660
|
+
|
|
661
|
+
private getFileData(
|
|
662
|
+
files: {
|
|
663
|
+
url: string;
|
|
664
|
+
mimetype: string;
|
|
665
|
+
}[]
|
|
666
|
+
): {
|
|
667
|
+
fileData: {
|
|
668
|
+
mimeType: string;
|
|
669
|
+
fileUri: string;
|
|
670
|
+
};
|
|
671
|
+
}[] {
|
|
672
|
+
try {
|
|
673
|
+
const imageData = [];
|
|
674
|
+
|
|
675
|
+
for (let file of files) {
|
|
676
|
+
imageData.push({
|
|
677
|
+
fileData: {
|
|
678
|
+
mimeType: file.mimetype,
|
|
679
|
+
fileUri: file.url,
|
|
680
|
+
},
|
|
681
|
+
});
|
|
682
|
+
}
|
|
683
|
+
|
|
684
|
+
return imageData;
|
|
685
|
+
} catch (error) {
|
|
686
|
+
throw error;
|
|
687
|
+
}
|
|
688
|
+
}
|
|
689
|
+
}
|