@smythos/sre 1.5.53 → 1.5.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (223)
  1. package/CHANGELOG +98 -98
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +3 -3
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
  9. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  10. package/package.json +1 -1
  11. package/src/Components/APICall/APICall.class.ts +157 -157
  12. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  13. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  14. package/src/Components/APICall/OAuth.helper.ts +447 -447
  15. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  16. package/src/Components/APICall/parseData.ts +167 -167
  17. package/src/Components/APICall/parseHeaders.ts +41 -41
  18. package/src/Components/APICall/parseProxy.ts +68 -68
  19. package/src/Components/APICall/parseUrl.ts +91 -91
  20. package/src/Components/APIEndpoint.class.ts +234 -234
  21. package/src/Components/APIOutput.class.ts +58 -58
  22. package/src/Components/AgentPlugin.class.ts +102 -102
  23. package/src/Components/Async.class.ts +155 -155
  24. package/src/Components/Await.class.ts +90 -90
  25. package/src/Components/Classifier.class.ts +158 -158
  26. package/src/Components/Component.class.ts +132 -132
  27. package/src/Components/ComponentHost.class.ts +38 -38
  28. package/src/Components/DataSourceCleaner.class.ts +92 -92
  29. package/src/Components/DataSourceIndexer.class.ts +181 -181
  30. package/src/Components/DataSourceLookup.class.ts +161 -161
  31. package/src/Components/ECMASandbox.class.ts +71 -71
  32. package/src/Components/FEncDec.class.ts +29 -29
  33. package/src/Components/FHash.class.ts +33 -33
  34. package/src/Components/FSign.class.ts +80 -80
  35. package/src/Components/FSleep.class.ts +25 -25
  36. package/src/Components/FTimestamp.class.ts +25 -25
  37. package/src/Components/FileStore.class.ts +78 -78
  38. package/src/Components/ForEach.class.ts +97 -97
  39. package/src/Components/GPTPlugin.class.ts +70 -70
  40. package/src/Components/GenAILLM.class.ts +586 -586
  41. package/src/Components/HuggingFace.class.ts +314 -314
  42. package/src/Components/Image/imageSettings.config.ts +70 -70
  43. package/src/Components/ImageGenerator.class.ts +502 -502
  44. package/src/Components/JSONFilter.class.ts +54 -54
  45. package/src/Components/LLMAssistant.class.ts +213 -213
  46. package/src/Components/LogicAND.class.ts +28 -28
  47. package/src/Components/LogicAtLeast.class.ts +85 -85
  48. package/src/Components/LogicAtMost.class.ts +86 -86
  49. package/src/Components/LogicOR.class.ts +29 -29
  50. package/src/Components/LogicXOR.class.ts +34 -34
  51. package/src/Components/MCPClient.class.ts +138 -138
  52. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  53. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  54. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  55. package/src/Components/MemoryWriteObject.class.ts +97 -97
  56. package/src/Components/MultimodalLLM.class.ts +128 -128
  57. package/src/Components/OpenAPI.class.ts +72 -72
  58. package/src/Components/PromptGenerator.class.ts +122 -122
  59. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  60. package/src/Components/ServerlessCode.class.ts +123 -123
  61. package/src/Components/TavilyWebSearch.class.ts +98 -98
  62. package/src/Components/VisionLLM.class.ts +104 -104
  63. package/src/Components/ZapierAction.class.ts +127 -127
  64. package/src/Components/index.ts +97 -97
  65. package/src/Core/AgentProcess.helper.ts +240 -240
  66. package/src/Core/Connector.class.ts +123 -123
  67. package/src/Core/ConnectorsService.ts +197 -197
  68. package/src/Core/DummyConnector.ts +49 -49
  69. package/src/Core/HookService.ts +105 -105
  70. package/src/Core/SmythRuntime.class.ts +235 -235
  71. package/src/Core/SystemEvents.ts +16 -16
  72. package/src/Core/boot.ts +56 -56
  73. package/src/config.ts +15 -15
  74. package/src/constants.ts +126 -126
  75. package/src/data/hugging-face.params.json +579 -579
  76. package/src/helpers/AWSLambdaCode.helper.ts +590 -590
  77. package/src/helpers/BinaryInput.helper.ts +331 -331
  78. package/src/helpers/Conversation.helper.ts +1119 -1119
  79. package/src/helpers/ECMASandbox.helper.ts +54 -54
  80. package/src/helpers/JsonContent.helper.ts +97 -97
  81. package/src/helpers/LocalCache.helper.ts +97 -97
  82. package/src/helpers/Log.helper.ts +274 -274
  83. package/src/helpers/OpenApiParser.helper.ts +150 -150
  84. package/src/helpers/S3Cache.helper.ts +147 -147
  85. package/src/helpers/SmythURI.helper.ts +5 -5
  86. package/src/helpers/Sysconfig.helper.ts +77 -77
  87. package/src/helpers/TemplateString.helper.ts +243 -243
  88. package/src/helpers/TypeChecker.helper.ts +329 -329
  89. package/src/index.ts +3 -3
  90. package/src/index.ts.bak +3 -3
  91. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  92. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  93. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  94. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  95. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  97. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  98. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -297
  99. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  100. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  101. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  102. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  103. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  104. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  105. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  106. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  107. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  108. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  109. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  110. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  111. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  112. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  113. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  114. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  115. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  116. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  117. package/src/subsystems/IO/Log.service/index.ts +13 -13
  118. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  119. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  120. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  121. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  122. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  123. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  124. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  125. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  126. package/src/subsystems/IO/Router.service/index.ts +11 -11
  127. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  128. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  129. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  130. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  131. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  132. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  133. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  134. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  135. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  136. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  137. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  138. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  139. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  140. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  141. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  142. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  143. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  144. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  145. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  147. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  149. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  150. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  151. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  152. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  160. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  161. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  162. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  163. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  164. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  165. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  166. package/src/subsystems/LLMManager/models.ts +2540 -2540
  167. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  168. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  169. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  170. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  173. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  174. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  175. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  176. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  177. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  178. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  179. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  180. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  181. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  182. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  183. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  184. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  185. package/src/subsystems/Security/Account.service/index.ts +14 -14
  186. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  187. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  188. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  189. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  190. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  191. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  192. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  193. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  194. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  195. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  196. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  197. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  198. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  199. package/src/types/ACL.types.ts +104 -104
  200. package/src/types/AWS.types.ts +10 -10
  201. package/src/types/Agent.types.ts +61 -61
  202. package/src/types/AgentLogger.types.ts +17 -17
  203. package/src/types/Cache.types.ts +1 -1
  204. package/src/types/Common.types.ts +2 -2
  205. package/src/types/LLM.types.ts +496 -496
  206. package/src/types/Redis.types.ts +8 -8
  207. package/src/types/SRE.types.ts +64 -64
  208. package/src/types/Security.types.ts +14 -14
  209. package/src/types/Storage.types.ts +5 -5
  210. package/src/types/VectorDB.types.ts +86 -86
  211. package/src/utils/base64.utils.ts +275 -275
  212. package/src/utils/cli.utils.ts +68 -68
  213. package/src/utils/data.utils.ts +322 -322
  214. package/src/utils/date-time.utils.ts +22 -22
  215. package/src/utils/general.utils.ts +238 -238
  216. package/src/utils/index.ts +12 -12
  217. package/src/utils/lazy-client.ts +261 -261
  218. package/src/utils/numbers.utils.ts +13 -13
  219. package/src/utils/oauth.utils.ts +35 -35
  220. package/src/utils/string.utils.ts +414 -414
  221. package/src/utils/url.utils.ts +19 -19
  222. package/src/utils/validation.utils.ts +74 -74
  223. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,502 +1,502 @@
1
- //TODO: this component need to be fully refactored to use the same approach as GenAI LLM
2
-
3
- import { IRequestImage, Runware } from '@runware/sdk-js';
4
- import { OpenAI } from 'openai';
5
-
6
- import { TemplateString } from '@sre/helpers/TemplateString.helper';
7
- import { LLMInference } from '@sre/LLMManager/LLM.inference';
8
- import { IAgent } from '@sre/types/Agent.types';
9
- import { APIKeySource, GenerateImageConfig } from '@sre/types/LLM.types';
10
- import Joi from 'joi';
11
- import { Component } from './Component.class';
12
-
13
- import { SystemEvents } from '@sre/Core/SystemEvents';
14
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
15
-
16
- import { BUILT_IN_MODEL_PREFIX, SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
17
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
18
- import { normalizeImageInput } from '@sre/utils/data.utils';
19
- import { ImageSettingsConfig } from './Image/imageSettings.config';
20
- import { getCredentials } from '../subsystems/Security/Credentials.helper';
21
-
22
- enum DALL_E_MODELS {
23
- DALL_E_2 = 'dall-e-2',
24
- DALL_E_3 = 'dall-e-3',
25
- }
26
-
27
- const IMAGE_GEN_COST_MAP = {
28
- [DALL_E_MODELS.DALL_E_3]: {
29
- standard: {
30
- '1024x1024': 0.04,
31
- '1024x1792': 0.08,
32
- '1792x1024': 0.08,
33
- },
34
- hd: {
35
- '1024x1024': 0.08,
36
- '1024x1792': 0.12,
37
- '1792x1024': 0.12,
38
- },
39
- },
40
- [DALL_E_MODELS.DALL_E_2]: {
41
- '256x256': 0.016,
42
- '512x512': 0.018,
43
- '1024x1024': 0.02,
44
- },
45
- };
46
-
47
- // Imagen 4 cost map - fixed cost per image
48
- const IMAGEN_4_COST_MAP = {
49
- 'imagen-4': 0.04, // Standard Imagen 4
50
- 'imagen-4-ultra': 0.06, // Imagen 4 Ultra
51
- };
52
-
53
- export class ImageGenerator extends Component {
54
- protected configSchema = Joi.object({
55
- model: Joi.string().max(100).required(),
56
- prompt: Joi.string().optional().min(2).max(2000).label('Prompt'),
57
-
58
- // #region OpenAI (DALL·E)
59
- sizeDalle2: Joi.string().valid('256x256', '512x512', '1024x1024').optional(),
60
- sizeDalle3: Joi.string().valid('1024x1024', '1792x1024', '1024x1792').optional(),
61
- quality: Joi.string().valid('standard', 'hd', 'auto', 'high', 'medium', 'low').allow('').optional(),
62
- style: Joi.string().valid('vivid', 'natural').optional(),
63
- isRawInputPrompt: Joi.boolean().strict().optional(),
64
- // #endregion
65
-
66
- // #region Runware
67
- negativePrompt: Joi.string().optional().allow('').min(2).max(2000).label('Negative Prompt'),
68
- width: Joi.number().min(128).max(2048).multiple(64).optional().messages({
69
- 'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
70
- }),
71
- height: Joi.number().min(128).max(2048).multiple(64).optional().messages({
72
- 'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
73
- }),
74
- outputFormat: Joi.string().valid('PNG', 'JPEG', 'WEBP', 'auto', 'jpeg', 'png', 'webp').optional(),
75
- strength: ImageSettingsConfig.strength,
76
- // #endregion
77
-
78
- // #region GPT model
79
- size: Joi.string().optional().allow('').max(100).label('Size'),
80
- // #endregion
81
-
82
- // #region Google AI model
83
- aspectRatio: Joi.string().valid('1:1', '3:4', '4:3', '9:16', '16:9').optional().allow('').label('Aspect Ratio'),
84
- personGeneration: Joi.string().valid('dont_allow', 'allow_adult', 'allow_all').optional().allow('').label('Person Generation'),
85
- // #endregion
86
- });
87
- constructor() {
88
- super();
89
- }
90
- init() {}
91
- async process(input, config, agent: IAgent) {
92
- await super.process(input, config, agent);
93
-
94
- const logger = this.createComponentLogger(agent, config);
95
-
96
- logger.debug(`=== Image Generator Log ===`);
97
-
98
- let model = config?.data?.model;
99
-
100
- if (!model) {
101
- return { _error: 'Model Not Found: ', _debug: logger.output };
102
- }
103
-
104
- logger.debug(`Model: ${model}`);
105
-
106
- let prompt = config.data?.prompt || input?.Prompt;
107
- prompt = typeof prompt === 'string' ? prompt : JSON.stringify(prompt);
108
- prompt = TemplateString(prompt).parse(input).result;
109
-
110
- if (!prompt) {
111
- return { _error: 'Please provide a prompt or Image', _debug: logger.output };
112
- }
113
-
114
- logger.debug(`Prompt: \n`, prompt);
115
-
116
- const modelFamily = await getModelFamily(model, agent);
117
-
118
- if (typeof imageGenerator[modelFamily] !== 'function') {
119
- return { _error: `The model '${model}' is not available. Please try a different one.`, _debug: logger.output };
120
- }
121
-
122
- try {
123
- const { output } = await imageGenerator[modelFamily]({ model, config, input, logger, agent, prompt });
124
-
125
- logger.debug(`Output: `, output);
126
-
127
- return { Output: output, _debug: logger.output };
128
- } catch (error: any) {
129
- return { _error: `Generating Image(s)\n${error?.message || JSON.stringify(error)}`, _debug: logger.output };
130
- }
131
- }
132
- }
133
-
134
- // TODO: Create a separate service for image generation, similar to LLM.service.
135
-
136
- // TODO: Hopefully we will have the proper type with new OpenAI SDK, then we can use their type
137
- type TokenUsage = OpenAI.Completions.CompletionUsage & {
138
- prompt_tokens_details?: { cached_tokens?: number };
139
- input_tokens_details: { image_tokens?: number; text_tokens?: number };
140
- output_tokens: number;
141
- };
142
-
143
- enum MODEL_FAMILY {
144
- GPT = 'gpt',
145
- RUNWARE = 'runware',
146
- DALL_E = 'dall-e',
147
- IMAGEN = 'imagen',
148
- }
149
-
150
- const imageGenerator = {
151
- [MODEL_FAMILY.GPT]: async ({ model, prompt, config, logger, agent, input }) => {
152
- let args: GenerateImageConfig & { files?: BinaryInput[] } = {
153
- model,
154
- size: config?.data?.size || 'auto',
155
- quality: config?.data?.quality || 'auto',
156
- };
157
-
158
- try {
159
- const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
160
-
161
- // if the llm is undefined, then it means we removed the model from our system
162
- if (!llmInference.connector) {
163
- return {
164
- _error: `The model '${model}' is not available. Please try a different one.`,
165
- _debug: logger.output,
166
- };
167
- }
168
-
169
- const provider = await agent.modelsProvider.getProvider(model);
170
-
171
- const files: any[] = parseFiles(input, config);
172
- const validFiles = files.filter((file) => imageGenerator.isValidImageFile(provider, file.mimetype));
173
-
174
- if (files.length > 0 && validFiles.length === 0) {
175
- throw new Error('Supported image file types are: ' + SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.join(', '));
176
- }
177
-
178
- let response;
179
-
180
- if (validFiles.length > 0) {
181
- response = await llmInference.imageEditRequest({ query: prompt, files: validFiles, params: { ...args, agentId: agent.id } });
182
- } else {
183
- response = await llmInference.imageGenRequest({ query: prompt, params: { ...args, agentId: agent.id } });
184
- }
185
-
186
- if (response?.usage) {
187
- imageGenerator.reportTokenUsage(response.usage, {
188
- modelEntryName: model,
189
- keySource: model.startsWith(BUILT_IN_MODEL_PREFIX) ? APIKeySource.Smyth : APIKeySource.User,
190
- agentId: agent.id,
191
- teamId: agent.teamId,
192
- });
193
- }
194
-
195
- let output = response?.data?.[0]?.b64_json;
196
-
197
- const binaryInput = BinaryInput.from(output);
198
- const agentId = typeof agent == 'object' && agent.id ? agent.id : agent;
199
- const smythFile = await binaryInput.getJsonData(AccessCandidate.agent(agentId));
200
-
201
- return { output: smythFile };
202
- } catch (error: any) {
203
- throw new Error(`OpenAI Image Generation Error: ${error?.message || JSON.stringify(error)}`);
204
- }
205
- },
206
- [MODEL_FAMILY.DALL_E]: async ({ model, prompt, config, logger, agent, input }) => {
207
- let _finalPrompt = prompt;
208
-
209
- const files: any[] = parseFiles(input, config);
210
-
211
- if (files.length > 0) {
212
- throw new Error('OpenAI Image Generation Error: DALL-E models do not support image editing or variations. Please use a different model.');
213
- }
214
-
215
- const responseFormat = config?.data?.responseFormat || 'url';
216
-
217
- let args: GenerateImageConfig & { responseFormat: 'url' | 'b64_json' } = {
218
- responseFormat,
219
- model,
220
- };
221
-
222
- let cost = 0;
223
-
224
- if (model === DALL_E_MODELS.DALL_E_3) {
225
- const size = config?.data?.sizeDalle3 || '1024x1024';
226
- const quality = config?.data?.quality || 'standard';
227
- const style = config?.data?.style || 'vivid';
228
- args.size = size;
229
- args.quality = quality;
230
- args.style = style;
231
-
232
- const isRawInputPrompt = config?.data?.isRawInputPrompt || false;
233
-
234
- if (isRawInputPrompt) {
235
- _finalPrompt = `I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS: ${prompt}`;
236
- }
237
-
238
- cost = IMAGE_GEN_COST_MAP[model][quality][size];
239
- } else if (model === DALL_E_MODELS.DALL_E_2) {
240
- const size = config?.data?.sizeDalle2 || '256x256';
241
- const numberOfImages = parseInt(config?.data?.numberOfImages) || 1;
242
- args.size = size;
243
- args.n = numberOfImages;
244
-
245
- cost = IMAGE_GEN_COST_MAP[model][size];
246
- }
247
-
248
- const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
249
-
250
- // if the llm is undefined, then it means we removed the model from our system
251
- if (!llmInference.connector) {
252
- return {
253
- _error: `The model '${model}' is not available. Please try a different one.`,
254
- _debug: logger.output,
255
- };
256
- }
257
-
258
- const response: any = await llmInference.imageGenRequest({ query: _finalPrompt, params: { ...args, agentId: agent.id } });
259
-
260
- let output = response?.data?.[0]?.[responseFormat];
261
- const revised_prompt = response?.data?.[0]?.revised_prompt;
262
-
263
- if (revised_prompt && prompt !== revised_prompt) {
264
- logger.debug(`Revised Prompt:\n${revised_prompt}`);
265
- }
266
-
267
- imageGenerator.reportUsage({ cost }, { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId });
268
-
269
- return { output };
270
- },
271
- [MODEL_FAMILY.RUNWARE]: async ({ model, prompt, config, agent, input }) => {
272
- // Initialize Runware client
273
- const teamId = agent.teamId;
274
- const apiKey = (await getCredentials(AccessCandidate.team(teamId), 'runware')) as string;
275
-
276
- if (!apiKey) {
277
- throw new Error('Runware API key is missing. Please provide a valid key to continue.');
278
- }
279
-
280
- const runware = new Runware({ apiKey });
281
- await runware.ensureConnection();
282
-
283
- const negativePrompt = config?.data?.negativePrompt || '';
284
-
285
- const files: any[] = parseFiles(input, config);
286
- let seedImage = Array.isArray(files) ? files[0] : files;
287
- seedImage = await normalizeImageInput(seedImage);
288
-
289
- const modelId = await agent.modelsProvider.getModelId(model);
290
- const imageRequestArgs: IRequestImage = {
291
- model: modelId,
292
- positivePrompt: prompt,
293
- width: +config?.data?.width || 1024,
294
- height: +config?.data?.height || 1024,
295
- numberResults: 1, // For Image Generation we only need 1 image
296
- outputType: 'URL', // For Image Generation we only need the URL
297
- outputFormat: config?.data?.outputFormat || 'JPEG',
298
- includeCost: true,
299
- };
300
-
301
- if (seedImage) {
302
- imageRequestArgs.seedImage = seedImage;
303
- imageRequestArgs.strength = +config?.data?.strength || 0.5;
304
- }
305
-
306
- // If a negative prompt is provided, add it to the request args
307
- if (negativePrompt) {
308
- imageRequestArgs.negativePrompt = negativePrompt;
309
- }
310
-
311
- try {
312
- const response = await runware.requestImages(imageRequestArgs);
313
-
314
- // Get first image from response array
315
- const firstImage = response[0];
316
-
317
- // Map response to match expected format
318
- let output = firstImage.imageURL;
319
-
320
- imageGenerator.reportUsage(
321
- { cost: firstImage.cost },
322
- { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId }
323
- );
324
-
325
- return { output };
326
- } catch (error: any) {
327
- throw new Error(`Runware Image Generation Error: ${error?.message || JSON.stringify(error)}`);
328
- } finally {
329
- // Clean up connection
330
- await runware.disconnect();
331
- }
332
- },
333
- [MODEL_FAMILY.IMAGEN]: async ({ model, prompt, config, logger, agent, input }) => {
334
- try {
335
- const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
336
-
337
- // if the llm is undefined, then it means we removed the model from our system
338
- if (!llmInference.connector) {
339
- return {
340
- _error: `The model '${model}' is not available. Please try a different one.`,
341
- _debug: logger.output,
342
- };
343
- }
344
-
345
- const files: any[] = parseFiles(input, config);
346
-
347
- // Imagen models only support image generation, not image editing
348
- if (files.length > 0) {
349
- throw new Error('Google AI Image Generation Error: Image editing is not supported. Imagen models only support image generation.');
350
- }
351
-
352
- let args: GenerateImageConfig & {
353
- aspectRatio?: string;
354
- numberOfImages?: number;
355
- personGeneration?: string;
356
- } = {
357
- model,
358
- aspectRatio: config?.data?.aspectRatio || config?.data?.size || '1:1',
359
- numberOfImages: config?.data?.numberOfImages || 1,
360
- personGeneration: config?.data?.personGeneration || 'allow_adult',
361
- };
362
-
363
- const response = await llmInference.imageGenRequest({ query: prompt, params: { ...args, agentId: agent.id } });
364
-
365
- // Calculate fixed cost for Imagen 4
366
- const modelName = model.replace(BUILT_IN_MODEL_PREFIX, '');
367
- const cost = IMAGEN_4_COST_MAP[modelName];
368
-
369
- if (cost && cost > 0) {
370
- // Multiply by number of images generated
371
- const numberOfImages = args.numberOfImages || 1;
372
- const totalCost = cost * numberOfImages;
373
-
374
- // Report fixed cost usage
375
- imageGenerator.reportUsage(
376
- { cost: totalCost },
377
- {
378
- modelEntryName: model,
379
- keySource: model.startsWith(BUILT_IN_MODEL_PREFIX) ? APIKeySource.Smyth : APIKeySource.User,
380
- agentId: agent.id,
381
- teamId: agent.teamId,
382
- }
383
- );
384
- }
385
-
386
- let output = response?.data?.[0]?.b64_json;
387
-
388
- if (output) {
389
- const binaryInput = BinaryInput.from(output);
390
- const agentId = typeof agent == 'object' && agent.id ? agent.id : agent;
391
- const smythFile = await binaryInput.getJsonData(AccessCandidate.agent(agentId));
392
- return { output: smythFile };
393
- } else {
394
- // Handle URL response format
395
- output = response?.data?.[0]?.url;
396
- return { output };
397
- }
398
- } catch (error: any) {
399
- throw new Error(`Google AI Image Generation Error: ${error?.message || JSON.stringify(error)}`);
400
- }
401
- },
402
- reportTokenUsage(usage: TokenUsage, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
403
- // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
404
- const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
405
-
406
- const usageData = {
407
- sourceId: `api:imagegen.${modelName}`,
408
- keySource: metadata.keySource,
409
-
410
- input_tokens_txt: usage?.input_tokens_details?.text_tokens || 0,
411
- input_tokens_img: usage?.input_tokens_details?.image_tokens || 0,
412
- output_tokens: usage?.output_tokens,
413
- input_tokens_cache_read: usage?.prompt_tokens_details?.cached_tokens || 0,
414
-
415
- agentId: metadata.agentId,
416
- teamId: metadata.teamId,
417
- };
418
- SystemEvents.emit('USAGE:API', usageData);
419
-
420
- return usageData;
421
- },
422
- reportUsage(usage: { cost: number }, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
423
- const usageData = {
424
- sourceId: `api:imagegen.smyth`,
425
- keySource: metadata.keySource,
426
-
427
- cost: usage?.cost,
428
-
429
- agentId: metadata.agentId,
430
- teamId: metadata.teamId,
431
- };
432
- SystemEvents.emit('USAGE:API', usageData);
433
-
434
- return usageData;
435
- },
436
- isValidImageFile(provider: string, mimetype: string) {
437
- return SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.includes(mimetype);
438
- },
439
- };
440
-
441
- enum PROVIDERS {
442
- OPENAI = 'OpenAI',
443
- RUNWARE = 'Runware',
444
- GOOGLEAI = 'GoogleAI',
445
- }
446
-
447
- /**
448
- * Gets the model family from a model identifier
449
- * @param model The model identifier
450
- * @returns The model family or null if not recognized
451
- */
452
- async function getModelFamily(model: string, agent: IAgent): Promise<string | null> {
453
- if (await isGPTModel(model)) return MODEL_FAMILY.GPT;
454
- if (await isRunwareModel(model, agent)) return MODEL_FAMILY.RUNWARE;
455
- if (await isDallEModel(model)) return MODEL_FAMILY.DALL_E;
456
- if (await isGoogleAIModel(model, agent)) return MODEL_FAMILY.IMAGEN;
457
-
458
- return null;
459
- }
460
-
461
- function isGPTModel(model: string) {
462
- return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.GPT);
463
- }
464
-
465
- async function isRunwareModel(model: string, agent: IAgent): Promise<boolean> {
466
- const provider = await agent.modelsProvider.getProvider(model);
467
- return provider === PROVIDERS.RUNWARE || provider?.toLowerCase() === PROVIDERS.RUNWARE.toLowerCase();
468
- }
469
-
470
- function isDallEModel(model: string) {
471
- return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.DALL_E);
472
- }
473
-
474
- async function isGoogleAIModel(model: string, agent: IAgent): Promise<boolean> {
475
- const provider = await agent.modelsProvider.getProvider(model);
476
- return (
477
- provider === PROVIDERS.GOOGLEAI ||
478
- provider?.toLowerCase() === PROVIDERS.GOOGLEAI.toLowerCase() ||
479
- model?.replace(BUILT_IN_MODEL_PREFIX, '')?.includes('imagen')
480
- );
481
- }
482
-
483
- function parseFiles(input: any, config: any) {
484
- const mediaTypes = ['Image', 'Audio', 'Video', 'Binary'];
485
-
486
- // Parse media inputs from config
487
- const inputFiles =
488
- config.inputs
489
- ?.filter((_input) => mediaTypes.includes(_input.type))
490
- ?.flatMap((_input) => {
491
- const value = input[_input.name];
492
-
493
- if (Array.isArray(value)) {
494
- return value.map((item) => TemplateString(item).parseRaw(input).result);
495
- } else {
496
- return TemplateString(value).parseRaw(input).result;
497
- }
498
- })
499
- ?.filter((file) => file) || [];
500
-
501
- return inputFiles;
502
- }
1
//TODO: this component needs to be fully refactored to use the same approach as GenAI LLM
2
+
3
+ import { IRequestImage, Runware } from '@runware/sdk-js';
4
+ import { OpenAI } from 'openai';
5
+
6
+ import { TemplateString } from '@sre/helpers/TemplateString.helper';
7
+ import { LLMInference } from '@sre/LLMManager/LLM.inference';
8
+ import { IAgent } from '@sre/types/Agent.types';
9
+ import { APIKeySource, GenerateImageConfig } from '@sre/types/LLM.types';
10
+ import Joi from 'joi';
11
+ import { Component } from './Component.class';
12
+
13
+ import { SystemEvents } from '@sre/Core/SystemEvents';
14
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
15
+
16
+ import { BUILT_IN_MODEL_PREFIX, SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
17
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
18
+ import { normalizeImageInput } from '@sre/utils/data.utils';
19
+ import { ImageSettingsConfig } from './Image/imageSettings.config';
20
+ import { getCredentials } from '../subsystems/Security/Credentials.helper';
21
+
22
// OpenAI DALL·E model identifiers as they appear in the component's model config.
enum DALL_E_MODELS {
    DALL_E_2 = 'dall-e-2',
    DALL_E_3 = 'dall-e-3',
}
26
+
27
// USD price per generated image, keyed by model, then (for DALL·E 3) by
// quality ('standard' | 'hd'), then by image size.
// NOTE(review): values appear to mirror OpenAI's published DALL·E pricing —
// verify they are current before relying on reported costs.
const IMAGE_GEN_COST_MAP = {
    [DALL_E_MODELS.DALL_E_3]: {
        standard: {
            '1024x1024': 0.04,
            '1024x1792': 0.08,
            '1792x1024': 0.08,
        },
        hd: {
            '1024x1024': 0.08,
            '1024x1792': 0.12,
            '1792x1024': 0.12,
        },
    },
    [DALL_E_MODELS.DALL_E_2]: {
        '256x256': 0.016,
        '512x512': 0.018,
        '1024x1024': 0.02,
    },
};
46
+
47
// Imagen 4 cost map - fixed cost per image (USD), keyed by the bare model name
// (the built-in model prefix is stripped by the IMAGEN handler before lookup).
const IMAGEN_4_COST_MAP = {
    'imagen-4': 0.04, // Standard Imagen 4
    'imagen-4-ultra': 0.06, // Imagen 4 Ultra
};
52
+
53
+ export class ImageGenerator extends Component {
54
+ protected configSchema = Joi.object({
55
+ model: Joi.string().max(100).required(),
56
+ prompt: Joi.string().optional().min(2).max(2000).label('Prompt'),
57
+
58
+ // #region OpenAI (DALL·E)
59
+ sizeDalle2: Joi.string().valid('256x256', '512x512', '1024x1024').optional(),
60
+ sizeDalle3: Joi.string().valid('1024x1024', '1792x1024', '1024x1792').optional(),
61
+ quality: Joi.string().valid('standard', 'hd', 'auto', 'high', 'medium', 'low').allow('').optional(),
62
+ style: Joi.string().valid('vivid', 'natural').optional(),
63
+ isRawInputPrompt: Joi.boolean().strict().optional(),
64
+ // #endregion
65
+
66
+ // #region Runware
67
+ negativePrompt: Joi.string().optional().allow('').min(2).max(2000).label('Negative Prompt'),
68
+ width: Joi.number().min(128).max(2048).multiple(64).optional().messages({
69
+ 'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
70
+ }),
71
+ height: Joi.number().min(128).max(2048).multiple(64).optional().messages({
72
+ 'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
73
+ }),
74
+ outputFormat: Joi.string().valid('PNG', 'JPEG', 'WEBP', 'auto', 'jpeg', 'png', 'webp').optional(),
75
+ strength: ImageSettingsConfig.strength,
76
+ // #endregion
77
+
78
+ // #region GPT model
79
+ size: Joi.string().optional().allow('').max(100).label('Size'),
80
+ // #endregion
81
+
82
+ // #region Google AI model
83
+ aspectRatio: Joi.string().valid('1:1', '3:4', '4:3', '9:16', '16:9').optional().allow('').label('Aspect Ratio'),
84
+ personGeneration: Joi.string().valid('dont_allow', 'allow_adult', 'allow_all').optional().allow('').label('Person Generation'),
85
+ // #endregion
86
+ });
87
+ constructor() {
88
+ super();
89
+ }
90
+ init() {}
91
+ async process(input, config, agent: IAgent) {
92
+ await super.process(input, config, agent);
93
+
94
+ const logger = this.createComponentLogger(agent, config);
95
+
96
+ logger.debug(`=== Image Generator Log ===`);
97
+
98
+ let model = config?.data?.model;
99
+
100
+ if (!model) {
101
+ return { _error: 'Model Not Found: ', _debug: logger.output };
102
+ }
103
+
104
+ logger.debug(`Model: ${model}`);
105
+
106
+ let prompt = config.data?.prompt || input?.Prompt;
107
+ prompt = typeof prompt === 'string' ? prompt : JSON.stringify(prompt);
108
+ prompt = TemplateString(prompt).parse(input).result;
109
+
110
+ if (!prompt) {
111
+ return { _error: 'Please provide a prompt or Image', _debug: logger.output };
112
+ }
113
+
114
+ logger.debug(`Prompt: \n`, prompt);
115
+
116
+ const modelFamily = await getModelFamily(model, agent);
117
+
118
+ if (typeof imageGenerator[modelFamily] !== 'function') {
119
+ return { _error: `The model '${model}' is not available. Please try a different one.`, _debug: logger.output };
120
+ }
121
+
122
+ try {
123
+ const { output } = await imageGenerator[modelFamily]({ model, config, input, logger, agent, prompt });
124
+
125
+ logger.debug(`Output: `, output);
126
+
127
+ return { Output: output, _debug: logger.output };
128
+ } catch (error: any) {
129
+ return { _error: `Generating Image(s)\n${error?.message || JSON.stringify(error)}`, _debug: logger.output };
130
+ }
131
+ }
132
+ }
133
+
134
+ // TODO: Create a separate service for image generation, similar to LLM.service.
135
+
136
+ // TODO: Hopefully we will have the proper type with new OpenAI SDK, then we can use their type
137
/**
 * Usage payload returned by image generation/edit requests. Extends the OpenAI
 * completion-usage shape with the image-specific token breakdown fields.
 */
type TokenUsage = OpenAI.Completions.CompletionUsage & {
    prompt_tokens_details?: { cached_tokens?: number };
    input_tokens_details: { image_tokens?: number; text_tokens?: number };
    output_tokens: number;
};
142
+
143
// Model families this component can dispatch to; each value is a handler key
// in the `imageGenerator` object.
enum MODEL_FAMILY {
    GPT = 'gpt',
    RUNWARE = 'runware',
    DALL_E = 'dall-e',
    IMAGEN = 'imagen',
}
149
+
150
+ const imageGenerator = {
151
    /**
     * Generates (or edits) an image with an OpenAI GPT image model via LLMInference.
     * When valid image files are attached an edit request is issued; otherwise a
     * plain generation request. Token usage is reported when the response carries it.
     * Returns a Smyth file descriptor built from the base64 payload.
     */
    [MODEL_FAMILY.GPT]: async ({ model, prompt, config, logger, agent, input }) => {
        // size/quality default to 'auto' and are forwarded as-is to the API.
        let args: GenerateImageConfig & { files?: BinaryInput[] } = {
            model,
            size: config?.data?.size || 'auto',
            quality: config?.data?.quality || 'auto',
        };

        try {
            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));

            // if the llm is undefined, then it means we removed the model from our system
            if (!llmInference.connector) {
                return {
                    _error: `The model '${model}' is not available. Please try a different one.`,
                    _debug: logger.output,
                };
            }

            const provider = await agent.modelsProvider.getProvider(model);

            // Attached files must use a provider-supported image mimetype.
            const files: any[] = parseFiles(input, config);
            const validFiles = files.filter((file) => imageGenerator.isValidImageFile(provider, file.mimetype));

            if (files.length > 0 && validFiles.length === 0) {
                throw new Error('Supported image file types are: ' + SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.join(', '));
            }

            let response;

            if (validFiles.length > 0) {
                response = await llmInference.imageEditRequest({ query: prompt, files: validFiles, params: { ...args, agentId: agent.id } });
            } else {
                response = await llmInference.imageGenRequest({ query: prompt, params: { ...args, agentId: agent.id } });
            }

            if (response?.usage) {
                imageGenerator.reportTokenUsage(response.usage, {
                    modelEntryName: model,
                    keySource: model.startsWith(BUILT_IN_MODEL_PREFIX) ? APIKeySource.Smyth : APIKeySource.User,
                    agentId: agent.id,
                    teamId: agent.teamId,
                });
            }

            let output = response?.data?.[0]?.b64_json;

            // NOTE(review): `output` is not checked before BinaryInput.from; if the API
            // returns a URL instead of b64_json this will fail — confirm the response
            // format is always base64 for GPT image models.
            const binaryInput = BinaryInput.from(output);
            const agentId = typeof agent == 'object' && agent.id ? agent.id : agent;
            const smythFile = await binaryInput.getJsonData(AccessCandidate.agent(agentId));

            return { output: smythFile };
        } catch (error: any) {
            throw new Error(`OpenAI Image Generation Error: ${error?.message || JSON.stringify(error)}`);
        }
    },
206
+ [MODEL_FAMILY.DALL_E]: async ({ model, prompt, config, logger, agent, input }) => {
207
+ let _finalPrompt = prompt;
208
+
209
+ const files: any[] = parseFiles(input, config);
210
+
211
+ if (files.length > 0) {
212
+ throw new Error('OpenAI Image Generation Error: DALL-E models do not support image editing or variations. Please use a different model.');
213
+ }
214
+
215
+ const responseFormat = config?.data?.responseFormat || 'url';
216
+
217
+ let args: GenerateImageConfig & { responseFormat: 'url' | 'b64_json' } = {
218
+ responseFormat,
219
+ model,
220
+ };
221
+
222
+ let cost = 0;
223
+
224
+ if (model === DALL_E_MODELS.DALL_E_3) {
225
+ const size = config?.data?.sizeDalle3 || '1024x1024';
226
+ const quality = config?.data?.quality || 'standard';
227
+ const style = config?.data?.style || 'vivid';
228
+ args.size = size;
229
+ args.quality = quality;
230
+ args.style = style;
231
+
232
+ const isRawInputPrompt = config?.data?.isRawInputPrompt || false;
233
+
234
+ if (isRawInputPrompt) {
235
+ _finalPrompt = `I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS: ${prompt}`;
236
+ }
237
+
238
+ cost = IMAGE_GEN_COST_MAP[model][quality][size];
239
+ } else if (model === DALL_E_MODELS.DALL_E_2) {
240
+ const size = config?.data?.sizeDalle2 || '256x256';
241
+ const numberOfImages = parseInt(config?.data?.numberOfImages) || 1;
242
+ args.size = size;
243
+ args.n = numberOfImages;
244
+
245
+ cost = IMAGE_GEN_COST_MAP[model][size];
246
+ }
247
+
248
+ const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
249
+
250
+ // if the llm is undefined, then it means we removed the model from our system
251
+ if (!llmInference.connector) {
252
+ return {
253
+ _error: `The model '${model}' is not available. Please try a different one.`,
254
+ _debug: logger.output,
255
+ };
256
+ }
257
+
258
+ const response: any = await llmInference.imageGenRequest({ query: _finalPrompt, params: { ...args, agentId: agent.id } });
259
+
260
+ let output = response?.data?.[0]?.[responseFormat];
261
+ const revised_prompt = response?.data?.[0]?.revised_prompt;
262
+
263
+ if (revised_prompt && prompt !== revised_prompt) {
264
+ logger.debug(`Revised Prompt:\n${revised_prompt}`);
265
+ }
266
+
267
+ imageGenerator.reportUsage({ cost }, { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId });
268
+
269
+ return { output };
270
+ },
271
    /**
     * Generates an image through the Runware SDK using the team's stored API key.
     * Supports an optional seed image (first attached file) for image-to-image,
     * reports the provider-returned cost, and always disconnects the client.
     */
    [MODEL_FAMILY.RUNWARE]: async ({ model, prompt, config, agent, input }) => {
        // Initialize Runware client
        const teamId = agent.teamId;
        const apiKey = (await getCredentials(AccessCandidate.team(teamId), 'runware')) as string;

        if (!apiKey) {
            throw new Error('Runware API key is missing. Please provide a valid key to continue.');
        }

        const runware = new Runware({ apiKey });
        await runware.ensureConnection();

        const negativePrompt = config?.data?.negativePrompt || '';

        // Only the first attached file is used as the seed image.
        const files: any[] = parseFiles(input, config);
        let seedImage = Array.isArray(files) ? files[0] : files;
        seedImage = await normalizeImageInput(seedImage);

        const modelId = await agent.modelsProvider.getModelId(model);
        const imageRequestArgs: IRequestImage = {
            model: modelId,
            positivePrompt: prompt,
            width: +config?.data?.width || 1024,
            height: +config?.data?.height || 1024,
            numberResults: 1, // For Image Generation we only need 1 image
            outputType: 'URL', // For Image Generation we only need the URL
            outputFormat: config?.data?.outputFormat || 'JPEG',
            includeCost: true,
        };

        if (seedImage) {
            imageRequestArgs.seedImage = seedImage;
            imageRequestArgs.strength = +config?.data?.strength || 0.5;
        }

        // If a negative prompt is provided, add it to the request args
        if (negativePrompt) {
            imageRequestArgs.negativePrompt = negativePrompt;
        }

        try {
            const response = await runware.requestImages(imageRequestArgs);

            // Get first image from response array
            const firstImage = response[0];

            // Map response to match expected format
            let output = firstImage.imageURL;

            imageGenerator.reportUsage(
                { cost: firstImage.cost },
                { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId }
            );

            return { output };
        } catch (error: any) {
            throw new Error(`Runware Image Generation Error: ${error?.message || JSON.stringify(error)}`);
        } finally {
            // Clean up connection
            await runware.disconnect();
        }
    },
333
    /**
     * Generates an image with a Google AI Imagen model via LLMInference.
     * Imagen supports generation only (attached files are rejected). A fixed
     * per-image cost is reported for models present in IMAGEN_4_COST_MAP.
     * Returns a Smyth file descriptor for base64 responses, or the raw URL
     * when the API returns a URL instead.
     */
    [MODEL_FAMILY.IMAGEN]: async ({ model, prompt, config, logger, agent, input }) => {
        try {
            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));

            // if the llm is undefined, then it means we removed the model from our system
            if (!llmInference.connector) {
                return {
                    _error: `The model '${model}' is not available. Please try a different one.`,
                    _debug: logger.output,
                };
            }

            const files: any[] = parseFiles(input, config);

            // Imagen models only support image generation, not image editing
            if (files.length > 0) {
                throw new Error('Google AI Image Generation Error: Image editing is not supported. Imagen models only support image generation.');
            }

            // Falls back to the generic `size` field for aspect ratio when the
            // Imagen-specific `aspectRatio` is not configured.
            let args: GenerateImageConfig & {
                aspectRatio?: string;
                numberOfImages?: number;
                personGeneration?: string;
            } = {
                model,
                aspectRatio: config?.data?.aspectRatio || config?.data?.size || '1:1',
                numberOfImages: config?.data?.numberOfImages || 1,
                personGeneration: config?.data?.personGeneration || 'allow_adult',
            };

            const response = await llmInference.imageGenRequest({ query: prompt, params: { ...args, agentId: agent.id } });

            // Calculate fixed cost for Imagen 4
            const modelName = model.replace(BUILT_IN_MODEL_PREFIX, '');
            const cost = IMAGEN_4_COST_MAP[modelName];

            if (cost && cost > 0) {
                // Multiply by number of images generated
                const numberOfImages = args.numberOfImages || 1;
                const totalCost = cost * numberOfImages;

                // Report fixed cost usage
                imageGenerator.reportUsage(
                    { cost: totalCost },
                    {
                        modelEntryName: model,
                        keySource: model.startsWith(BUILT_IN_MODEL_PREFIX) ? APIKeySource.Smyth : APIKeySource.User,
                        agentId: agent.id,
                        teamId: agent.teamId,
                    }
                );
            }

            let output = response?.data?.[0]?.b64_json;

            if (output) {
                const binaryInput = BinaryInput.from(output);
                const agentId = typeof agent == 'object' && agent.id ? agent.id : agent;
                const smythFile = await binaryInput.getJsonData(AccessCandidate.agent(agentId));
                return { output: smythFile };
            } else {
                // Handle URL response format
                output = response?.data?.[0]?.url;
                return { output };
            }
        } catch (error: any) {
            throw new Error(`Google AI Image Generation Error: ${error?.message || JSON.stringify(error)}`);
        }
    },
402
+ reportTokenUsage(usage: TokenUsage, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
403
+ // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
404
+ const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
405
+
406
+ const usageData = {
407
+ sourceId: `api:imagegen.${modelName}`,
408
+ keySource: metadata.keySource,
409
+
410
+ input_tokens_txt: usage?.input_tokens_details?.text_tokens || 0,
411
+ input_tokens_img: usage?.input_tokens_details?.image_tokens || 0,
412
+ output_tokens: usage?.output_tokens,
413
+ input_tokens_cache_read: usage?.prompt_tokens_details?.cached_tokens || 0,
414
+
415
+ agentId: metadata.agentId,
416
+ teamId: metadata.teamId,
417
+ };
418
+ SystemEvents.emit('USAGE:API', usageData);
419
+
420
+ return usageData;
421
+ },
422
+ reportUsage(usage: { cost: number }, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
423
+ const usageData = {
424
+ sourceId: `api:imagegen.smyth`,
425
+ keySource: metadata.keySource,
426
+
427
+ cost: usage?.cost,
428
+
429
+ agentId: metadata.agentId,
430
+ teamId: metadata.teamId,
431
+ };
432
+ SystemEvents.emit('USAGE:API', usageData);
433
+
434
+ return usageData;
435
+ },
436
+ isValidImageFile(provider: string, mimetype: string) {
437
+ return SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.includes(mimetype);
438
+ },
439
+ };
440
+
441
// Canonical provider names as returned by the agent's models provider.
enum PROVIDERS {
    OPENAI = 'OpenAI',
    RUNWARE = 'Runware',
    GOOGLEAI = 'GoogleAI',
}
446
+
447
/**
 * Gets the model family from a model identifier. Checks run in declared order,
 * so a gpt-* model resolves to GPT before any provider-based lookup is made.
 * @param model The model identifier (may carry the built-in model prefix)
 * @param agent The agent whose models provider is queried for provider lookups
 * @returns The model family or null if not recognized
 */
async function getModelFamily(model: string, agent: IAgent): Promise<string | null> {
    if (await isGPTModel(model)) return MODEL_FAMILY.GPT;
    if (await isRunwareModel(model, agent)) return MODEL_FAMILY.RUNWARE;
    if (await isDallEModel(model)) return MODEL_FAMILY.DALL_E;
    if (await isGoogleAIModel(model, agent)) return MODEL_FAMILY.IMAGEN;

    return null;
}
460
+
461
+ function isGPTModel(model: string) {
462
+ return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.GPT);
463
+ }
464
+
465
+ async function isRunwareModel(model: string, agent: IAgent): Promise<boolean> {
466
+ const provider = await agent.modelsProvider.getProvider(model);
467
+ return provider === PROVIDERS.RUNWARE || provider?.toLowerCase() === PROVIDERS.RUNWARE.toLowerCase();
468
+ }
469
+
470
+ function isDallEModel(model: string) {
471
+ return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.DALL_E);
472
+ }
473
+
474
+ async function isGoogleAIModel(model: string, agent: IAgent): Promise<boolean> {
475
+ const provider = await agent.modelsProvider.getProvider(model);
476
+ return (
477
+ provider === PROVIDERS.GOOGLEAI ||
478
+ provider?.toLowerCase() === PROVIDERS.GOOGLEAI.toLowerCase() ||
479
+ model?.replace(BUILT_IN_MODEL_PREFIX, '')?.includes('imagen')
480
+ );
481
+ }
482
+
483
+ function parseFiles(input: any, config: any) {
484
+ const mediaTypes = ['Image', 'Audio', 'Video', 'Binary'];
485
+
486
+ // Parse media inputs from config
487
+ const inputFiles =
488
+ config.inputs
489
+ ?.filter((_input) => mediaTypes.includes(_input.type))
490
+ ?.flatMap((_input) => {
491
+ const value = input[_input.name];
492
+
493
+ if (Array.isArray(value)) {
494
+ return value.map((item) => TemplateString(item).parseRaw(input).result);
495
+ } else {
496
+ return TemplateString(value).parseRaw(input).result;
497
+ }
498
+ })
499
+ ?.filter((file) => file) || [];
500
+
501
+ return inputFiles;
502
+ }