@smythos/sre 1.5.42 → 1.5.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/dist/index.js +16 -17
  2. package/dist/index.js.map +1 -1
  3. package/dist/types/Components/GenAILLM.class.d.ts +22 -5
  4. package/dist/types/helpers/AWSLambdaCode.helper.d.ts +8 -5
  5. package/dist/types/index.d.ts +1 -0
  6. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts +7 -0
  7. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts +0 -4
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +0 -4
  9. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.d.ts +6 -0
  10. package/dist/types/types/LLM.types.d.ts +8 -0
  11. package/package.json +5 -2
  12. package/src/Components/GenAILLM.class.ts +30 -6
  13. package/src/helpers/AWSLambdaCode.helper.ts +82 -22
  14. package/src/helpers/Conversation.helper.ts +8 -5
  15. package/src/index.ts +193 -192
  16. package/src/index.ts.bak +193 -192
  17. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +10 -8
  18. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +3 -1
  19. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +112 -92
  20. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +2 -2
  21. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +31 -31
  22. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +31 -22
  23. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -0
  24. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +9 -0
  25. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +0 -18
  26. package/src/types/LLM.types.ts +10 -0
package/dist/types/Components/GenAILLM.class.d.ts CHANGED
@@ -27,11 +27,6 @@ export declare class GenAILLM extends Component {
  min: number;
  label: string;
  };
- maxThinkingTokens: {
- type: string;
- min: number;
- label: string;
- };
  stopSequences: {
  type: string;
  max: number;
@@ -221,6 +216,28 @@ export declare class GenAILLM extends Component {
  label: string;
  allowEmpty: boolean;
  };
+ useReasoning: {
+ type: string;
+ description: string;
+ label: string;
+ };
+ verbosity: {
+ type: string;
+ valid: string[];
+ label: string;
+ allowEmpty: boolean;
+ };
+ reasoningEffort: {
+ type: string;
+ valid: string[];
+ description: string;
+ label: string;
+ };
+ maxThinkingTokens: {
+ type: string;
+ min: number;
+ label: string;
+ };
  };
  inputs: {
  Input: {
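Taken together, these two hunks do not remove `maxThinkingTokens`: the first drops it from its old position and the second regroups it under new reasoning-related settings (`useReasoning`, `verbosity`, `reasoningEffort`). A minimal sketch of a settings object exercising the new fields; the field names come from the diff, while the model id and the surrounding wiring are assumptions:

// Hypothetical GenAILLM settings touching the new reasoning fields.
const genAiLlmSettings = {
    model: 'gpt-5',            // assumption: any reasoning-capable model id
    maxTokens: 4096,
    useReasoning: true,        // new boolean toggle
    reasoningEffort: 'medium', // new: 'none' | 'default' | 'low' | 'medium' | 'high'
    verbosity: 'low',          // new: 'low' | 'medium' | 'high'
    maxThinkingTokens: 2048,   // relocated into the Reasoning group, not removed
};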
package/dist/types/helpers/AWSLambdaCode.helper.d.ts CHANGED
@@ -4,13 +4,13 @@ import { IAgent } from '@sre/types/Agent.types';
  export declare const cachePrefix = "serverless_code";
  export declare const cacheTTL: number;
  export declare function getLambdaFunctionName(agentId: string, componentId: string): string;
- export declare function generateCodeHash(code_body: string, codeInputs: string[]): string;
- export declare function getSanitizeCodeHash(code: string): string;
+ export declare function generateCodeHash(code_body: string, codeInputs: string[], envVariables: string[]): string;
+ export declare function getSanitizeCodeHash(rawCode: string): string;
  export declare function getDeployedCodeHash(agentId: string, componentId: string): Promise<any>;
  export declare function setDeployedCodeHash(agentId: string, componentId: string, codeHash: string): Promise<void>;
- export declare function generateLambdaCode(code: string, parameters: string[]): string;
+ export declare function generateLambdaCode(code: string, parameters: string[], envVariables: Record<string, string>): string;
  export declare function zipCode(directory: string): Promise<unknown>;
- export declare function createOrUpdateLambdaFunction(functionName: any, zipFilePath: any, awsConfigs: any): Promise<void>;
+ export declare function createOrUpdateLambdaFunction(functionName: any, zipFilePath: any, awsConfigs: any, envVariables: Record<string, string>): Promise<void>;
  export declare function waitForRoleDeploymentStatus(roleName: any, client: any): Promise<boolean>;
  export declare function verifyFunctionDeploymentStatus(functionName: any, client: any): Promise<boolean>;
  export declare function getLambdaRolePolicy(): string;
@@ -37,10 +37,13 @@ export declare function reportUsage({ cost, agentId, teamId }: {
  agentId: string;
  teamId: string;
  }): void;
- export declare function validateAsyncMainFunction(code: string): {
+ export declare function validateAsyncMainFunction(rawCode: string): {
  isValid: boolean;
  error?: string;
  parameters?: string[];
  dependencies?: string[];
  };
  export declare function generateCodeFromLegacyComponent(code_body: string, code_imports: string, codeInputs: string[]): string;
+ export declare function extractAllKeyNamesFromTemplateVars(input: string): string[];
+ export declare function getCurrentEnvironmentVariables(agentTeamId: string, code: string): Promise<Record<string, string>>;
+ export declare function getSortedObjectValues(obj: Record<string, string>): string[];
package/dist/types/index.d.ts CHANGED
@@ -187,3 +187,4 @@ export * from './subsystems/Security/Vault.service/connectors/SecretsManager.cla
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/types';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants';
+ export * from './subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils';
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts CHANGED
@@ -5,6 +5,7 @@ import { LLMConnector } from '../LLMConnector';
  type ChatCompletionCreateParams = {
  model: string;
  messages: any;
+ max_completion_tokens?: number;
  max_tokens?: number;
  temperature?: number;
  stop?: string[];
@@ -12,6 +13,7 @@ type ChatCompletionCreateParams = {
  tools?: any;
  tool_choice?: string;
  stream?: boolean;
+ reasoning_effort?: 'none' | 'default' | 'low' | 'medium' | 'high';
  };
  export declare class GroqConnector extends LLMConnector {
  name: string;
@@ -55,4 +57,9 @@ export declare class GroqConnector extends LLMConnector {
  };
  getConsistentMessages(messages: TLLMMessageBlock[]): TLLMMessageBlock[];
  }
+ /**
+ * Type guard to check if a value is a valid OpenAI reasoning effort.
+ * Uses array includes for better maintainability when OpenAI adds new values.
+ */
+ export declare function isValidGroqReasoningEffort(value: unknown): value is 'low' | 'medium' | 'high' | 'none' | 'default';
  export {};
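The doc comment on `isValidGroqReasoningEffort` (seemingly copy-pasted from the OpenAI variant below) says the guard uses array includes. Its implementation body is not part of this diff; a plausible sketch matching the declared signature:

// Sketch only: the shipped implementation is not shown in this diff.
const GROQ_REASONING_EFFORTS = ['low', 'medium', 'high', 'none', 'default'] as const;
type GroqReasoningEffort = (typeof GROQ_REASONING_EFFORTS)[number];

export function isValidGroqReasoningEffort(value: unknown): value is GroqReasoningEffort {
    // Array includes keeps the valid set in one place when new values are added.
    return typeof value === 'string' && (GROQ_REASONING_EFFORTS as readonly string[]).includes(value);
}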
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts CHANGED
@@ -21,10 +21,6 @@ export declare class ChatCompletionsApiInterface extends OpenAIApiInterface {
  createStream(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>>;
  handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter;
  prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams>;
- /**
- * Type guard to check if a tool is an OpenAI tool definition
- */
- private isOpenAIToolDefinition;
  /**
  * Transform OpenAI tool definitions to ChatCompletionTool format
  */
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts CHANGED
@@ -46,10 +46,6 @@ export declare class ResponsesApiInterface extends OpenAIApiInterface {
  */
  private calculateSearchToolUsage;
  prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.Responses.ResponseCreateParams>;
- /**
- * Type guard to check if a tool is an OpenAI tool definition
- */
- private isOpenAIToolDefinition;
  /**
  * Transform OpenAI tool definitions to Responses.Tool format
  */
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.d.ts ADDED
@@ -0,0 +1,6 @@
+ import OpenAI from 'openai';
+ /**
+ * Type guard to check if a value is a valid OpenAI reasoning effort.
+ * Uses array includes for better maintainability when OpenAI adds new values.
+ */
+ export declare function isValidOpenAIReasoningEffort(value: unknown): value is OpenAI.Responses.ResponseCreateParams['reasoning']['effort'];
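This new utils module is re-exported from the package root (see the index.d.ts hunk above), so the guard is importable from `@smythos/sre`. A hypothetical call site showing the narrowing it provides:

import { isValidOpenAIReasoningEffort } from '@smythos/sre';

// `userSetting` stands in for an untyped value read from component settings.
const userSetting: unknown = 'medium';

if (isValidOpenAIReasoningEffort(userSetting)) {
    // Inside this branch, userSetting is narrowed to OpenAI's reasoning-effort union.
    console.log(`accepted effort: ${userSetting}`);
}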
package/dist/types/types/LLM.types.d.ts CHANGED
@@ -35,6 +35,7 @@ export interface BedrockCredentials {
  export type ILLMConnectorCredentials = BasicCredentials | BedrockCredentials | VertexAICredentials;
  export type TOpenAIResponseToolChoice = OpenAI.Responses.ToolChoiceOptions | OpenAI.Responses.ToolChoiceTypes | OpenAI.Responses.ToolChoiceFunction;
  export type TLLMToolChoice = OpenAI.ChatCompletionToolChoiceOption;
+ export type OpenAIReasoningEffort = NonNullable<OpenAI.Responses.ResponseCreateParams['reasoning']>['effort'];
  export type TOpenAIToolsInfo = {
  webSearch: {
  enabled: boolean;
@@ -124,7 +125,14 @@ export type TLLMParams = {
  fromDate?: string;
  toDate?: string;
  useReasoning?: boolean;
+ /**
+ * Controls the level of effort the model will put into reasoning
+ * For GPT-OSS models (20B, 120B): "low" | "medium" | "high"
+ * For Qwen 3 32B: "none" | "default"
+ */
+ reasoningEffort?: 'none' | 'default' | OpenAIReasoningEffort;
  max_output_tokens?: number;
+ verbosity?: OpenAI.Responses.ResponseCreateParams['text']['verbosity'];
  abortSignal?: AbortSignal;
  };
  export type TLLMPreparedParams = TLLMParams & {
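The new fields flow into every connector through `TLLMParams`. An illustrative partial params object, assuming `TLLMParams` is importable from the package root; the values are examples, not from the diff:

import type { TLLMParams } from '@smythos/sre';

// Partial<...> because TLLMParams' required fields are outside this diff.
const params: Partial<TLLMParams> = {
    useReasoning: true,
    reasoningEffort: 'high', // OpenAI-style effort; 'none' / 'default' target Qwen 3 32B on Groq
    verbosity: 'low',        // forwarded to the Responses API's text.verbosity
    max_output_tokens: 2048,
};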
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@smythos/sre",
- "version": "1.5.42",
+ "version": "1.5.44",
  "description": "Smyth Runtime Environment",
  "author": "Alaa-eddine KADDOURI",
  "license": "MIT",
@@ -19,6 +19,9 @@
  "CHANGELOG"
  ],
  "type": "module",
+ "engines": {
+ "node": ">=20"
+ },
  "devDependencies": {
  "@istanbuljs/nyc-config-typescript": "^1.0.2",
  "@rollup/plugin-commonjs": "^28.0.3",
@@ -79,7 +82,7 @@
  "mime": "^4.0.3",
  "mysql2": "^3.11.3",
  "oauth-1.0a": "^2.2.6",
- "openai": "^4.103.0",
+ "openai": "^5.12.2",
  "p-limit": "^6.1.0",
  "qs": "^6.13.0",
  "readline-sync": "^1.4.10",
package/src/Components/GenAILLM.class.ts CHANGED
@@ -36,11 +36,6 @@ export class GenAILLM extends Component {
  min: 1,
  label: 'Maximum Tokens',
  },
- maxThinkingTokens: {
- type: 'number',
- min: 1,
- label: 'Maximum Thinking Tokens',
- },
  stopSequences: {
  type: 'string',
  max: 400,
@@ -236,6 +231,31 @@ export class GenAILLM extends Component {
  allowEmpty: true,
  },
  // #endregion
+
+ // #region Reasoning
+ useReasoning: {
+ type: 'boolean',
+ description: 'If true, the component will use reasoning capabilities for complex problem-solving',
+ label: 'Use Reasoning',
+ },
+ verbosity: {
+ type: 'string',
+ valid: ['low', 'medium', 'high'],
+ label: 'Verbosity',
+ allowEmpty: true,
+ },
+ reasoningEffort: {
+ type: 'string',
+ valid: ['none', 'default', 'low', 'medium', 'high'],
+ description: 'Controls the level of effort the model will put into reasoning',
+ label: 'Reasoning Effort',
+ },
+ maxThinkingTokens: {
+ type: 'number',
+ min: 1,
+ label: 'Maximum Thinking Tokens',
+ },
+ // #endregion
  },
  inputs: {
  Input: {
@@ -269,6 +289,7 @@ export class GenAILLM extends Component {
  useSystemPrompt: Joi.boolean().optional().label('Use System Prompt'),
  useContextWindow: Joi.boolean().optional().label('Use Context Window'),
  maxContextWindowLength: Joi.number().optional().min(0).label('Maximum Context Window Length'),
+ verbosity: Joi.string().valid('low', 'medium', 'high').optional().allow('').allow(null).label('Verbosity'),

  // #region Web Search
  useWebSearch: Joi.boolean().optional().label('Use Web Search'),
@@ -306,8 +327,11 @@ export class GenAILLM extends Component {
  .label('To Date'),
  // #endregion

+ // #region Reasoning
  useReasoning: Joi.boolean().optional().label('Use Reasoning'),
- maxThinkingTokens: Joi.number().min(1).label('Maximum Thinking Tokens'),
+ reasoningEffort: Joi.string().valid('none', 'default', 'minimal', 'low', 'medium', 'high').optional().allow('').label('Reasoning Effort'),
+ maxThinkingTokens: Joi.number().min(1).optional().label('Maximum Thinking Tokens'),
+ // #endregion
  });
  constructor() {
  super();
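Note a small mismatch these hunks introduce: the settings list accepts `reasoningEffort` values `['none', 'default', 'low', 'medium', 'high']`, while the Joi rule additionally allows `'minimal'`. A standalone reproduction of the new reasoning-related Joi rules, runnable on its own (the full component schema has many more fields):

import Joi from 'joi';

// Reasoning-related rules copied verbatim from the schema additions above.
const reasoningSchema = Joi.object({
    useReasoning: Joi.boolean().optional().label('Use Reasoning'),
    verbosity: Joi.string().valid('low', 'medium', 'high').optional().allow('').allow(null).label('Verbosity'),
    reasoningEffort: Joi.string().valid('none', 'default', 'minimal', 'low', 'medium', 'high').optional().allow('').label('Reasoning Effort'),
    maxThinkingTokens: Joi.number().min(1).optional().label('Maximum Thinking Tokens'),
});

console.log(reasoningSchema.validate({ useReasoning: true, reasoningEffort: 'high' }).error); // undefined
console.log(reasoningSchema.validate({ reasoningEffort: 'max' }).error?.message);             // rejected value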
package/src/helpers/AWSLambdaCode.helper.ts CHANGED
@@ -2,7 +2,7 @@ import crypto from 'crypto';
  import { ConnectorService } from '@sre/Core/ConnectorsService';
  import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
  import zl from 'zip-lib';
- import { InvokeCommand, Runtime, LambdaClient, UpdateFunctionCodeCommand, CreateFunctionCommand, GetFunctionCommand, GetFunctionCommandOutput, InvokeCommandOutput } from '@aws-sdk/client-lambda';
+ import { InvokeCommand, Runtime, LambdaClient, UpdateFunctionCodeCommand, CreateFunctionCommand, GetFunctionCommand, GetFunctionCommandOutput, InvokeCommandOutput, UpdateFunctionConfigurationCommand } from '@aws-sdk/client-lambda';
  import { GetRoleCommand, CreateRoleCommand, IAMClient, GetRoleCommandOutput, CreateRoleCommandOutput } from '@aws-sdk/client-iam';
  import fs from 'fs';
  import { AWSConfig, AWSCredentials, AWSRegionConfig } from '@sre/types/AWS.types';
@@ -20,13 +20,15 @@ export function getLambdaFunctionName(agentId: string, componentId: string) {
  }


- export function generateCodeHash(code_body: string, codeInputs: string[]) {
+ export function generateCodeHash(code_body: string, codeInputs: string[], envVariables: string[]) {
  const bodyHash = getSanitizeCodeHash(code_body);
  const inputsHash = getSanitizeCodeHash(JSON.stringify(codeInputs));
- return `body-${bodyHash}__inputs-${inputsHash}`;
+ const envVariablesHash = getSanitizeCodeHash(JSON.stringify(envVariables));
+ return `body-${bodyHash}__inputs-${inputsHash}__env-${envVariablesHash}`;
  }

- export function getSanitizeCodeHash(code: string) {
+ export function getSanitizeCodeHash(rawCode: string) {
+ const code = replaceVaultKeysTemplateVars(rawCode, {});
  let output = '';
  let isSingleQuote = false;
  let isDoubleQuote = false;
@@ -82,9 +84,15 @@ export async function setDeployedCodeHash(agentId: string, componentId: string,
  .set(`${cachePrefix}_${agentId}-${componentId}`, codeHash, null, null, cacheTTL);
  }

- export function generateLambdaCode(code: string, parameters: string[]) {
+ function replaceVaultKeysTemplateVars(code: string, envVariables: Record<string, string>) {
+ const regex = /\{\{KEY\((.*?)\)\}\}/g;
+ return code.replaceAll(regex, (match, p1) => `process.env.${p1}`);
+ }
+
+ export function generateLambdaCode(code: string, parameters: string[], envVariables: Record<string, string>) {
+ const codeWithEnvVariables = envVariables && Object.keys(envVariables).length ? replaceVaultKeysTemplateVars(code, envVariables) : code;
  const lambdaCode = `
- ${code}
+ ${codeWithEnvVariables}
  export const handler = async (event, context) => {
  try {
  context.callbackWaitsForEmptyEventLoop = false;
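The new `replaceVaultKeysTemplateVars` helper rewrites `{{KEY(NAME)}}` vault placeholders into `process.env.NAME` references (its `envVariables` parameter is accepted but unused in the body shown here). A worked example of the rewrite, using the same regex as the diff:

// Reproduces the substitution performed by replaceVaultKeysTemplateVars.
const sample = 'const token = {{KEY(MY_API_TOKEN)}};'; // MY_API_TOKEN is a made-up key name
const rewritten = sample.replaceAll(/\{\{KEY\((.*?)\)\}\}/g, (_match, p1) => `process.env.${p1}`);
console.log(rewritten); // -> const token = process.env.MY_API_TOKEN;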
@@ -118,7 +126,7 @@ export async function zipCode(directory: string) {
  });
  }

- export async function createOrUpdateLambdaFunction(functionName, zipFilePath, awsConfigs) {
+ export async function createOrUpdateLambdaFunction(functionName, zipFilePath, awsConfigs, envVariables: Record<string, string>) {
  const client = new LambdaClient({
  region: awsConfigs.region,
  credentials: {
@@ -142,9 +150,12 @@ export async function createOrUpdateLambdaFunction(functionName, zipFilePath, aw
  };
  const updateFunctionCodeCommand = new UpdateFunctionCodeCommand(updateCodeParams);
  await client.send(updateFunctionCodeCommand);
- // Update function configuration to attach layer
  await verifyFunctionDeploymentStatus(functionName, client);
- // console.log('Lambda function code and configuration updated successfully!');
+
+ if (envVariables && Object.keys(envVariables).length) {
+ await updateLambdaFunctionConfiguration(client, functionName, envVariables);
+ await verifyFunctionDeploymentStatus(functionName, client);
+ }
  } else {
  // Create function if it does not exist
  let roleArn = '';
@@ -188,6 +199,7 @@ export async function createOrUpdateLambdaFunction(functionName, zipFilePath, aw
  'auto-delete': 'true',
  },
  MemorySize: 256,
+ ...(envVariables && Object.keys(envVariables).length ? { Environment: { Variables: envVariables } } : {}),
  };

  const functionCreateCommand = new CreateFunctionCommand(functionParams);
@@ -200,6 +212,17 @@ export async function createOrUpdateLambdaFunction(functionName, zipFilePath, aw
  }
  }

+ function updateLambdaFunctionConfiguration(client: LambdaClient, functionName: string, envVariables: Record<string, string>) {
+ const updateFunctionConfigurationParams = {
+ FunctionName: functionName,
+ Environment: {
+ Variables: envVariables,
+ },
+ };
+ const updateFunctionConfigurationCommand = new UpdateFunctionConfigurationCommand(updateFunctionConfigurationParams);
+ return client.send(updateFunctionConfigurationCommand);
+ }
+
  export async function waitForRoleDeploymentStatus(roleName, client): Promise<boolean> {
  return new Promise((resolve, reject) => {
  try {
@@ -356,10 +379,11 @@ export function reportUsage({ cost, agentId, teamId }: { cost: number; agentId:
  });
  }

- export function validateAsyncMainFunction(code: string): { isValid: boolean; error?: string; parameters?: string[]; dependencies?: string[] } {
+ export function validateAsyncMainFunction(rawCode: string): { isValid: boolean; error?: string; parameters?: string[]; dependencies?: string[] } {
  try {
- // Parse the code using acorn
- const ast = acorn.parse(code, {
+ const code = replaceVaultKeysTemplateVars(rawCode, {});
+ // Parse the code using acorn
+ const ast = acorn.parse(code, {
  ecmaVersion: 'latest',
  sourceType: 'module'
  });
@@ -387,8 +411,8 @@ export function validateAsyncMainFunction(code: string): { isValid: boolean; err
  }

  // Handle CallExpression (require() calls)
- if (node.type === 'CallExpression' &&
- node.callee.type === 'Identifier' &&
+ if (node.type === 'CallExpression' &&
+ node.callee.type === 'Identifier' &&
  node.callee.name === 'require' &&
  node.arguments.length > 0 &&
  node.arguments[0].type === 'Literal') {
@@ -399,7 +423,7 @@ export function validateAsyncMainFunction(code: string): { isValid: boolean; err
  }

  // Handle dynamic import() calls
- if (node.type === 'CallExpression' &&
+ if (node.type === 'CallExpression' &&
  node.callee.type === 'Import' &&
  node.arguments.length > 0 &&
  node.arguments[0].type === 'Literal') {
@@ -473,16 +497,16 @@ export function validateAsyncMainFunction(code: string): { isValid: boolean; err
  }

  if (!hasMain) {
- return {
- isValid: false,
+ return {
+ isValid: false,
  error: 'No main function found at root level',
  dependencies
  };
  }

  if (!hasAsyncMain) {
- return {
- isValid: false,
+ return {
+ isValid: false,
  error: 'Main function exists but is not async',
  dependencies
  };
@@ -490,9 +514,9 @@ export function validateAsyncMainFunction(code: string): { isValid: boolean; err

  return { isValid: true, parameters: mainParameters, dependencies };
  } catch (error) {
- return {
- isValid: false,
- error: `Failed to parse code: ${error.message}`
+ return {
+ isValid: false,
+ error: `Failed to parse code: ${error.message}`
  };
  }
  }
@@ -526,3 +550,39 @@ export function generateCodeFromLegacyComponent(code_body: string, code_imports:
  return code;
  }

+ export function extractAllKeyNamesFromTemplateVars(input: string): string[] {
+ const regex = /\{\{KEY\((.*?)\)\}\}/g;
+ const matches = [];
+ let match;
+ while ((match = regex.exec(input)) !== null) {
+ if (match[1]) {
+ matches.push(match[1]);
+ }
+ }
+ return matches;
+ }
+
+
+ async function fetchVaultSecret(keyName: string, agentTeamId: string): Promise<{ value: string, key: string }> {
+ const vaultSecret = await VaultHelper.getAgentKey(keyName, agentTeamId);
+ return {
+ value: vaultSecret,
+ key: keyName,
+ };
+
+ }
+
+ export async function getCurrentEnvironmentVariables(agentTeamId: string, code: string): Promise<Record<string, string>> {
+ const allKeyNames = extractAllKeyNamesFromTemplateVars(code);
+ const envVariables: Record<string, string> = {};
+ const vaultSecrets = await Promise.all(allKeyNames.map((keyName) => fetchVaultSecret(keyName, agentTeamId)));
+ vaultSecrets.forEach((secret) => {
+ envVariables[secret.key] = secret.value;
+ });
+ return envVariables;
+ }
+
+ export function getSortedObjectValues(obj: Record<string, string>): string[] {
+ const sortedKeys = Object.keys(obj).sort();
+ return sortedKeys.map((key) => obj[key]);
+ }
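Together these helpers resolve vault-key placeholders into Lambda environment variables. A hedged sketch of how the pieces plausibly connect in the AWSLambdaCode connector; only the helper signatures come from this diff, while the call site, the example key name, and the input variables are assumptions:

// Hypothetical wiring; these inputs are assumed, not shown in the diff.
declare const agentTeamId: string;
declare const functionName: string;
declare const zipFilePath: string;
declare const awsConfigs: any;

const code = 'async function main() { return {{KEY(MY_API_TOKEN)}}; }';
const envVariables = await getCurrentEnvironmentVariables(agentTeamId, code);
// e.g. envVariables -> { MY_API_TOKEN: '<secret fetched from the vault>' }

// Sorted secret values feed the deployment hash, so rotating a secret changes
// the hash and triggers a redeploy with fresh Lambda environment variables.
const codeHash = generateCodeHash(code, ['Input'], getSortedObjectValues(envVariables));
await createOrUpdateLambdaFunction(functionName, zipFilePath, awsConfigs, envVariables);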
package/src/helpers/Conversation.helper.ts CHANGED
@@ -348,11 +348,13 @@ export class Conversation extends EventEmitter {
  // console.log('Passthrough skiped content ', content);
  // return;
  // }
- const lastMessage = this._context?.messages?.[this._context?.messages?.length - 1];
- //const skip = lastMessage?.content?.includes(passThroughtContinueMessage) && lastMessage?.__smyth_data__?.internal;
+ //const lastMessage = this._context?.messages?.[this._context?.messages?.length - 1];
+ //const skip = lastMessage?.__smyth_data__?.internal;

  //skip if the content is the last generated message after a passthrough content
- //if (skip) return;
+ // if (skip) {
+ // let s = true;
+ // }
  _content += content;
  this.emit(TLLMEvent.Content, content);
  });
@@ -494,8 +496,9 @@ export class Conversation extends EventEmitter {
  //delete toolHeaders['x-passthrough'];
  } else {
  //this._context.addAssistantMessage(passThroughContent, message_id);
- llmMessage.content += '\n' + passThroughContent;
- this._context.addToolMessage(llmMessage, processedToolsData, message_id);
+
+ //llmMessage.content += '\n' + passThroughContent;
+ this._context.addToolMessage(llmMessage, processedToolsData, message_id, { passThrough: true });

  //this._context.addAssistantMessage(passThroughContent, message_id, { passthrough: true });
  //this should not be stored in the persistent conversation store