@ai.ntellect/core 0.1.91 → 0.1.92

package/agent/index.ts CHANGED
@@ -9,16 +9,32 @@ import {
  MemoryScope,
  MemoryType,
  QueueResult,
- TransformedQueueItem,
  User,
  } from "../types";
  import { QueueItemTransformer } from "../utils/queue-item-transformer";
  import { ResultSanitizer } from "../utils/sanitize-results";
  import { ActionHandler } from "./handlers/ActionHandler";

+ export type State = {
+ behavior: {
+ role: string;
+ language: string;
+ guidelines: {
+ important: string[];
+ warnings: string[];
+ steps?: string[];
+ };
+ };
+ userRequest: string;
+ actions: ActionSchema[];
+ results: QueueResult[];
+ examplesMessages?: {
+ role: string;
+ content: string;
+ }[];
+ };
+
  export class Agent {
- private readonly SIMILARITY_THRESHOLD = 95;
- private readonly MAX_RESULTS = 1;
  private readonly actionHandler: ActionHandler;
  private readonly user: User;
  private readonly orchestrator: Orchestrator;
@@ -54,45 +70,19 @@ export class Agent {
  this.accumulatedResults = [];
  }

- async process(
- prompt: string,
- contextualizedPrompt: string,
- events: AgentEvent
- ): Promise<any> {
- let actions: ActionSchema[] | TransformedQueueItem[] | undefined =
- undefined;
- let isSimilar: boolean = false;
-
- if (this.cacheMemory) {
- const similarActions = await this.cacheMemory.findSimilarQueries(prompt, {
- similarityThreshold: this.SIMILARITY_THRESHOLD,
- maxResults: this.MAX_RESULTS,
- userId: this.user.id,
- scope: MemoryScope.GLOBAL,
- });
-
- if (similarActions.length > 0) {
- actions = QueueItemTransformer.transformActionsToQueueItems(
- similarActions[0].data
- );
- isSimilar = true;
- }
- }
-
- if (!actions?.length && !isSimilar) {
- console.log("No similar actions found in cache for query: ", prompt);
- console.log("Requesting orchestrator for actions..");
- const request = await this.orchestrator.process(contextualizedPrompt);
- events.onMessage?.(request);
- actions = request.actions;
- }
+ async process(prompt: string, events: AgentEvent): Promise<any> {
+ console.log("Requesting orchestrator for actions..");
+ const request = await this.orchestrator.process(
+ prompt,
+ this.accumulatedResults
+ );
+ events.onMessage?.(request);

- return actions && actions.length > 0
+ return request.actions.length > 0
  ? this.handleActions(
  {
  initialPrompt: prompt,
- contextualizedPrompt: contextualizedPrompt,
- actions: actions as ActionSchema[],
+ actions: request.actions as ActionSchema[],
  },
  events
  )
@@ -102,11 +92,9 @@ export class Agent {
  private async handleActions(
  {
  initialPrompt,
- contextualizedPrompt,
  actions,
  }: {
  initialPrompt: string;
- contextualizedPrompt: string;
  actions: ActionSchema[];
  },
  events: AgentEvent
@@ -144,11 +132,10 @@ export class Agent {
  console.log("Accumulated results:");
  console.dir(this.accumulatedResults, { depth: null });

- const sanitizedResults = ResultSanitizer.sanitize(this.accumulatedResults);
+ // const sanitizedResults = ResultSanitizer.sanitize(this.accumulatedResults);
  const evaluation = await evaluator.process(
  initialPrompt,
- contextualizedPrompt,
- sanitizedResults
+ this.accumulatedResults
  );

  events.onMessage?.(evaluation);
@@ -157,8 +144,7 @@
  this.evaluatorIteration++;
  return this.handleActions(
  {
- initialPrompt: contextualizedPrompt,
- contextualizedPrompt: initialPrompt,
+ initialPrompt: initialPrompt,
  actions: evaluation.nextActionsNeeded,
  },
  events
@@ -191,12 +177,16 @@
  this.accumulatedResults = [];
  this.evaluatorIteration = 0;

- await this.cacheMemory?.createMemory({
- content: actionsResult.initialPrompt,
- data: actionsResult.data,
- scope: MemoryScope.GLOBAL,
- type: MemoryType.ACTION,
- });
+ for (const action of actionsResult.data) {
+ if (!action.error) {
+ await this.cacheMemory?.createMemory({
+ content: actionsResult.initialPrompt,
+ data: action.result,
+ scope: MemoryScope.GLOBAL,
+ type: MemoryType.ACTION,
+ });
+ }
+ }

  return this.stream
  ? (
@@ -205,7 +195,10 @@
  summaryData
  )
  ).toDataStreamResponse()
- : await synthesizer.process(actionsResult.initialPrompt, summaryData);
+ : await synthesizer.process(
+ actionsResult.initialPrompt,
+ this.accumulatedResults
+ );
  }

  private transformActions(actions: ActionSchema[]) {
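
With the cache-lookup branch removed, `Agent.process` now takes only the user prompt and the event callbacks, and the orchestrator receives the accumulated results directly. A minimal calling sketch, assuming an `Agent` instance constructed elsewhere (the constructor is not part of this diff) and using only the `onMessage` callback that appears above:

```ts
import { Agent } from "@ai.ntellect/core"; // export path assumed

declare const agent: Agent; // assumed: already wired with an Orchestrator, user, actions, etc.

async function ask(question: string) {
  // process(prompt, events): the contextualizedPrompt argument is gone in 0.1.92
  const outcome = await agent.process(question, {
    onMessage: (message) => console.log("agent event:", message),
  });
  return outcome; // synthesizer output, or a stream response when streaming is enabled
}
```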
@@ -1,43 +1,28 @@
- import { ActionSchema } from "../../types";
- import { injectActions } from "../../utils/inject-actions";
  export const evaluatorContext = {
- role: "You are the evaluator agent. Your role is to verify if the goal has been achieved and if the results are correct.",
- guidelines: {
- important: [
- "Verify if all required actions were executed successfully.",
- "Check if the results align with the initial goal.",
- "Identify and extract additional relevant information naturally during the process. Examples:",
- " - Link a token symbol (e.g., 'USDC') to its address (e.g., '0xA0b8...6EB48').",
- " - Associate a wallet address (e.g., '0x1234...abcd') to a user-friendly name (e.g., 'Work Wallet').",
- " - Map a token address (e.g., '0x6B17...71d0F') back to its symbol or name (e.g., 'DAI').",
- "Store these facts in memory with their type (episodic, semantic, or procedural).",
- ],
- warnings: [
- "NEVER modify the results directly.",
- "NEVER make assumptions about missing data.",
- "NEVER repeat actions already completed unless explicitly required.",
- ],
- },
- compose: (goal: string, results: string, tools: ActionSchema[]) => {
- return `
- You are evaluating if the following goal has been achieved: "${goal}".
-
- COMPLETED ACTIONS: ${results}
-
- The tools available are: ${injectActions(tools)}
-
- Follow these steps to evaluate:
- 1. Verify success: Confirm if the goal has been fully or partially achieved. If partially, describe what's missing.
- 2. Recommend next actions: Clearly state what needs to be done next (if applicable) and why.
- 3. Extract relevant information:
- - Example: Link token symbols to addresses, map wallet names to addresses, or connect tokens to specific networks.
- - For each fact, specify its memory type:
- - **Episodic**: Record specific events. Format: [{"type": "episodic", "query": "query", "event": "event", "description": "description"}]
- - **Semantic**: Store general knowledge. Format: [{"knowledge": "knowledge", "link": "link", "type": "semantic", "description": "description"}]
- - **Procedural**: Save recurring workflows. Format: [{"type": "procedural", "actions": [{"name": "action_name", "parameters": {"param1": "value1", "param2": "value2"}}]]
- 4. Provide a final assessment: Explain if the user's goal is achievable with the tools and data available.
-
- Be clear, concise, and prioritize storing key facts that may help improve future interactions.
- `;
+ behavior: {
+ language: "user_language",
+ role: "Your role is to verify if the goal has been achieved and make a response or suggest next actions.",
+ guidelines: {
+ important: [
+ "Verify if all required actions were executed successfully.",
+ "Check if the results align with the initial goal.",
+ "Identify and extract additional relevant information naturally during the process. Examples:",
+ " - Link a token symbol (e.g., 'USDC') to its address (e.g., '0xA0b8...6EB48').",
+ " - Associate a wallet address (e.g., '0x1234...abcd') to a user-friendly name (e.g., 'Work Wallet').",
+ " - Map a token address (e.g., '0x6B17...71d0F') back to its symbol or name (e.g., 'DAI').",
+ "Store these facts in memory with their type (episodic, semantic, or procedural).",
+ ],
+ warnings: [
+ "NEVER modify the results directly.",
+ "NEVER make assumptions about missing data.",
+ "NEVER repeat actions already completed unless explicitly required.",
+ ],
+ steps: [
+ "Verify success: Confirm if the goal has been fully or partially achieved. If partially, describe what's missing.",
+ "Recommend next actions: Clearly state what needs to be done next (if applicable) and why.",
+ "Store key facts: Store any relevant information in memory with their type (episodic, semantic, or procedural).",
+ "Be clear, concise, and prioritize storing key facts that may help improve future interactions.",
+ ],
+ },
  },
  };
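
All three contexts are now plain data in the same `behavior` shape declared by the new `State` type in `agent/index.ts`, so they can be fed straight into a `composeContext` call. A type-level sketch of that relationship (the import paths are illustrative):

```ts
import { State } from "../../agent";            // same relative path the evaluator uses
import { evaluatorContext } from "./context";

// Compile-time check only: the context object must satisfy the shared behavior shape.
const behavior: State["behavior"] = evaluatorContext.behavior;
void behavior;
```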
@@ -1,8 +1,10 @@
  import { openai } from "@ai-sdk/openai";
  import { generateObject } from "ai";
  import { z } from "zod";
+ import { State } from "../../agent";
  import { PersistentMemory } from "../../memory/persistent";
- import { ActionSchema, MemoryScope } from "../../types";
+ import { ActionSchema, MemoryScope, QueueResult } from "../../types";
+ import { injectActions } from "../../utils/inject-actions";
  import { evaluatorContext } from "./context";

  export class Evaluator {
@@ -15,13 +17,43 @@ export class Evaluator {
  this.memory = memory;
  }

- async process(prompt: string, goal: string, results: string): Promise<any> {
+ composeContext(state: State) {
+ const { behavior, userRequest, actions, results } = state;
+ const { role, language, guidelines } = behavior;
+ const { important, warnings, steps } = guidelines;
+
+ const context = `
+ # ROLE: ${role}
+ # LANGUAGE: ${language}
+ # IMPORTANT: ${important.join("\n")}
+ # NEVER: ${warnings.join("\n")}
+ # USER_REQUEST: ${userRequest}
+ # ACTIONS AVAILABLE: ${injectActions(actions)}
+ # CURRENT_RESULTS: ${results.map((r) => r.result).join(", ")}
+ # STEPS: ${steps?.join("\n") || ""}
+ `;
+
+ return context;
+ }
+
+ async process(prompt: string, results: QueueResult[]): Promise<any> {
  try {
+ const context = this.composeContext({
+ behavior: evaluatorContext.behavior,
+ userRequest: prompt,
+ actions: this.tools,
+ results: results,
+ });
+
+ console.log("\n🔍 Evaluator processing");
+ console.log("Goal:", prompt);
+ console.log("Results to evaluate:", JSON.stringify(results, null, 2));
+
  const response = await generateObject({
  model: this.model,
  schema: z.object({
  isRemindNeeded: z.boolean(),
- extraInformationsToRemember: z.array(
+ importantToRemembers: z.array(
  z.object({
  memoryType: z.string(),
  content: z.string(),
@@ -30,11 +62,21 @@ export class Evaluator {
  ),
  response: z.string(),
  isNextActionNeeded: z.boolean(),
- nextActionsNeeded: ActionSchema,
+ nextActionsNeeded: z.array(
+ z.object({
+ name: z.string(),
+ parameters: z.array(
+ z.object({
+ name: z.string(),
+ value: z.any(),
+ })
+ ),
+ })
+ ),
  why: z.string(),
  }),
  prompt: prompt,
- system: evaluatorContext.compose(goal, results, this.tools),
+ system: context,
  });

  const validatedResponse = {
@@ -46,47 +88,52 @@ export class Evaluator {
  };

  if (validatedResponse.isRemindNeeded) {
- for (const item of validatedResponse.extraInformationsToRemember) {
- // Check if the item is already in the memory
+ console.log(
+ "\n💭 Processing important memories to store",
+ validatedResponse
+ );
+ for (const item of validatedResponse.importantToRemembers) {
+ console.log("\n📝 Processing memory item:");
+ console.log("Type:", item.memoryType);
+ console.log("Content:", item.content);
+
  const memories = await this.memory.searchSimilarQueries(
  item.content,
  {
  similarityThreshold: 95,
  }
  );
+
  if (memories.length > 0) {
- console.log("Similar memorie found, no need to remember", {
- memories,
- });
+ console.log("🔄 Similar memory already exists - skipping");
  continue;
  }
- if (memories.length === 0) {
- console.log("Adding to memory", {
- query: item.content,
- data: item.data,
- });
- await this.memory.createMemory({
- id: crypto.randomUUID(),
- purpose: item.memoryType,
- query: item.content,
- data: item.data,
- scope: MemoryScope.GLOBAL,
- createdAt: new Date(),
- });
- }
+
+ console.log(" Storing new memory");
+ await this.memory.createMemory({
+ id: crypto.randomUUID(),
+ purpose: item.memoryType,
+ query: item.content,
+ data: item.data,
+ scope: MemoryScope.GLOBAL,
+ createdAt: new Date(),
+ });
  }
  }

- console.log("Evaluator response");
- console.dir(validatedResponse, { depth: null });
+ console.log("\n✅ Evaluation completed");
+ console.log("─".repeat(50));
+ console.log("Results:", JSON.stringify(validatedResponse, null, 2));
+
  return validatedResponse;
  } catch (error: any) {
+ console.error("\n❌ Evaluator error:", error.message);
  if (error) {
  console.log("Evaluator error");
  console.dir(error.value, { depth: null });
  console.error(error.message);
- if (error.value.extraInformationsToRemember.length > 0) {
- for (const item of error.value.extraInformationsToRemember) {
+ if (error.value.importantToRemembers.length > 0) {
+ for (const item of error.value.importantToRemembers) {
  // Check if the item is already in the memory
  const memories = await this.memory.searchSimilarQueries(
  item.content
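
Callers now pass the raw `QueueResult[]` instead of a pre-sanitized string, and `nextActionsNeeded` comes back as an array of `{ name, parameters }` objects. A hedged usage sketch; the `Evaluator` constructor arguments and the exact `QueueResult` fields are assumptions based on how the queue populates them:

```ts
import { Evaluator } from "@ai.ntellect/core"; // export path assumed

declare const evaluator: Evaluator; // assumed: built with the agent's tools and a PersistentMemory

async function evaluate() {
  const results = [
    {
      name: "get_balance",                     // hypothetical action
      parameters: { wallet: "0x1234...abcd" },
      result: { balance: "12.5 ETH" },
      error: null,
    },
  ];

  const evaluation = await evaluator.process("What is my wallet balance?", results as any);

  if (evaluation.isNextActionNeeded) {
    // Each entry follows the new zod schema: { name, parameters: [{ name, value }] }
    for (const next of evaluation.nextActionsNeeded) {
      console.log("follow-up:", next.name, next.parameters);
    }
  }
}
```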
@@ -1,22 +1,16 @@
- import { ActionSchema } from "../../types";
- import { injectActions } from "../../utils/inject-actions";
-
  export const orchestratorContext = {
- role: "You are the orchestrator agent. Your role is to determine what actions are needed to achieve the user goal.",
- guidelines: {
- important: [
- "If there is no action to do, you must answer in the 'answer' field.",
- "If some parameters are not clear or missing, don't add the action, YOU MUST ask the user for them.",
- "ALWAYS use the same language as user request. (If it's English, use English, if it's French, use French, etc.)",
- "For QUESTIONS or ANALYSIS, BEFORE executing ANY actions, you MUST search in memory for similar queries AS THE ONLY ACTION TO EXECUTE.",
- ],
- warnings: ["NEVER repeat same actions if the user doesn't ask for it."],
- },
- compose: (tools: ActionSchema[]) => {
- return `
- ${JSON.stringify(orchestratorContext.guidelines)}
-
- The actions are: ${injectActions(tools)}
- `;
+ behavior: {
+ language: "user_language",
+ role: "You are the orchestrator agent. Your role is to determine what actions are needed to achieve the user goal.",
+ guidelines: {
+ important: [
+ "If there is no action to do, you must answer in the 'answer' field.",
+ "If some parameters are not clear or missing, don't add the action, YOU MUST ask the user for them.",
+ "ALWAYS use the same language as user request. (If it's English, use English, if it's French, use French, etc.)",
+ "For QUESTIONS or ANALYSIS, BEFORE executing ANY actions, you CAN search in memory and internal knowledge base.",
+ "NEVER repeat same actions if the user doesn't ask for it.",
+ ],
+ warnings: [],
+ },
  },
  };
@@ -1,32 +1,71 @@
  import { openai } from "@ai-sdk/openai";
  import { generateObject } from "ai";
  import { z } from "zod";
+ import { State } from "../../agent";
+ import { CacheMemory } from "../../memory/cache";
  import { PersistentMemory } from "../../memory/persistent";
- import { ActionSchema, BaseLLM, MemoryScopeType } from "../../types";
+ import {
+ ActionSchema,
+ MemoryScope,
+ MemoryScopeType,
+ QueueResult,
+ } from "../../types";
+ import { injectActions } from "../../utils/inject-actions";
  import { orchestratorContext } from "./context";

- export class Orchestrator implements BaseLLM {
+ export class Orchestrator {
  private readonly model = openai("gpt-4o");
  public tools: ActionSchema[];
- private memory: PersistentMemory;
+ private memory: {
+ persistent: PersistentMemory;
+ cache: CacheMemory;
+ };
+ private id: string;

- constructor(tools: ActionSchema[], memory: PersistentMemory) {
+ constructor(
+ id: string,
+ tools: ActionSchema[],
+ memory: {
+ persistent: PersistentMemory;
+ cache: CacheMemory;
+ }
+ ) {
+ this.id = id;
  this.memory = memory;
  this.tools = [
  ...tools,
  {
- name: "search_memory",
+ name: "search_internal_knowledge_base",
  description:
  "Search for relevant information in the internal knowledge base",
  parameters: z.object({
  query: z.string(),
  }),
  execute: async ({ query }: { query: string }) => {
- const memories = await this.memory.searchSimilarQueries(query, {
- similarityThreshold: 95,
- });
-
- return memories;
+ const persistentMemories =
+ await this.memory.persistent.searchSimilarQueries(query, {
+ similarityThreshold: 70,
+ });
+ return persistentMemories;
+ },
+ },
+ {
+ name: "search_cache_memory",
+ description: "Search for relevant information in the cache",
+ parameters: z.object({
+ query: z.string(),
+ }),
+ execute: async ({ query }: { query: string }) => {
+ const cacheMemories = await this.memory.cache.findSimilarQueries(
+ query,
+ {
+ similarityThreshold: 70,
+ maxResults: 1,
+ userId: this.id,
+ scope: MemoryScope.GLOBAL,
+ }
+ );
+ return cacheMemories;
  },
  },
  {
@@ -53,7 +92,7 @@ export class Orchestrator implements BaseLLM {
  scope: MemoryScopeType;
  userId?: string;
  }) => {
- const memories = await this.memory.createMemory({
+ const memories = await this.memory.persistent.createMemory({
  query,
  purpose,
  data,
@@ -68,39 +107,87 @@ export class Orchestrator implements BaseLLM {
  ];
  }

- async process(prompt: string): Promise<any> {
+ composeContext(state: State) {
+ const { behavior, userRequest, actions, results } = state;
+ const { role, language, guidelines } = behavior;
+ const { important, warnings } = guidelines;
+
+ const context = `
+ # ROLE: ${role}
+ # LANGUAGE: ${language}
+ # IMPORTANT: ${important.join("\n")}
+ # USER_REQUEST: ${userRequest}
+ # ACTIONS_AVAILABLES: ${injectActions(actions)} (NEVER REPEAT ACTIONS)
+ # CURRENT_RESULTS: ${results.map((r) => r.result).join(", ")}
+ `.trim();
+
+ return context;
+ }
+
+ async process(prompt: string, results: QueueResult[]): Promise<any> {
+ const state = this.composeContext({
+ behavior: orchestratorContext.behavior,
+ userRequest: prompt,
+ actions: this.tools,
+ results: results,
+ });
  try {
+ console.log("\n🎭 Orchestrator processing");
+ console.log("Prompt:", prompt);
+
  const response = await generateObject({
  model: this.model,
  schema: z.object({
- actions: ActionSchema,
+ actions: z.array(
+ z.object({
+ name: z.string(),
+ parameters: z.array(
+ z.object({
+ name: z.string(),
+ value: z.any(),
+ })
+ ),
+ })
+ ),
  answer: z.string(),
  }),
  prompt: prompt,
- system: orchestratorContext.compose(this.tools),
+ system: state,
+ temperature: 0,
  });

  const validatedResponse = {
  ...response.object,
  actions: response.object.actions.map((action) => ({
  ...action,
- parameters: action.parameters || {},
+ parameters: Array.isArray(action.parameters)
+ ? action.parameters
+ : Object.entries(action.parameters || {}).map(([name, value]) => ({
+ name,
+ value,
+ })),
  })),
  };
- console.log("Orchestrator response");
- console.dir(validatedResponse, { depth: null });
+
+ console.log("\n✅ Orchestration completed");
+ console.log("─".repeat(50));
+ console.log(
+ "Actions determined:",
+ validatedResponse.actions.map((a) => a.name).join(", ") ||
+ "No actions needed"
+ );
+ if (validatedResponse.answer) {
+ console.log("Response:", validatedResponse.answer);
+ }

  return validatedResponse;
  } catch (error: any) {
- if (error) {
- console.log("Orchestrator response");
- console.dir(error.value, { depth: null });
- console.error(error.message);
- return {
- ...error.value,
- };
+ console.error("\n❌ Orchestrator error:", error.message);
+ if (error?.value) {
+ console.log("Partial response:", JSON.stringify(error.value, null, 2));
+ return { ...error.value };
  }
- // throw error;
+ throw error;
  }
  }
  }
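
The orchestrator is now keyed by a user or session id and owns both memory back ends, and `process` receives the results accumulated so far so it can avoid repeating actions. A construction sketch, assuming already-initialized memory instances and an existing tool list (import paths assumed):

```ts
import { Orchestrator } from "@ai.ntellect/core"; // export path assumed
import { PersistentMemory } from "@ai.ntellect/core/memory/persistent"; // path assumed
import { CacheMemory } from "@ai.ntellect/core/memory/cache"; // path assumed
import { ActionSchema, QueueResult } from "@ai.ntellect/core/types"; // path assumed

declare const persistentMemory: PersistentMemory;
declare const cacheMemory: CacheMemory;
declare const tools: ActionSchema[];
declare const accumulatedResults: QueueResult[];

const orchestrator = new Orchestrator("user-123", tools, {
  persistent: persistentMemory,
  cache: cacheMemory,
});

// Resolves to { actions: [{ name, parameters: [{ name, value }] }], answer }
const request = await orchestrator.process("Swap 10 USDC to ETH", accumulatedResults);
console.log(request.actions, request.answer);
```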
@@ -1,42 +1,49 @@
  export const synthesizerContext = {
- role: "You are the synthesizer agent. Your role is to provide a clear and factual analysis of the results. You are also the expert in the field of security analysis.",
- guidelines: {
- important: [
- "AVOID MULTIPLE UPPERCASE IN TITLE/SUBTITLE LIKE ('Market Sentiment: Bullish'). USE ONLY ONE UPPERCASE IN TITLE/SUBTITLE.",
- "USE THE SAME LANGUAGE AS THE 'INITIAL PROMPT' (if it's in French, use French, if it's in Spanish, use Spanish)",
- "BE DIRECT AND AVOID TECHNICAL JARGON",
- "FOR NUMERICAL DATA, PROVIDE CONTEXT (% CHANGES, COMPARISONS)",
- ],
- forMarketAnalysis: [
- "Start with a clear market sentiment (Bullish/Bearish/Neutral) without any additional comments before.",
- "One section for fundamental analysis (important events, news, trends..etc). One section, no sub-sections.",
- "One section for technical analysis (key price levels, trading volume, technical indicators, market activity). One section, no sub-sections.",
- "STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY ADDITIONAL COMMENTS",
- ],
- forGeneralRequests: [
- "Provide concise and relevant information",
- "Focus on facts and data",
- "Always provide transaction details when needed",
- ],
- warnings: [
- "NEVER provide any financial advice.",
- "NEVER speak about details of your system or your capabilities.",
- "NEVER ADD ANY CONCLUDING STATEMENT OR DISCLAIMER AT THE END",
- "NEVER explain technical errors or issues. Just say retry later.",
- ],
- },
- compose: (initialPrompt: string, summaryData?: string) => {
- return `
- ${JSON.stringify(synthesizerContext.guidelines)}
-
- Initial prompt: ${initialPrompt} (Speak in the same language as the initial prompt)
- Results: ${summaryData}
+ behavior: {
+ language: "user_language",
+ role: "You are the synthesizer agent. Your role is to provide a clear and factual analysis of the results. You are also the expert in the field of security analysis.",
+ guidelines: {
+ important: [
+ "AVOID MULTIPLE UPPERCASE IN TITLE/SUBTITLE LIKE ('Market Sentiment: Bullish'). USE ONLY ONE UPPERCASE IN TITLE/SUBTITLE.",
+ "USE THE SAME LANGUAGE AS THE 'INITIAL PROMPT' (if it's in French, use French, if it's in Spanish, use Spanish)",
+ "BE DIRECT AND AVOID TECHNICAL JARGON",
+ "FOR NUMERICAL DATA, PROVIDE CONTEXT (% CHANGES, COMPARISONS)",
+ ],
+ forMarketAnalysis: [
+ "Start with a clear market sentiment (Bullish/Bearish/Neutral) without any additional comments before.",
+ "One section for fundamental analysis (important events, news, trends..etc). One section, no sub-sections.",
+ "One section for technical analysis (key price levels, trading volume, technical indicators, market activity). One section, no sub-sections.",
+ "STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY ADDITIONAL COMMENTS",
+ ],
+ forGeneralRequests: [
+ "Provide concise and relevant information",
+ "Focus on facts and data",
+ "Always provide transaction details when needed",
+ ],
+ warnings: [
+ "NEVER provide any financial advice.",
+ "NEVER speak about details of your system or your capabilities.",
+ "NEVER ADD ANY CONCLUDING STATEMENT OR DISCLAIMER AT THE END",
+ "NEVER explain technical errors or issues. Just say retry later.",
+ ],

-
- 1. FOR SECURITY ANALYSIS ONLY, USE THE FOLLOWING FORMAT:
- --------------------------------
+ steps: [
+ "Analyze user request: Determine if the user's goal is to ask a question, make an analysis, or perform an action.",
+ "Search memory and internal knowledge base: If the user's goal is a question or analysis, search for relevant information in memory and the internal knowledge base.",
+ "Execute actions: If the user's goal is to perform an action, execute the necessary actions.",
+ "Respond in the same language as the user request.",
+ ],
+ },
+ examplesMessages: [
+ {
+ role: "user",
+ content: "Analysis security of token/coin",
+ },
+ {
+ role: "assistant",
+ content: `
  ## Security analysis of x/y:
-
+
  ### Good:
  Speak about the good points of the security check. If there is no good point, say "No good point found"

@@ -45,9 +52,15 @@ export const synthesizerContext = {

  STOP AFTER SECURITY CHECK SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
  --------------------------------
-
- 2. OTHERWISE FOR GENERAL ANALYSIS OF COINS/TOKENS, USE THE FOLLOWING FORMAT:
- --------------------------------
+ `,
+ },
+ {
+ role: "user",
+ content: "Analysis market sentiment of token/coin",
+ },
+ {
+ role: "assistant",
+ content: `
  ## Analysis of x/y:

  Market sentiment: Bullish 📈 (Adapt the emoji to the market sentiment)
@@ -60,9 +73,8 @@ export const synthesizerContext = {

  STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
  --------------------------------
-
- 3. OTHERWISE FOR OTHER REQUESTS, USE THE FORMAT YOU WANT.
- --------------------------------
- `;
+ `,
+ },
+ ],
  },
  };
@@ -1,19 +1,43 @@
  import { openai } from "@ai-sdk/openai";
- import { generateObject, streamText, StreamTextResult } from "ai";
+ import { generateObject, StreamTextResult } from "ai";
  import { z } from "zod";
- import { BaseLLM } from "../../types";
+ import { State } from "../../agent";
+ import { QueueResult } from "../../types";
  import { synthesizerContext } from "./context";

- export class Synthesizer implements BaseLLM {
+ export class Synthesizer {
  private readonly model = openai("gpt-4-turbo");

+ composeContext(state: Partial<State>) {
+ const { behavior, userRequest, results, examplesMessages } = state;
+
+ if (!behavior) {
+ return "";
+ }
+ const { role, language, guidelines } = behavior;
+ const { important, warnings, steps } = guidelines;
+
+ const context = `
+ # ROLE: ${role}
+ # LANGUAGE: ${language}
+ # IMPORTANT: ${important.join("\n")}
+ # NEVER: ${warnings.join("\n")}
+ # USER_REQUEST: ${userRequest}
+ # CURRENT_RESULTS: ${results?.map((r) => r.result).join(", ") || ""}
+ # STEPS: ${steps?.join("\n") || ""}
+ # MESSAGES EXAMPLES: ${JSON.stringify(examplesMessages, null, 2)}
+ `;
+
+ return context;
+ }
+
  async process(
  prompt: string,
- summaryData?: string,
+ results: QueueResult[],
  onFinish?: (event: any) => void
  ): Promise<
  | {
- actions: {
+ actionsCompleted: {
  name: string;
  reasoning: string;
  }[];
@@ -21,11 +45,19 @@ export class Synthesizer implements BaseLLM {
  }
  | StreamTextResult<Record<string, any>>
  > {
- console.log("Summarizing results...");
+ console.log("\n🎨 Starting synthesis process");
+ console.log("Prompt:", prompt);
+ console.log("Results to synthesize:", JSON.stringify(results, null, 2));
+ const context = this.composeContext({
+ behavior: synthesizerContext.behavior,
+ userRequest: prompt,
+ results: results,
+ });
+
  const result = await generateObject({
  model: this.model,
  schema: z.object({
- actions: z.array(
+ actionsCompleted: z.array(
  z.object({
  name: z.string(),
  reasoning: z.string(),
@@ -33,11 +65,23 @@ export class Synthesizer implements BaseLLM {
  ),
  response: z.string(),
  }),
- prompt: synthesizerContext.compose(prompt, summaryData || ""),
- system: synthesizerContext.role,
+ prompt,
+ system: context,
  });
- console.log("Synthesizer");
- console.dir(result.object, { depth: null });
+
+ console.log("\n✅ Synthesis completed");
+ console.log("─".repeat(50));
+ console.log("Generated response:", result.object.response);
+
+ if (result.object.actionsCompleted.length > 0) {
+ console.log("\n📋 Suggested actions:");
+ result.object.actionsCompleted.forEach((action, index) => {
+ console.log(`\n${index + 1}. Action Details:`);
+ console.log(`  Name: ${action.name}`);
+ console.log(`  Reasoning: ${action.reasoning}`);
+ });
+ }
+
  if (onFinish) onFinish(result.object);
  return result.object;
  }
@@ -46,13 +90,26 @@ export class Synthesizer implements BaseLLM {
  prompt: string,
  summaryData?: string,
  onFinish?: (event: any) => void
- ): Promise<StreamTextResult<Record<string, any>>> {
- const result = await streamText({
- model: this.model,
- prompt: synthesizerContext.compose(prompt, summaryData || ""),
- onFinish: onFinish,
- system: synthesizerContext.role,
- });
- return result;
+ ): Promise<any> {
+ console.log("\n🎨 Starting streaming synthesis");
+ console.log("Prompt:", prompt);
+ // if (summaryData) {
+ // console.log(
+ // "Summary data:",
+ // JSON.stringify(JSON.parse(summaryData), null, 2)
+ // );
+ // }
+
+ // const result = await streamText({
+ // model: this.model,
+ // prompt: synthesizerContext.compose(prompt, summaryData || ""),
+ // onFinish: (event) => {
+ // console.log("\n✅ Streaming synthesis completed");
+ // if (onFinish) onFinish(event);
+ // },
+ // system: synthesizerContext.role,
+ // });
+
+ // return result;
  }
  }
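
`composeContext` renders a partial `State` into the `# SECTION: value` system prompt shared by the agents. A small sketch of what the synthesizer builds for a minimal state (import paths and result values are illustrative):

```ts
import { Synthesizer } from "./llm/synthesizer"; // path assumed
import { synthesizerContext } from "./llm/synthesizer/context"; // path assumed

const synthesizer = new Synthesizer();

const system = synthesizer.composeContext({
  behavior: synthesizerContext.behavior,
  userRequest: "Analysis market sentiment of token/coin",
  results: [{ result: "BTC +3% on the day" }] as any, // only `result` is read here
});

// The template comes out roughly as:
// # ROLE: You are the synthesizer agent. ...
// # LANGUAGE: user_language
// # IMPORTANT: <guidelines joined with newlines>
// # NEVER: <warnings joined with newlines>
// # USER_REQUEST: Analysis market sentiment of token/coin
// # CURRENT_RESULTS: BTC +3% on the day
// # STEPS: <steps joined with newlines>
// # MESSAGES EXAMPLES: undefined   (examplesMessages was not supplied)
console.log(system);
```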
package/memory/cache.ts CHANGED
@@ -8,6 +8,7 @@ import {
  MatchOptions,
  MemoryScope,
  MemoryType,
+ QueueResult,
  } from "../types";

  export class CacheMemory {
@@ -32,16 +33,14 @@ export class CacheMemory {

  private async initRedis() {
  this.redis.on("error", (err) => {
- console.error("Redis Client Error:", err);
- // Implement retry logic if needed
+ console.error("Redis Client Error:", err);
  });

  try {
  await this.redis.connect();
- console.log("Successfully connected to Redis");
+ console.log("Successfully connected to Redis");
  } catch (error) {
- console.error("Failed to connect to Redis:", error);
- // Handle connection failure
+ console.error("Failed to connect to Redis:", error);
  }
  }

@@ -58,7 +57,7 @@ export class CacheMemory {
  const result = await this.redis.set(key, JSON.stringify(memory), {
  EX: this.CACHE_TTL,
  });
- console.log("Cache memory created: ", result);
+ console.log("💾 Cache memory created:", result);
  }

  async findSimilarQueries(
@@ -66,12 +65,14 @@ export class CacheMemory {
  options: MatchOptions & { userId?: string; scope?: MemoryScope } = {}
  ): Promise<
  {
- data: any;
+ data: QueueResult[];
  similarityPercentage: number;
  query: string;
  }[]
  > {
- console.log("\nSearching in cache for query:", query);
+ console.log("\n🔍 Searching in cache");
+ console.log("Query:", query);
+ console.log("Options:", JSON.stringify(options, null, 2));

  const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-small"),
@@ -79,12 +80,12 @@ export class CacheMemory {
  });

  const memories = await this.getAllMemories(options.scope, options.userId);
- console.log("\n📚 Found", memories.length, "queries to compare with");
+ console.log(`\n📚 Found ${memories.length} cached queries to compare`);

  const matches = memories
  .map((memory) => {
  const similarity = cosineSimilarity(embedding, memory.embedding);
- const similarityPercentage = (similarity + 1) * 50; // Conversion en pourcentage
+ const similarityPercentage = (similarity + 1) * 50;
  return {
  data: memory.data,
  query: memory.query,
@@ -104,13 +105,17 @@ export class CacheMemory {

  if (results.length > 0) {
  console.log("\n✨ Similar queries found:");
- results.forEach((match) => {
- console.log(
- `- ${match.query} (${match.similarityPercentage.toFixed(2)}%)`
- );
+ console.log("─".repeat(50));
+
+ results.forEach((match, index) => {
+ console.log(`\n${index + 1}. Match Details:`);
+ console.log(`  Query: ${match.query}`);
+ console.log(`  Similarity: ${match.similarityPercentage.toFixed(2)}%`);
+ console.log(`  Memory ID: ${match.memoryId}`);
+ console.log("─".repeat(50));
  });
  } else {
- console.log("No matches found");
+ console.log("\n❌ No similar queries found in cache");
  }

  return results;
@@ -155,7 +160,11 @@ export class CacheMemory {
  public async createMemory(
  input: CreateMemoryInput
  ): Promise<CacheMemoryType | undefined> {
- console.log("Searching for similar memory", input);
+ console.log("\n📝 Processing new memory creation");
+ console.log("Content:", input.content);
+ console.log("Type:", input.type);
+ console.log("Scope:", input.scope);
+
  const existingPattern = await this.findSimilarQueries(input.content, {
  similarityThreshold: 95,
  userId: input.userId,
@@ -163,17 +172,18 @@ export class CacheMemory {
  });

  if (existingPattern.length > 0) {
- console.log("\nSimilar cache memory found:");
- existingPattern.forEach((match) => {
- console.log(
- `- ${match.query} (${match.similarityPercentage.toFixed(2)}%)`
- );
+ console.log("\n🔄 Similar cache memory already exists");
+ console.log("─".repeat(50));
+ existingPattern.forEach((match, index) => {
+ console.log(`\n${index + 1}. Existing Match:`);
+ console.log(`  Query: ${match.query}`);
+ console.log(`  Similarity: ${match.similarityPercentage.toFixed(2)}%`);
  });
- console.log("Cache memory already exists. No need to create new one..");
+ console.log("\n⏭️ Skipping creation of new memory");
  return;
  }

- console.log("No similar memory found");
+ console.log("\n🆕 No similar memory found - creating new one");

  const memory = await this.createSingleMemory({
  id: crypto.randomUUID(),
@@ -195,13 +205,17 @@ export class CacheMemory {
  userId?: string;
  scope?: MemoryScope;
  }): Promise<CacheMemoryType> {
- console.log("Creating new cache memory...", params.content);
- console.log("Creating embedding...");
+ console.log("\n🏗️ Creating new cache memory");
+ console.log("ID:", params.id);
+ console.log("Content:", params.content);
+
+ console.log("\n🧮 Generating embedding...");
  const { embedding } = await embed({
  model: openai.embedding("text-embedding-3-small"),
  value: params.content,
  });
- console.log("Embedding created");
+ console.log("Embedding generated successfully");
+
  const memory: CacheMemoryType = {
  id: params.id,
  type: params.type,
@@ -213,7 +227,10 @@ export class CacheMemory {
  params.scope || (params.userId ? MemoryScope.USER : MemoryScope.GLOBAL),
  createdAt: new Date(),
  };
+
  await this.storeMemory(memory);
+ console.log("✅ Memory created and stored successfully");
+
  return memory;
  }
  }
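
Cache matching still converts cosine similarity from the [-1, 1] range onto a 0-100 scale before comparing against `similarityThreshold`. The arithmetic in isolation:

```ts
// (cosine + 1) * 50 maps cosine similarity onto a percentage:
//   cos =  1.0 -> 100 %  (same direction)
//   cos =  0.0 ->  50 %  (orthogonal)
//   cos = -1.0 ->   0 %  (opposite)
const toSimilarityPercentage = (cosine: number): number => (cosine + 1) * 50;

// With the threshold of 95 that createMemory uses, only near-duplicates
// (cosine >= 0.9) count as "already cached".
console.log(toSimilarityPercentage(0.9)); // 95
```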
@@ -186,7 +186,9 @@ export class PersistentMemory {
  * Find best matching memories
  */
  async searchSimilarQueries(query: string, options: SearchOptions = {}) {
- console.log("\nSearching in persistent memory:", query);
+ console.log("\n🔍 Searching in persistent memory");
+ console.log("Query:", query);
+ console.log("Options:", JSON.stringify(options, null, 2));

  // Generate embedding for the query
  const { embedding: queryEmbedding } = await embed({
@@ -196,28 +198,23 @@ export class PersistentMemory {

  const searchResults = [];

- // Requête Meilisearch
- const searchBody = {
- q: query,
- };
-
  // Search in global memories
  if (!options.scope || options.scope === "global") {
  const globalIndex = this._getIndexName(MemoryScope.GLOBAL);
- console.log("Searching in global index:", globalIndex);
+ console.log("\n📚 Searching in global index:", globalIndex);
  try {
  const globalResults = await this._makeRequest<MeilisearchResponse>(
  `/indexes/${globalIndex}/search`,
  {
  method: "POST",
- body: JSON.stringify(searchBody),
+ body: JSON.stringify({ q: query }),
  }
  );
  if (globalResults?.hits) {
  searchResults.push(...globalResults.hits);
  }
  } catch (error) {
- console.error("Error searching global index:", error);
+ console.error("Error searching global index:", error);
  }
  }

@@ -231,7 +228,7 @@ export class PersistentMemory {
  `/indexes/${userIndex}/search`,
  {
  method: "POST",
- body: JSON.stringify(searchBody),
+ body: JSON.stringify({ q: query }),
  }
  );
  if (userResults.hits) {
@@ -239,14 +236,12 @@ export class PersistentMemory {
  }
  }

- console.log(
- `📚 Found ${searchResults.length} queries in persistent memory`
- );
+ const totalResults = searchResults.length;
+ console.log(`\n📊 Found ${totalResults} total matches`);

  // Process and filter results using cosine similarity
  const results = searchResults
  .flatMap((hit) => {
- // Calculate similarities for each chunk
  const chunkSimilarities = hit.chunks.map((chunk) => ({
  data: hit.data,
  purpose: hit.purpose,
@@ -256,7 +251,6 @@ export class PersistentMemory {
  (cosineSimilarity(queryEmbedding, chunk.embedding) + 1) * 50,
  }));

- // Return the chunk with highest similarity
  return chunkSimilarities.reduce(
  (best, current) =>
  current.similarityPercentage > best.similarityPercentage
@@ -271,19 +265,21 @@ export class PersistentMemory {
  )
  .sort((a, b) => b.similarityPercentage - a.similarityPercentage);

- // Log results
+ // Log filtered results in a more structured way
  if (results.length > 0) {
- console.log("\n✨ Similar queries found in persistent memory:");
- results.forEach((match) => {
- console.log(
- `- ${match.query} : ${match.similarityPercentage.toFixed(2)}% (${
- match.purpose
- })`
- );
- console.log(` Matching content: "${match.chunk}"`);
+ console.log("\n✨ Relevant matches found:");
+ console.log("─".repeat(50));
+
+ results.forEach((match, index) => {
+ console.log(`\n${index + 1}. Match Details:`);
+ console.log(`  Query: ${match.query}`);
+ console.log(`  Purpose: ${match.purpose}`);
+ console.log(`  Similarity: ${match.similarityPercentage.toFixed(2)}%`);
+ console.log(`  Content: "${match.chunk}"`);
+ console.log("─".repeat(50));
  });
  } else {
- console.log("No matches found");
+ console.log("\n❌ No relevant matches found");
  }

  return results;
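
For each Meilisearch hit, `searchSimilarQueries` scores every stored chunk and keeps only the best one before sorting and thresholding. The selection step on its own, as a sketch with made-up data:

```ts
type ScoredChunk = { chunk: string; similarityPercentage: number };

// Same reduce pattern as the flatMap above: keep the highest-scoring chunk per hit.
const bestChunk = (chunks: ScoredChunk[]): ScoredChunk =>
  chunks.reduce((best, current) =>
    current.similarityPercentage > best.similarityPercentage ? current : best
  );

console.log(
  bestChunk([
    { chunk: "USDC is a stablecoin", similarityPercentage: 62.4 },
    { chunk: "USDC address is 0xA0b8...6EB48", similarityPercentage: 91.7 },
  ])
); // -> the 91.7 % chunk
```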
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@ai.ntellect/core",
- "version": "0.1.91",
+ "version": "0.1.92",
  "description": "",
  "main": "dist/index.js",
  "scripts": {
package/services/queue.ts CHANGED
@@ -20,37 +20,38 @@ export class ActionQueueManager {

  addToQueue(actions: QueueItem | QueueItem[]) {
  if (Array.isArray(actions)) {
- console.log(
- "Adding actions to queue:",
- actions.map((a) => a.name).join(", ")
- );
+ console.log("\n📋 Adding actions to queue:");
+ actions.forEach((action, index) => {
+ console.log(`  ${index + 1}. ${action.name}`);
+ });
  this.queue.push(...actions);
  } else {
- console.log("Adding action to queue:", actions.name);
+ console.log("\n📋 Adding single action to queue:", actions.name);
  this.queue.push(actions);
  }
  }

  async processQueue() {
  if (this.isProcessing) {
- console.warn("Queue is already being processed");
+ console.log("\n⚠️ Queue is already being processed");
  return;
  }

+ console.log("\n🔄 Starting queue processing");
  this.isProcessing = true;
  const actionPromises: Promise<QueueResult>[] = [];

  for (const action of this.queue) {
  const actionConfig = this.actions.find((a) => a.name === action.name);
  if (actionConfig?.confirmation?.requireConfirmation) {
- // Wait for user confirmation before executing this action
+ console.log("\n🔒 Action requires confirmation:", action.name);
  const shouldProceed = await this.callbacks.onConfirmationRequired?.(
  actionConfig.confirmation.message ||
  `Do you want to proceed with action: ${action.name}?`
  );

  if (!shouldProceed) {
- // Skip this action and add a cancelled result
+ console.log("❌ Action cancelled by user:", action.name);
  this.results.push({
  name: action.name,
  parameters: this.formatArguments(action.parameters),
@@ -60,6 +61,7 @@ export class ActionQueueManager {
  });
  continue;
  }
+ console.log("✅ Action confirmed by user");
  }
  const parameters = this.formatArguments(action.parameters);

@@ -83,15 +85,17 @@ export class ActionQueueManager {
  }

  try {
+ console.log("\n⏳ Waiting for all actions to complete...");
  const results = await Promise.all(actionPromises);
  this.results.push(...results);
  this.queue = [];
  this.callbacks.onQueueComplete?.(this.results);
  this.isProcessing = false;
+ console.log("\n✅ Queue processing completed successfully");
  return this.results;
  } catch (error) {
  this.isProcessing = false;
- console.error("Unexpected error in queue processing:", error);
+ console.error("\n❌ Unexpected error in queue processing:", error);
  throw error;
  }
  }
@@ -120,10 +124,12 @@ export class ActionQueueManager {
  }
  }

  private async executeAction(action: QueueItem): Promise<QueueResult> {
- // Call onActionStart callback
+ console.log("\n🎯 Executing action:", action.name);
  this.callbacks.onActionStart?.(action);
+
  const actionConfig = this.actions.find((a) => a.name === action.name);
  if (!actionConfig) {
+ console.error("❌ Action not found:", action.name);
  return {
  name: action.name,
  parameters: {},
@@ -131,7 +137,13 @@ export class ActionQueueManager {
  error: `Action '${action.name}' not found in actions list`,
  };
  }
+
+ console.log(
+ "📝 Action parameters:",
+ JSON.stringify(action.parameters, null, 2)
+ );
  const actionArgs = this.formatArguments(action.parameters);
+
  try {
  const result = await actionConfig.execute(actionArgs);
  const actionResult = {
@@ -140,7 +152,7 @@ export class ActionQueueManager {
  result,
  error: null,
  };
- console.log("Action executed successfully: ", action.name, "🎉");
+ console.log(`\n✨ Action "${action.name}" completed successfully`);
  return actionResult;
  } catch (error) {
  const actionResult = {
@@ -149,8 +161,11 @@ export class ActionQueueManager {
  result: null,
  error: (error as Error).message || "Unknown error occurred",
  };
- console.log("Action failed: ", action.name);
- console.dir(actionResult, { depth: null });
+ console.error(`\n❌ Action "${action.name}" failed:`, error);
+ console.log(
+ "Failed action details:",
+ JSON.stringify(actionResult, null, 2)
+ );
  return actionResult;
  }
  }
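
Actions flagged with `confirmation.requireConfirmation` now log the request and wait on the `onConfirmationRequired` callback before running. A hedged wiring sketch; the `ActionQueueManager` constructor shape and the `askUser` helper are assumptions, only the callback and action fields come from the code above:

```ts
import { ActionQueueManager } from "@ai.ntellect/core/services/queue"; // path assumed
import { ActionSchema } from "@ai.ntellect/core/types"; // path assumed

declare const actions: ActionSchema[];                          // the agent's action list
declare function askUser(message: string): Promise<boolean>;    // hypothetical UI prompt

// Hypothetical constructor call: (actions, callbacks) is assumed, not shown in this diff.
const queue = new ActionQueueManager(actions, {
  onConfirmationRequired: async (message) => {
    // e.g. "Do you want to proceed with action: transfer_tokens?"
    return askUser(message);
  },
  onActionStart: (action) => console.log("starting", action.name),
  onQueueComplete: (results) => console.log("queue done,", results.length, "results"),
});
```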
package/types.ts CHANGED
@@ -59,7 +59,9 @@ export interface ProcessPromptCallbacks {
  export interface ActionSchema {
  name: string;
  description: string;
- parameters: z.ZodSchema;
+ parameters: z.ZodObject<{
+ [key: string]: z.ZodType;
+ }>;
  execute: (args: any) => Promise<any>;
  confirmation?: {
  requireConfirmation: boolean;
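
With `parameters` narrowed from `z.ZodSchema` to a `z.ZodObject`, every action has to describe its arguments as a flat object of named zod fields, which is also what the orchestrator's built-in memory tools do. A conforming sketch (the action itself is hypothetical):

```ts
import { z } from "zod";
import { ActionSchema } from "./types"; // path relative to the package root, as in the source

const getTokenAddress: ActionSchema = {
  name: "get_token_address",
  description: "Resolve a token symbol (e.g. 'USDC') to its on-chain address",
  parameters: z.object({
    symbol: z.string(),
  }),
  execute: async ({ symbol }) => {
    // hypothetical lookup; a real action would hit a token list or an API
    return { symbol, address: "0xA0b8...6EB48" };
  },
};

export default getTokenAddress;
```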