@ai.ntellect/core 0.0.33 → 0.0.35

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
@@ -1,22 +1,26 @@
- import EventEmitter from "events";
  import { Orchestrator } from "../llm/orchestrator";
  import { MemoryCache } from "../memory";
  import { AgentEvent, User } from "../types";
  export declare class Agent {
- private readonly user;
- private readonly dependencies;
- private readonly stream;
  private readonly SIMILARITY_THRESHOLD;
  private readonly MAX_RESULTS;
  private readonly actionHandler;
- constructor(user: User, dependencies: {
+ private readonly user;
+ private readonly orchestrator;
+ private readonly memoryCache;
+ private readonly stream;
+ private readonly maxEvaluatorIteration;
+ private readonly evaluatorIteration;
+ constructor({ user, orchestrator, memoryCache, stream, maxEvaluatorIteration, }: {
+ user: User;
  orchestrator: Orchestrator;
- memoryCache: MemoryCache;
- eventEmitter: EventEmitter;
- }, stream?: boolean);
- start(prompt: string, contextualizedPrompt: string, events: AgentEvent): Promise<any>;
+ memoryCache?: MemoryCache;
+ stream: boolean;
+ maxEvaluatorIteration: number;
+ });
+ process(prompt: string, contextualizedPrompt: string, events: AgentEvent): Promise<any>;
  private handleActions;
+ private handleActionResults;
  private findSimilarActions;
  private transformActions;
- private handleActionResults;
  }
@@ -1,40 +1,57 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.Agent = void 0;
+ const evaluator_1 = require("../llm/evaluator");
  const synthesizer_1 = require("../llm/synthesizer");
  const types_1 = require("../types");
  const queue_item_transformer_1 = require("../utils/queue-item-transformer");
  const ActionHandler_1 = require("./handlers/ActionHandler");
  class Agent {
- constructor(user, dependencies, stream = true) {
- this.user = user;
- this.dependencies = dependencies;
- this.stream = stream;
+ constructor({ user, orchestrator, memoryCache, stream, maxEvaluatorIteration = 1, }) {
  this.SIMILARITY_THRESHOLD = 95;
  this.MAX_RESULTS = 1;
+ this.evaluatorIteration = 0;
+ this.user = user;
+ this.orchestrator = orchestrator;
+ this.memoryCache = memoryCache;
+ this.stream = stream;
+ this.maxEvaluatorIteration = maxEvaluatorIteration;
  this.actionHandler = new ActionHandler_1.ActionHandler();
  }
- async start(prompt, contextualizedPrompt, events) {
- const request = await this.dependencies.orchestrator.process(contextualizedPrompt);
+ async process(prompt, contextualizedPrompt, events) {
+ const request = await this.orchestrator.process(contextualizedPrompt);
  events.onMessage?.(request);
  if (request.actions.length > 0) {
  return this.handleActions({
  initialPrompt: prompt,
+ contextualizedPrompt: contextualizedPrompt,
  actions: request.actions,
  }, events);
  }
  }
- async handleActions({ initialPrompt, actions, }, events) {
+ async handleActions({ initialPrompt, contextualizedPrompt, actions, }, events) {
  const similarActions = await this.findSimilarActions(initialPrompt);
- const predefinedActions = this.transformActions(actions, similarActions);
- const callbacks = {
+ const queueItems = this.transformActions(actions, similarActions);
+ const actionsResult = await this.actionHandler.executeActions(queueItems, this.orchestrator.tools, {
  onQueueStart: events.onQueueStart,
  onActionStart: events.onActionStart,
  onActionComplete: events.onActionComplete,
  onQueueComplete: events.onQueueComplete,
  onConfirmationRequired: events.onConfirmationRequired,
- };
- const actionsResult = await this.actionHandler.executeActions(predefinedActions, this.dependencies.orchestrator.tools, callbacks);
+ });
+ if (this.evaluatorIteration >= this.maxEvaluatorIteration) {
+ return this.handleActionResults({ ...actionsResult, initialPrompt });
+ }
+ const evaluator = new evaluator_1.Evaluator(this.orchestrator.tools);
+ const evaluation = await evaluator.process(initialPrompt, contextualizedPrompt, JSON.stringify(actionsResult.data));
+ events.onMessage?.(evaluation);
+ if (evaluation.nextActions.length > 0) {
+ return this.handleActions({
+ initialPrompt: contextualizedPrompt,
+ contextualizedPrompt: initialPrompt,
+ actions: evaluation.nextActions,
+ }, events);
+ }
  if (!this.actionHandler.hasNonPrepareActions(actionsResult.data)) {
  return {
  data: actionsResult.data,
@@ -43,8 +60,21 @@ class Agent {
  }
  return this.handleActionResults({ ...actionsResult, initialPrompt });
  }
+ async handleActionResults(actionsResult) {
+ const summarizer = new synthesizer_1.Summarizer();
+ const summaryData = JSON.stringify({
+ result: actionsResult.data,
+ initialPrompt: actionsResult.initialPrompt,
+ });
+ return this.stream
+ ? (await summarizer.streamProcess(summaryData)).toDataStreamResponse()
+ : await summarizer.process(summaryData);
+ }
  async findSimilarActions(prompt) {
- return this.dependencies.memoryCache.findBestMatches(prompt, {
+ if (!this.memoryCache) {
+ return [];
+ }
+ return this.memoryCache.findBestMatches(prompt, {
  similarityThreshold: this.SIMILARITY_THRESHOLD,
  maxResults: this.MAX_RESULTS,
  userId: this.user.id,
@@ -59,15 +89,5 @@ class Agent {
  }
  return predefinedActions;
  }
- async handleActionResults(actionsResult) {
- const summarizer = new synthesizer_1.Summarizer();
- const summaryData = JSON.stringify({
- result: actionsResult.data,
- initialPrompt: actionsResult.initialPrompt,
- });
- return this.stream
- ? (await summarizer.streamProcess(summaryData)).toDataStreamResponse()
- : await summarizer.process(summaryData);
- }
  }
  exports.Agent = Agent;
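
In 0.0.35 the Agent constructor takes a single options object and start(...) is renamed to process(...), with the new evaluator loop capped by maxEvaluatorIteration. A minimal sketch of the new call shape, based on the declaration above and the test script added in this release; the import specifiers and the empty tool list are assumptions made for illustration:

import { Agent } from "@ai.ntellect/core/dist/agent";
import { Orchestrator } from "@ai.ntellect/core/dist/llm/orchestrator";
import { MemoryCache } from "@ai.ntellect/core/dist/memory";

async function main() {
  // Tools are ActionSchema objects; an empty list keeps this sketch self-contained.
  const orchestrator = new Orchestrator([]);
  const agent = new Agent({
    user: { id: "1" },
    orchestrator,
    memoryCache: new MemoryCache(), // optional in 0.0.35
    stream: false,
    maxEvaluatorIteration: 1, // caps the evaluator feedback loop
  });
  // 0.0.33 equivalent: agent.start(prompt, contextualizedPrompt, events)
  const prompt = "Analyze the crypto market";
  const result = await agent.process(prompt, prompt, {
    onMessage: (message) => console.log(message),
  });
  console.log(result);
}

main();
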
@@ -0,0 +1,9 @@
+ import { ActionSchema } from "../../types";
+ export declare const evaluatorContext: {
+ role: string;
+ guidelines: {
+ important: string[];
+ never: string[];
+ };
+ compose: (goal: string, results: string, tools: ActionSchema[]) => string;
+ };
@@ -0,0 +1,42 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.evaluatorContext = void 0;
+ exports.evaluatorContext = {
+ role: "You are the evaluator agent. Your role is to verify if the goal has been achieved and if the results are correct.",
+ guidelines: {
+ important: [
+ "IMPORTANT: Verify if all required actions were executed successfully",
+ "IMPORTANT: Check if the results match the initial goal",
+ "IMPORTANT: Identify any missing or incomplete information",
+ ],
+ never: [
+ "NEVER modify the results directly",
+ "NEVER make assumptions about missing data",
+ "NEVER repeat the same action if you already did it",
+ ],
+ },
+ compose: (goal, results, tools) => {
+ return `
+ ${exports.evaluatorContext.role}
+
+ ${exports.evaluatorContext.guidelines.important.join("\n")}
+ ${exports.evaluatorContext.guidelines.never.join("\n")}
+
+ ACTIONS COMPLETED: ${results}
+
+ Initial Goal: ${goal} (You must use the same language)
+
+ The actions available are: ${tools.map((action) => {
+ const parameters = action.parameters;
+ const schemaShape = Object.keys(parameters._def.shape()).join(", ");
+ const actionString = `Name: ${action.name}, Description: ${action.description}, Arguments: { ${schemaShape} }`;
+ return actionString;
+ })}
+
+ Evaluate if the goal has been achieved and provide:
+ 1. Success status with explanation (no action needed)
+ 2. Next actions needed (if any)
+ 3. Why you are doing the next actions or why you are not doing them
+ `;
+ },
+ };
@@ -0,0 +1,7 @@
+ import { ActionSchema } from "../../types";
+ export declare class Evaluator {
+ private readonly model;
+ tools: ActionSchema[];
+ constructor(tools: ActionSchema[]);
+ process(prompt: string, goal: string, results: string): Promise<any>;
+ }
@@ -0,0 +1,49 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Evaluator = void 0;
+ const openai_1 = require("@ai-sdk/openai");
+ const ai_1 = require("ai");
+ const zod_1 = require("zod");
+ const context_1 = require("./context");
+ class Evaluator {
+ constructor(tools) {
+ this.model = (0, openai_1.openai)("gpt-4o");
+ this.tools = tools;
+ }
+ async process(prompt, goal, results) {
+ try {
+ const response = await (0, ai_1.generateObject)({
+ model: this.model,
+ schema: zod_1.z.object({
+ nextActions: zod_1.z.array(zod_1.z.object({
+ name: zod_1.z.string(),
+ parameters: zod_1.z.object({
+ name: zod_1.z.string(),
+ value: zod_1.z.string(),
+ }),
+ })),
+ why: zod_1.z.string(),
+ }),
+ prompt: prompt,
+ system: context_1.evaluatorContext.compose(goal, results, this.tools),
+ });
+ const validatedResponse = {
+ ...response.object,
+ nextActions: response.object.nextActions.map((action) => ({
+ ...action,
+ parameters: action.parameters || {},
+ })),
+ };
+ return validatedResponse;
+ }
+ catch (error) {
+ if (error) {
+ return {
+ ...error.value,
+ };
+ }
+ // throw error;
+ }
+ }
+ }
+ exports.Evaluator = Evaluator;
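
The new Evaluator follows the same generateObject pattern as the Orchestrator: it receives the ActionSchema tools and returns an object of the shape { nextActions: [{ name, parameters: { name, value } }], why }, which Agent.handleActions uses to decide whether to run another round of actions. A rough sketch of that internal step; the function wrapper, variable names, and import paths are assumptions for illustration:

import { Evaluator } from "@ai.ntellect/core/dist/llm/evaluator";
import { Orchestrator } from "@ai.ntellect/core/dist/llm/orchestrator";

async function evaluateRound(
  orchestrator: Orchestrator,
  initialPrompt: string,
  contextualizedPrompt: string,
  actionsResultData: unknown
) {
  // The evaluator shares the orchestrator's tool list so it can propose follow-up actions.
  const evaluator = new Evaluator(orchestrator.tools);
  const evaluation = await evaluator.process(
    initialPrompt,
    contextualizedPrompt,
    JSON.stringify(actionsResultData)
  );
  // An empty nextActions list means the goal is considered achieved; otherwise the
  // Agent re-enters handleActions, up to maxEvaluatorIteration times.
  return evaluation.nextActions.length > 0 ? evaluation.nextActions : null;
}
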
@@ -3,6 +3,7 @@ export declare const orchestratorContext: {
  role: string;
  guidelines: {
  important: string[];
+ never: string[];
  };
  compose: (tools: ActionSchema[]) => string;
  };
@@ -2,23 +2,26 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.orchestratorContext = void 0;
  exports.orchestratorContext = {
- role: "You are the gateway agent, you are the first agent to be called. You are the one who will decide if the user request is clear and if it's possible to achieve the goal.",
+ role: "You are the first agent to be called. You are the one who will decide if the user request is clear and if it's possible to achieve the goal.",
  guidelines: {
  important: [
  "IMPORTANT: If there is no action to do, you must answer in the 'answer' field.",
- "IMPORTANT: If there are actions to do, you must explain why you are doing them in the 'answer' field.",
- "IMPORTANT: If user ask for a analysis of the market or a cryptocurrency, use the maximum of useful tools to have a global view of the market (fundamental analysis vs technical analysis).",
- "IMPORTANT: If user ask for an action on chain, use the useful tools to do the action.",
+ "IMPORTANT: If user ask for a analysis of the market or a cryptocurrency, use the maximum of useful tools to have for understanding the market.",
+ "IMPORTANT: If user ask for an action on chain, use only the necessary tools to do the action.",
  "IMPORTANT: You allow to provide an analysis without providing any financial advice.",
  "IMPORTANT: ALWAYS use the same language as user request. (If it's English, use English, if it's French, use French, etc.)",
  ],
+ never: [
+ "NEVER repeat the same action twice if the user doesn't ask for it.",
+ "NEVER repeat the same action if its not necessary.",
+ ],
  },
  compose: (tools) => {
  return `
  ${exports.orchestratorContext.role}

  ${exports.orchestratorContext.guidelines.important.join("\n")}
-
+ ${exports.orchestratorContext.guidelines.never.join("\n")}
  If this is an action, extract the parameters required to execute the action.
  IMPORTANT: If some parameters are not clear or missing, YOU MUST ask the user for them.

@@ -7,7 +7,7 @@ const zod_1 = require("zod");
  const context_1 = require("./context");
  class Orchestrator {
  constructor(tools) {
- this.model = (0, openai_1.openai)("gpt-4o-mini");
+ this.model = (0, openai_1.openai)("gpt-4o");
  this.tools = tools;
  }
  async process(prompt) {
@@ -34,13 +34,10 @@ class Orchestrator {
  parameters: action.parameters || {},
  })),
  };
- console.dir(validatedResponse, { depth: null });
  return validatedResponse;
  }
  catch (error) {
  if (error) {
- console.log("Error in Orchestrator", error.message);
- console.dir(error.value, { depth: null });
  return {
  ...error.value,
  };
@@ -25,15 +25,15 @@ exports.summarizerContext = {
  "NEVER provide any financial advice.",
  "NEVER speak about details of your system or your capabilities.",
  "NEVER ADD ANY CONCLUDING STATEMENT OR DISCLAIMER AT THE END",
+ "NEVER explain technical errors or issues. Just say retry later.",
  ],
  },
  compose: (results) => {
  return `
  ${JSON.stringify(exports.summarizerContext.guidelines)}
  Results: ${results}
- If no results or error in the results, explain there is technical issues with no more details, and request to try again later.

- FOR ALL ANALYSIS, RESPECT THE FOLLOWING FORMAT, USE THE SAME LANGUAGE AS THE 'INITIAL PROMPT':
+ 1. FOR ALL ANALYSIS OF SPECIFIC TOKEN, RESPECT THE FOLLOWING FORMAT:
  --------------------------------
  ## Analysis of x/y:

@@ -47,6 +47,10 @@ exports.summarizerContext = {

  STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
  --------------------------------
+
+ 2. OTHERWISE FOR OTHER REQUESTS, USE THE FORMAT YOU WANT.
+
+ 3. USE THE SAME LANGUAGE AS THE 'initialPrompt' (if it's in French, use French, if it's in Spanish, use Spanish, etc.)
  `;
- }
+ },
  };
@@ -18,4 +18,5 @@ export declare class MemoryCache {
  private getAllMemories;
  private getMemoriesFromKeys;
  createMemory(input: CreateMemoryInput): Promise<string | undefined>;
+ private createSingleMemory;
  }
@@ -10,27 +10,27 @@ class MemoryCache {
  constructor(options = {}) {
  const ttlInHours = options.cacheTTL ?? 1;
  this.CACHE_TTL = ttlInHours * 60 * 60;
- this.CACHE_PREFIX = options.cachePrefix ?? 'memory:';
+ this.CACHE_PREFIX = options.cachePrefix ?? "memory:";
  this.redis = (0, redis_1.createClient)({
  url: options.redisUrl || process.env.REDIS_URL,
  socket: {
  tls: true,
- rejectUnauthorized: true
- }
+ rejectUnauthorized: true,
+ },
  });
  this.initRedis();
  }
  async initRedis() {
- this.redis.on('error', err => {
- console.error('Redis Client Error:', err);
+ this.redis.on("error", (err) => {
+ console.error("Redis Client Error:", err);
  // Implement retry logic if needed
  });
  try {
  await this.redis.connect();
- console.log('Successfully connected to Redis');
+ console.log("Successfully connected to Redis");
  }
  catch (error) {
- console.error('Failed to connect to Redis:', error);
+ console.error("Failed to connect to Redis:", error);
  // Handle connection failure
  }
  }
@@ -44,39 +44,40 @@ class MemoryCache {
  const prefix = this.getMemoryKey(memory.scope, memory.userId);
  const key = `${prefix}${memory.id}`;
  await this.redis.set(key, JSON.stringify(memory), {
- EX: this.CACHE_TTL
+ EX: this.CACHE_TTL,
  });
  }
  async findBestMatches(query, options = {}) {
  console.log("\n🔍 Searching for query:", query);
  const { embedding } = await (0, ai_1.embed)({
  model: openai_1.openai.embedding("text-embedding-3-small"),
- value: query
+ value: query,
  });
  const memories = await this.getAllMemories(options.scope, options.userId);
  console.log("\n📚 Found", memories.length, "memories to compare with");
  const matches = memories
- .map(memory => {
- const similarities = memory.embeddings.map(emb => {
- const similarity = (0, ai_1.cosineSimilarity)(embedding, emb);
- return (similarity + 1) * 50; // Convert to percentage
- });
- const maxSimilarity = Math.max(...similarities);
+ .map((memory) => {
+ const similarity = (0, ai_1.cosineSimilarity)(embedding, memory.embedding);
+ const similarityPercentage = (similarity + 1) * 50; // Conversion en pourcentage
  console.log(`\n📊 Memory "${memory.purpose}":
- - Similarity: ${maxSimilarity.toFixed(2)}%
- - Original queries: ${memory.queries.join(", ")}`);
+ - Similarity: ${similarityPercentage.toFixed(2)}%
+ - Query: ${memory.query}`);
  return {
  data: memory.data,
- similarityPercentage: maxSimilarity,
+ similarityPercentage,
  purpose: memory.purpose,
+ // Optionnel : ajouter des métadonnées utiles
+ memoryId: memory.id,
  };
  })
- .filter(match => match.similarityPercentage >= (options.similarityThreshold ?? 70))
+ .filter((match) => match.similarityPercentage >= (options.similarityThreshold ?? 70))
  .sort((a, b) => b.similarityPercentage - a.similarityPercentage);
- const results = options.maxResults ? matches.slice(0, options.maxResults) : matches;
+ const results = options.maxResults
+ ? matches.slice(0, options.maxResults)
+ : matches;
  if (results.length > 0) {
  console.log("\n✨ Best matches found:");
- results.forEach(match => {
+ results.forEach((match) => {
  console.log(`- ${match.purpose} (${match.similarityPercentage.toFixed(2)}%)`);
  });
  }
@@ -113,30 +114,24 @@ class MemoryCache {
  return memories;
  }
  async createMemory(input) {
- const { embedding } = await (0, ai_1.embed)({
- model: openai_1.openai.embedding("text-embedding-3-small"),
- value: input.content
- });
  const existingPattern = await this.findBestMatches(input.content, {
  similarityThreshold: 95,
  userId: input.userId,
- scope: input.scope
+ scope: input.scope,
  });
  if (existingPattern.length > 0) {
  console.log("\n🔍 Similar memory found:");
- // Display only the name and similarity percentage
- existingPattern.forEach(match => {
+ existingPattern.forEach((match) => {
  console.log(`- ${match.purpose} (${match.similarityPercentage.toFixed(2)}%)`);
  });
  return;
  }
+ // Générer les variations via GPT-4
  const variations = await (0, ai_1.generateObject)({
  model: (0, openai_1.openai)("gpt-4"),
  schema: zod_1.z.object({
  request: zod_1.z.string().describe("The request to be performed"),
- queries: zod_1.z.array(zod_1.z.object({
- text: zod_1.z.string()
- }))
+ queries: zod_1.z.array(zod_1.z.object({ text: zod_1.z.string() })),
  }),
  prompt: `For this input: "${input.content}"
  Generate similar variations that should match the same context.
@@ -144,25 +139,51 @@ class MemoryCache {
  Data: ${JSON.stringify(input.data)}
  - Keep variations natural and human-like
  - Include the original input
- - Add 3-5 variations`
+ - Add 3-5 variations`,
  });
- const embeddingResults = await (0, ai_1.embedMany)({
- model: openai_1.openai.embedding("text-embedding-3-small"),
- values: variations.object.queries.map(q => q.text)
- });
- const memory = {
+ await this.createSingleMemory({
  id: crypto.randomUUID(),
+ content: input.content,
  type: input.type,
  data: input.data,
  purpose: variations.object.request,
- queries: [input.content, ...variations.object.queries.map(q => q.text)],
- embeddings: [embedding, ...embeddingResults.embeddings],
  userId: input.userId,
- scope: input.scope || (input.userId ? types_1.MemoryScope.USER : types_1.MemoryScope.GLOBAL),
- createdAt: new Date()
+ scope: input.scope,
+ });
+ const variationPromises = variations.object.queries.map(async (variation) => {
+ if (variation.text !== input.content) {
+ await this.createSingleMemory({
+ id: crypto.randomUUID(),
+ content: variation.text,
+ type: input.type,
+ data: input.data,
+ purpose: variations.object.request,
+ userId: input.userId,
+ scope: input.scope,
+ });
+ }
+ });
+ await Promise.all(variationPromises);
+ return variations.object.request;
+ }
+ async createSingleMemory(params) {
+ const { embedding } = await (0, ai_1.embed)({
+ model: openai_1.openai.embedding("text-embedding-3-small"),
+ value: params.content,
+ });
+ const memory = {
+ id: params.id,
+ type: params.type,
+ data: params.data,
+ purpose: params.purpose,
+ query: params.content,
+ embedding,
+ userId: params.userId,
+ scope: params.scope || (params.userId ? types_1.MemoryScope.USER : types_1.MemoryScope.GLOBAL),
+ createdAt: new Date(),
  };
  await this.storeMemory(memory);
- return variations.object.request;
+ return memory;
  }
  }
  exports.MemoryCache = MemoryCache;
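
With this change, each query variation is stored as its own memory entry holding a single query string and a single embedding (see the Memory type change below), instead of one entry carrying queries[] and embeddings[]. A minimal sketch of the public surface as it appears in this diff; the import paths, the MemoryType value, and the example data are assumptions for illustration:

import { MemoryCache } from "@ai.ntellect/core/dist/memory";
import { MemoryScope } from "@ai.ntellect/core/dist/types";

async function rememberAndRecall() {
  // Falls back to process.env.REDIS_URL when redisUrl is not provided.
  const memory = new MemoryCache({ cacheTTL: 1, cachePrefix: "memory:" });

  // createMemory stores the original content plus each generated variation
  // as separate single-embedding entries via createSingleMemory.
  await memory.createMemory({
    content: "Get the top chains by TVL",
    type: "action" as any, // placeholder; MemoryType members are not shown in this diff
    data: { actions: ["get_chains_tvl"] },
    userId: "1",
    scope: MemoryScope.USER,
  });

  // findBestMatches now compares the query embedding against each entry's single embedding.
  const matches = await memory.findBestMatches("top blockchains by total value locked", {
    similarityThreshold: 70,
    maxResults: 3,
    userId: "1",
    scope: MemoryScope.USER,
  });
  console.log(matches);
}
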
package/dist/test.d.ts ADDED
@@ -0,0 +1,40 @@
+ import { z } from "zod";
+ export declare const getChainsTVL: {
+ name: string;
+ description: string;
+ parameters: z.ZodObject<{
+ limit: z.ZodDefault<z.ZodOptional<z.ZodNumber>>;
+ }, "strip", z.ZodTypeAny, {
+ limit: number;
+ }, {
+ limit?: number | undefined;
+ }>;
+ execute: ({ limit }: {
+ limit: number;
+ }) => Promise<{
+ summary: {
+ totalTVL: number;
+ numberOfChains: number;
+ };
+ topChains: {
+ name: string;
+ tvl: number;
+ tokenSymbol: string | null;
+ }[];
+ }>;
+ };
+ export declare const getRssNews: {
+ name: string;
+ description: string;
+ parameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
+ execute: () => Promise<{
+ status: string;
+ items: {
+ title: any;
+ content: string;
+ link: any;
+ date: any;
+ source: any;
+ }[];
+ }>;
+ };
package/dist/test.js ADDED
@@ -0,0 +1,124 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.getRssNews = exports.getChainsTVL = void 0;
+ const rss_parser_1 = __importDefault(require("rss-parser"));
+ const zod_1 = require("zod");
+ const agent_1 = require("./agent");
+ const orchestrator_1 = require("./llm/orchestrator");
+ const memory_1 = require("./memory");
+ exports.getChainsTVL = {
+ name: "get_chains_tvl",
+ description: "Get current TVL (Total Value Locked) of all chains from DeFiLlama",
+ parameters: zod_1.z.object({
+ limit: zod_1.z
+ .number()
+ .optional()
+ .default(10)
+ .describe("Number of top chains to return (default: 10)"),
+ }),
+ execute: async ({ limit }) => {
+ try {
+ const response = await fetch("https://api.llama.fi/v2/chains", {
+ headers: { accept: "*/*" },
+ });
+ if (!response.ok) {
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+ const chains = (await response.json());
+ // Sort chains by TVL in descending order and take top N
+ const topChains = chains
+ .sort((a, b) => b.tvl - a.tvl)
+ .slice(0, limit)
+ .map((chain) => ({
+ name: chain.name,
+ tvl: chain.tvl,
+ tokenSymbol: chain.tokenSymbol,
+ }));
+ const totalTVL = chains.reduce((sum, chain) => sum + chain.tvl, 0);
+ return {
+ summary: {
+ totalTVL,
+ numberOfChains: chains.length,
+ },
+ topChains,
+ };
+ }
+ catch (error) {
+ console.error("Error retrieving chains TVL data:", error);
+ throw new Error(`Failed to fetch chains TVL data: ${error.message}`);
+ }
+ },
+ };
+ const RSS_FEEDS = ["https://www.investing.com/rss/news_301.rss"];
+ const parser = new rss_parser_1.default();
+ function stripHtmlTags(content) {
+ if (!content)
+ return "";
+ return content
+ .replace(/<[^>]*>/g, "")
+ .replace(/\n/g, "")
+ .replace(" ", "");
+ }
+ exports.getRssNews = {
+ name: "get-news-rss",
+ description: "Get latest news about on website",
+ parameters: zod_1.z.object({}),
+ execute: async () => {
+ const itemsPerSource = 5;
+ try {
+ const feedPromises = RSS_FEEDS.map((url) => parser.parseURL(url));
+ const results = await Promise.allSettled(feedPromises);
+ const successfulFeeds = results
+ .filter((result) => {
+ return (result.status === "fulfilled" && result.value?.items?.length > 0);
+ })
+ .map((result) => result.value);
+ const allItems = successfulFeeds
+ .flatMap((feed) => feed.items.slice(0, itemsPerSource))
+ .sort((a, b) => {
+ const dateA = a.pubDate ? new Date(a.pubDate).getTime() : 0;
+ const dateB = b.pubDate ? new Date(b.pubDate).getTime() : 0;
+ return dateB - dateA;
+ })
+ .slice(0, 5)
+ .map((item) => ({
+ title: item.title,
+ content: stripHtmlTags(item.content),
+ link: item.link,
+ date: item.pubDate,
+ source: item.creator || new URL(item.link).hostname,
+ }));
+ const result = {
+ status: "success",
+ items: allItems,
+ };
+ return result;
+ }
+ catch (error) {
+ throw error;
+ }
+ },
+ };
+ (async () => {
+ const orchestrator = new orchestrator_1.Orchestrator([exports.getRssNews, exports.getChainsTVL]);
+ const agent = new agent_1.Agent({
+ user: {
+ id: "1",
+ },
+ orchestrator,
+ memoryCache: new memory_1.MemoryCache(),
+ stream: false,
+ maxEvaluatorIteration: 1,
+ });
+ const prompt = "Analyze le marché des crypto";
+ const context = prompt;
+ const result = await agent.process(prompt, context, {
+ onMessage: (message) => {
+ console.log({ message });
+ },
+ });
+ console.log(result.text);
+ })();
package/dist/types.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { StreamTextResult } from "ai";
+ import { Embedding, StreamTextResult } from "ai";
  import { z } from "zod";
  export interface BaseLLM {
  process: (prompt: string) => Promise<string | object>;
@@ -110,8 +110,8 @@ export interface Memory {
  type: MemoryType;
  data: any;
  purpose: string;
- queries: string[];
- embeddings: number[][];
+ query: string;
+ embedding: Embedding;
  userId?: string;
  scope: MemoryScope;
  createdAt: Date;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@ai.ntellect/core",
- "version": "0.0.33",
+ "version": "0.0.35",
  "description": "",
  "main": "dist/index.js",
  "scripts": {