@ai.ntellect/core 0.3.3 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/.nvmrc +1 -0
  2. package/README.FR.md +242 -247
  3. package/README.md +249 -246
  4. package/agent/index.ts +199 -215
  5. package/agent/tools/get-rss.ts +64 -0
  6. package/bull.ts +5 -0
  7. package/dist/agent/index.d.ts +29 -26
  8. package/dist/agent/index.js +123 -112
  9. package/dist/agent/tools/get-rss.d.ts +16 -0
  10. package/dist/agent/tools/get-rss.js +62 -0
  11. package/dist/bull.d.ts +1 -0
  12. package/dist/bull.js +9 -0
  13. package/dist/examples/index.d.ts +2 -0
  14. package/dist/examples/index.js +89 -0
  15. package/dist/llm/interpreter/context.d.ts +5 -22
  16. package/dist/llm/interpreter/context.js +8 -9
  17. package/dist/llm/interpreter/index.d.ts +9 -5
  18. package/dist/llm/interpreter/index.js +55 -48
  19. package/dist/llm/memory-manager/context.d.ts +2 -0
  20. package/dist/llm/memory-manager/context.js +22 -0
  21. package/dist/llm/memory-manager/index.d.ts +17 -0
  22. package/dist/llm/memory-manager/index.js +107 -0
  23. package/dist/llm/orchestrator/context.d.ts +2 -10
  24. package/dist/llm/orchestrator/context.js +19 -14
  25. package/dist/llm/orchestrator/index.d.ts +36 -21
  26. package/dist/llm/orchestrator/index.js +122 -88
  27. package/dist/llm/orchestrator/types.d.ts +12 -0
  28. package/dist/llm/orchestrator/types.js +2 -0
  29. package/dist/memory/cache.d.ts +6 -5
  30. package/dist/memory/cache.js +31 -21
  31. package/dist/memory/persistent.d.ts +5 -3
  32. package/dist/memory/persistent.js +89 -73
  33. package/dist/services/redis-cache.d.ts +37 -0
  34. package/dist/services/redis-cache.js +93 -0
  35. package/dist/services/scheduler.d.ts +39 -16
  36. package/dist/services/scheduler.js +81 -103
  37. package/dist/services/telegram-monitor.d.ts +0 -15
  38. package/dist/services/telegram-monitor.js +117 -101
  39. package/dist/test.js +106 -172
  40. package/dist/types.d.ts +38 -7
  41. package/dist/utils/generate-object.d.ts +12 -0
  42. package/dist/utils/generate-object.js +90 -0
  43. package/dist/utils/header-builder.d.ts +11 -0
  44. package/dist/utils/header-builder.js +34 -0
  45. package/dist/utils/inject-actions.js +2 -2
  46. package/dist/utils/queue-item-transformer.d.ts +2 -2
  47. package/dist/utils/schema-generator.d.ts +16 -0
  48. package/dist/utils/schema-generator.js +46 -0
  49. package/examples/index.ts +103 -0
  50. package/llm/interpreter/context.ts +20 -8
  51. package/llm/interpreter/index.ts +81 -54
  52. package/llm/memory-manager/context.ts +21 -0
  53. package/llm/memory-manager/index.ts +163 -0
  54. package/llm/orchestrator/context.ts +20 -13
  55. package/llm/orchestrator/index.ts +210 -130
  56. package/llm/orchestrator/types.ts +14 -0
  57. package/memory/cache.ts +37 -31
  58. package/memory/persistent.ts +121 -99
  59. package/package.json +11 -2
  60. package/services/redis-cache.ts +128 -0
  61. package/services/scheduler.ts +102 -141
  62. package/services/telegram-monitor.ts +138 -138
  63. package/t.py +79 -0
  64. package/t.spec +38 -0
  65. package/types.ts +40 -7
  66. package/utils/generate-object.ts +105 -0
  67. package/utils/header-builder.ts +40 -0
  68. package/utils/inject-actions.ts +4 -6
  69. package/utils/queue-item-transformer.ts +2 -1
  70. package/utils/schema-generator.ts +73 -0
  71. package/agent/handlers/ActionHandler.ts +0 -48
  72. package/agent/handlers/ConfirmationHandler.ts +0 -37
  73. package/agent/handlers/EventHandler.ts +0 -35
  74. package/dist/agent/handlers/ActionHandler.d.ts +0 -8
  75. package/dist/agent/handlers/ActionHandler.js +0 -36
  76. package/dist/agent/handlers/ConfirmationHandler.d.ts +0 -7
  77. package/dist/agent/handlers/ConfirmationHandler.js +0 -31
  78. package/dist/agent/handlers/EventHandler.d.ts +0 -10
  79. package/dist/agent/handlers/EventHandler.js +0 -34
  80. package/dist/llm/evaluator/context.d.ts +0 -10
  81. package/dist/llm/evaluator/context.js +0 -24
  82. package/dist/llm/evaluator/index.d.ts +0 -16
  83. package/dist/llm/evaluator/index.js +0 -150
  84. package/llm/evaluator/context.ts +0 -21
  85. package/llm/evaluator/index.ts +0 -193
@@ -0,0 +1,103 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { deepseek } from "@ai-sdk/deepseek";
4
+ import { configDotenv } from "dotenv";
5
+ import readline from "readline";
6
+ import { Agent } from "../agent";
7
+ import { getRssNews } from "../agent/tools/get-rss";
8
+ import { Interpreter } from "../llm/interpreter";
9
+ import {
10
+ generalInterpreterCharacter,
11
+ marketInterpreterCharacter,
12
+ securityInterpreterCharacter,
13
+ } from "../llm/interpreter/context";
14
+ configDotenv();
15
+ // Initialiser l'agent une fois pour toute la session
16
+ const initializeAgent = () => {
17
+ const model = deepseek("deepseek-reasoner");
18
+
19
+ const securityInterpreter = new Interpreter({
20
+ name: "security",
21
+ model,
22
+ character: securityInterpreterCharacter,
23
+ });
24
+ const marketInterpreter = new Interpreter({
25
+ name: "market",
26
+ model,
27
+ character: marketInterpreterCharacter,
28
+ });
29
+ const generalInterpreter = new Interpreter({
30
+ name: "general",
31
+ model,
32
+ character: generalInterpreterCharacter,
33
+ });
34
+
35
+ const agent = new Agent({
36
+ cache: {
37
+ host: process.env.REDIS_HOST || "localhost",
38
+ port: Number(process.env.REDIS_PORT) || 6379,
39
+ },
40
+ orchestrator: {
41
+ model,
42
+ tools: [getRssNews],
43
+ },
44
+ interpreters: [securityInterpreter, marketInterpreter, generalInterpreter],
45
+ memoryManager: {
46
+ model,
47
+ },
48
+ maxIterations: 3,
49
+ });
50
+
51
+ return agent;
52
+ };
53
+
54
+ // Fonction pour lancer une session interactive
55
+ const startChatSession = async () => {
56
+ console.log("Bienvenue dans votre session de chat avec l'agent !");
57
+ console.log("Tapez 'exit' pour quitter.\n");
58
+
59
+ const agent = initializeAgent();
60
+
61
+ const rl = readline.createInterface({
62
+ input: process.stdin,
63
+ output: process.stdout,
64
+ prompt: "Vous > ",
65
+ });
66
+
67
+ let state = {
68
+ currentContext: "",
69
+ previousActions: [],
70
+ };
71
+
72
+ rl.prompt();
73
+
74
+ rl.on("line", async (line) => {
75
+ const input = line.trim();
76
+
77
+ if (input.toLowerCase() === "exit") {
78
+ console.log("Fin de la session. À bientôt !");
79
+ rl.close();
80
+ return;
81
+ }
82
+
83
+ state.currentContext = input;
84
+
85
+ console.log("Agent en réflexion...");
86
+ try {
87
+ const result = await agent.process(state);
88
+ console.log(`Agent > ${result}\n`);
89
+ } catch (error) {
90
+ console.error("Erreur avec l'agent :", error);
91
+ }
92
+
93
+ rl.prompt();
94
+ });
95
+
96
+ rl.on("close", () => {
97
+ console.log("Session terminée.");
98
+ process.exit(0);
99
+ });
100
+ };
101
+
102
+ // Lancer la session de chat
103
+ startChatSession();
@@ -1,15 +1,28 @@
1
- export const generalInterpreterContext = {
1
+ export type Character = {
2
+ role: string;
3
+ language: string;
4
+ guidelines: {
5
+ important: string[];
6
+ warnings: string[];
7
+ };
8
+ examplesMessages?: {
9
+ role: string;
10
+ content: string;
11
+ }[];
12
+ };
13
+
14
+ export const generalInterpreterCharacter: Character = {
2
15
  role: "You are the general assistant. Your role is to provide a clear and factual analysis of the results.",
3
- language: "same_as_user",
16
+ language: "user_request",
4
17
  guidelines: {
5
18
  important: [],
6
19
  warnings: [],
7
20
  },
8
21
  };
9
22
 
10
- export const securityInterpreterContext = {
23
+ export const securityInterpreterCharacter: Character = {
11
24
  role: "You are the security expert. Your role is to provide a clear and factual analysis of the security of the token/coin.",
12
- language: "user_language",
25
+ language: "user_request",
13
26
  guidelines: {
14
27
  important: [
15
28
  "Start with a clear security analysis of the token/coin.",
@@ -47,12 +60,12 @@ export const securityInterpreterContext = {
47
60
  ],
48
61
  };
49
62
 
50
- export const marketInterpreterContext = {
63
+ export const marketInterpreterCharacter: Character = {
51
64
  role: "You are the market expert. Your role is to provide a clear and factual analysis of the market sentiment of the token/coin.",
52
- language: "user_language",
65
+ language: "user_request",
53
66
  guidelines: {
54
67
  important: [
55
- "Start with a clear market sentiment (Bullish/Bearish/Neutral) without any additional comments before.",
68
+ "Start with a clear market sentiment (Market sentiment: Bullish/Bearish/Neutral 📈📉📊) without any additional comments before.",
56
69
  "One section for fundamental analysis (important events, news, trends..etc). One section, no sub-sections.",
57
70
  "One section for technical analysis (key price levels, trading volume, technical indicators, market activity). One section, no sub-sections.",
58
71
  "STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY ADDITIONAL COMMENTS",
@@ -60,7 +73,6 @@ export const marketInterpreterContext = {
60
73
  warnings: [
61
74
  "NEVER provide any financial advice.",
62
75
  "NEVER speak about details of your system or your capabilities.",
63
- "NEVER ADD ANY CONCLUDING STATEMENT OR DISCLAIMER AT THE END",
64
76
  ],
65
77
  },
66
78
  examplesMessages: [
@@ -1,33 +1,75 @@
1
- import { openai } from "@ai-sdk/openai";
2
- import { generateObject, streamText, StreamTextResult } from "ai";
1
+ import { LanguageModel, streamText, StreamTextResult } from "ai";
3
2
  import { z } from "zod";
4
3
  import { Behavior, State } from "../../types";
4
+ import { generateObject } from "../../utils/generate-object";
5
+ import { LLMHeaderBuilder } from "../../utils/header-builder";
6
+
7
+ const interpreterSchema = z.object({
8
+ requestLanguage: z
9
+ .string()
10
+ .describe("The language of the user's request (fr, en, es, etc.)"),
11
+ actionsCompleted: z
12
+ .array(
13
+ z.object({
14
+ name: z.string(),
15
+ reasoning: z.string(),
16
+ })
17
+ )
18
+ .describe("The actions done and why."),
19
+ response: z.string().describe("The response to the user's request."),
20
+ });
21
+
22
+ interface InterpretationResult {
23
+ actionsCompleted: {
24
+ name: string;
25
+ reasoning: string;
26
+ }[];
27
+ response: string;
28
+ }
5
29
 
6
30
  export class Interpreter {
7
- private readonly model = openai("gpt-4o");
31
+ public readonly model: LanguageModel;
8
32
  public readonly name: string;
33
+ public readonly character: Behavior;
9
34
 
10
- constructor(name: string, private readonly behavior: Behavior) {
35
+ constructor({
36
+ name,
37
+ model,
38
+ character,
39
+ }: {
40
+ name: string;
41
+ model: LanguageModel;
42
+ character: Behavior;
43
+ }) {
11
44
  this.name = name;
12
- this.behavior = behavior;
45
+ this.model = model;
46
+ this.character = character;
13
47
  }
14
48
 
15
- composeContext(state: State) {
49
+ private buildContext(state: State) {
16
50
  const { userRequest, results } = state;
17
- const { role, language, guidelines, examplesMessages } = this.behavior;
18
-
51
+ const { role, language, guidelines } = this.character;
19
52
  const { important, warnings, steps } = guidelines;
20
53
 
21
- const context = `
22
- # ROLE: ${role}
23
- # LANGUAGE: ${language}
24
- # IMPORTANT: ${important.join("\n")}
25
- # NEVER: ${warnings.join("\n")}
26
- # USER_REQUEST: ${userRequest}
27
- # CURRENT_RESULTS: ${results}
28
- # STEPS: ${steps?.join("\n") || ""}
29
- # MESSAGES EXAMPLES: ${JSON.stringify(examplesMessages, null, 2)}
30
- `;
54
+ const context = LLMHeaderBuilder.create();
55
+
56
+ if (role) {
57
+ context.addHeader("ROLE", role);
58
+ }
59
+
60
+ if (language) {
61
+ context.addHeader("LANGUAGE", language);
62
+ }
63
+
64
+ if (important.length > 0) {
65
+ context.addHeader("IMPORTANT", important);
66
+ }
67
+
68
+ if (warnings.length > 0) {
69
+ context.addHeader("NEVER", warnings);
70
+ }
71
+
72
+ context.addHeader("CURRENT_RESULTS", results);
31
73
  return context;
32
74
  }
33
75
 
@@ -45,43 +87,27 @@ export class Interpreter {
45
87
  }
46
88
  | StreamTextResult<Record<string, any>>
47
89
  > {
48
- console.log("\n🎨 Starting interpretation process");
49
- console.log("Prompt:", prompt);
50
- console.log("Results to interpret:", JSON.stringify(state, null, 2));
90
+ try {
91
+ console.log("\n🎨 Starting interpretation process");
92
+ console.log("Prompt:", prompt);
93
+ console.log("Results to interpret:", JSON.stringify(state, null, 2));
51
94
 
52
- const context = this.composeContext(state);
53
-
54
- const result = await generateObject({
55
- model: this.model,
56
- schema: z.object({
57
- requestLanguage: z.string(),
58
- actionsCompleted: z.array(
59
- z.object({
60
- name: z.string(),
61
- reasoning: z.string(),
62
- })
63
- ),
64
- response: z.string(),
65
- }),
66
- prompt,
67
- system: context,
68
- });
69
-
70
- console.log("\n✅ Interpretation completed");
71
- console.log("─".repeat(50));
72
- console.log("Generated response:", result.object);
73
-
74
- if (result.object.actionsCompleted.length > 0) {
75
- console.log("\n📋 Suggested actions:");
76
- result.object.actionsCompleted.forEach((action, index) => {
77
- console.log(`\n${index + 1}. Action Details:`);
78
- console.log(` Name: ${action.name}`);
79
- console.log(` Reasoning: ${action.reasoning}`);
95
+ const context = this.buildContext(state);
96
+ console.log("Context:", context.toString());
97
+ const result = await generateObject<InterpretationResult>({
98
+ model: this.model,
99
+ prompt,
100
+ system: context.toString(),
101
+ temperature: 1.3,
102
+ schema: interpreterSchema,
80
103
  });
81
- }
82
104
 
83
- if (onFinish) onFinish(result.object);
84
- return result.object;
105
+ if (onFinish) onFinish(result.object);
106
+ return result.object;
107
+ } catch (error) {
108
+ console.error("Error parsing schema:", error);
109
+ throw error;
110
+ }
85
111
  }
86
112
 
87
113
  async streamProcess(
@@ -92,7 +118,7 @@ export class Interpreter {
92
118
  console.log("\n🎨 Starting streaming interpretation");
93
119
  console.log("Prompt:", prompt);
94
120
 
95
- const context = this.composeContext(state);
121
+ const context = this.buildContext(state);
96
122
 
97
123
  const result = await streamText({
98
124
  model: this.model,
@@ -101,7 +127,8 @@ export class Interpreter {
101
127
  if (onFinish) onFinish(event);
102
128
  },
103
129
  prompt,
104
- system: context,
130
+ system: context.toString(),
131
+ temperature: 1.3,
105
132
  });
106
133
 
107
134
  return result;
@@ -0,0 +1,21 @@
1
+ import { Character } from "../interpreter/context";
2
+
3
+ export const memoryManagerInstructions: Character = {
4
+ role: "You are the memory curator. Your role is to extract and format memories from interactions.",
5
+ language: "user_request",
6
+ guidelines: {
7
+ important: [
8
+ "Generate memories based on the user request",
9
+ "Generate query for requested data as the user could ask for it later",
10
+ "Should be short-term memories only if it's ephemeral but relevant and reusable",
11
+ "Only store as long-term: User information, User preferences, Important facts that don't change often, Historical milestones",
12
+ "Make memory data concise and clear",
13
+ "Set appropriate TTL based on data volatility",
14
+ ],
15
+ warnings: [
16
+ "Never store data that is not provided by the results",
17
+ "Never store data that is not relevant to the user request",
18
+ ],
19
+ },
20
+ examplesMessages: [],
21
+ };
@@ -0,0 +1,163 @@
1
+ import { LanguageModelV1 } from "ai";
2
+ import { z } from "zod";
3
+ import { CacheMemory } from "../../memory/cache";
4
+ import { PersistentMemory } from "../../memory/persistent";
5
+ import { MemoryScope } from "../../types";
6
+ import { generateObject } from "../../utils/generate-object";
7
+ import { LLMHeaderBuilder } from "../../utils/header-builder";
8
+ import { State } from "../orchestrator/types";
9
+ import { memoryManagerInstructions } from "./context";
10
+
11
+ interface MemoryResponse {
12
+ memories: Array<{
13
+ data: string;
14
+ type: "short-term" | "long-term";
15
+ category:
16
+ | "user_information"
17
+ | "user_preference"
18
+ | "task"
19
+ | "current_goal"
20
+ | "news"
21
+ | "fact"
22
+ | "other";
23
+ queryForMemory: string;
24
+ tags: string[];
25
+ ttl: number;
26
+ }>;
27
+ }
28
+ export class MemoryManager {
29
+ private readonly model: LanguageModelV1;
30
+ private readonly memory?: {
31
+ cache?: CacheMemory;
32
+ persistent?: PersistentMemory;
33
+ };
34
+
35
+ constructor(config: {
36
+ model: LanguageModelV1;
37
+ memory?: {
38
+ cache?: CacheMemory;
39
+ persistent?: PersistentMemory;
40
+ };
41
+ }) {
42
+ this.model = config.model;
43
+ this.memory = config.memory;
44
+ }
45
+
46
+ buildContext(state: State) {
47
+ const context = LLMHeaderBuilder.create()
48
+ .addHeader("ROLE", memoryManagerInstructions.role)
49
+ .addHeader("LANGUAGE", memoryManagerInstructions.language)
50
+ .addHeader("IMPORTANT", memoryManagerInstructions.guidelines.important)
51
+ .addHeader("WARNINGS", memoryManagerInstructions.guidelines.warnings)
52
+ .addHeader("CURRENT_CONTEXT", state.currentContext)
53
+ .addHeader("RESULTS", JSON.stringify(state.results));
54
+ return context.toString();
55
+ }
56
+
57
+ async process(state: State, result: string) {
58
+ const context = this.buildContext(state);
59
+
60
+ const memories = await generateObject<MemoryResponse>({
61
+ model: this.model,
62
+ schema: z.object({
63
+ memories: z.array(
64
+ z.object({
65
+ data: z.string(),
66
+ type: z.enum(["short-term", "long-term"]),
67
+ category: z.enum([
68
+ "user_information",
69
+ "user_preference",
70
+ "task",
71
+ "current_goal",
72
+ "news",
73
+ "fact",
74
+ "other",
75
+ ]),
76
+ queryForData: z.string(),
77
+ tags: z.array(z.string()),
78
+ ttl: z.number(),
79
+ })
80
+ ),
81
+ }),
82
+ prompt: state.currentContext,
83
+ system: context.toString(),
84
+ temperature: 1,
85
+ });
86
+
87
+ console.log("Memories:", memories.object.memories);
88
+
89
+ if (!this.memory) {
90
+ return;
91
+ }
92
+
93
+ // Store memories after all processing is complete
94
+ await Promise.all([
95
+ // Store short-term memories in cache
96
+ ...memories.object.memories
97
+ .filter((m: any) => m.type === "short-term")
98
+ .map(async (memoryItem: any) => {
99
+ if (!this.memory?.cache) {
100
+ return;
101
+ }
102
+
103
+ const existingCacheMemories =
104
+ await this.memory.cache.findSimilarActions(memoryItem.data, {
105
+ similarityThreshold: 85,
106
+ maxResults: 3,
107
+ scope: MemoryScope.GLOBAL,
108
+ });
109
+
110
+ if (existingCacheMemories.length > 0) {
111
+ console.log(
112
+ "⚠️ Similar memory already exists in cache:",
113
+ memoryItem.data
114
+ );
115
+ return;
116
+ }
117
+
118
+ await this.memory.cache.createMemory({
119
+ query: memoryItem.queryForMemory,
120
+ data: memoryItem.data,
121
+ ttl: memoryItem.ttl, // Use TTL from LLM
122
+ });
123
+ console.log("✅ Memory stored in cache:", memoryItem.data);
124
+ }),
125
+
126
+ // Store long-term memories in persistent storage
127
+ ...memories.object.memories
128
+ .filter((m) => m.type === "long-term")
129
+ .map(async (memoryItem) => {
130
+ if (!this.memory?.persistent) {
131
+ return;
132
+ }
133
+
134
+ const existingPersistentMemories =
135
+ await this.memory.persistent.findRelevantDocuments(
136
+ memoryItem.data,
137
+ {
138
+ similarityThreshold: 85,
139
+ }
140
+ );
141
+
142
+ if (existingPersistentMemories.length > 0) {
143
+ console.log(
144
+ "⚠️ Similar memory already exists in persistent storage:",
145
+ memoryItem.data
146
+ );
147
+ return;
148
+ }
149
+
150
+ await this.memory.persistent.createMemory({
151
+ query: memoryItem.queryForMemory,
152
+ data: memoryItem.data,
153
+ category: memoryItem.category,
154
+ tags: memoryItem.tags,
155
+ roomId: "global",
156
+ createdAt: new Date(),
157
+ id: crypto.randomUUID(),
158
+ });
159
+ console.log("✅ Memory stored in persistent storage:", memoryItem);
160
+ }),
161
+ ]);
162
+ }
163
+ }
@@ -1,15 +1,22 @@
1
- export const orchestratorContext = {
2
- behavior: {
3
- language: "same_as_user",
4
- role: "Your role is to determine what actions are needed to achieve the user goal.",
5
- guidelines: {
6
- important: [
7
- "If there is no action to do, you must answer in the 'answer' field.",
8
- "If some parameters are not clear or missing, don't add the action, YOU MUST ask the user for them.",
9
- "For QUESTIONS or ANALYSIS, search first in your internal knowledge base before using actions.",
10
- "For ON-CHAIN actions, just use the useful actions.",
11
- ],
12
- warnings: ["NEVER repeat same actions if the user doesn't ask for it."],
13
- },
1
+ import { Character } from "../interpreter/context";
2
+
3
+ export const orchestratorInstructions: Character = {
4
+ role: "You are the orchestrator. Your role is to evaluate the current state and determine next actions.",
5
+ language: "user_request",
6
+ guidelines: {
7
+ important: [
8
+ "Continue executing actions until ALL necessary goals are achieved",
9
+ "You can schedule actions in cron expression to be executed later (if needed)",
10
+ "Only stop when you have a complete picture of the goal",
11
+ "Social responses can be partial while gathering more data",
12
+ "Set shouldContinue to false if no more actions are needed",
13
+ "Once all actions are completed, choose the right interpreter to interpret the results",
14
+ ],
15
+ warnings: [
16
+ "Never use a tool if it's not related to the user request",
17
+ "Never schedule actions that are not related to the user request",
18
+ "Never repeat the same action if it's not required to achieve the goal",
19
+ "Never repeat scheduled actions if not required to achieve the goal",
20
+ ],
14
21
  },
15
22
  };