@ai.ntellect/core 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.FR.md +624 -18
- package/README.md +127 -72
- package/agent/index.ts +57 -149
- package/agent/workflow/conditions.ts +16 -0
- package/agent/workflow/handlers/interpreter.handler.ts +48 -0
- package/agent/workflow/handlers/memory.handler.ts +106 -0
- package/agent/workflow/handlers/orchestrator.handler.ts +23 -0
- package/agent/workflow/handlers/queue.handler.ts +34 -0
- package/agent/workflow/handlers/scheduler.handler.ts +61 -0
- package/agent/workflow/index.ts +62 -0
- package/dist/agent/index.d.ts +1 -1
- package/dist/agent/index.js +3 -3
- package/{agent/tools → examples/actions}/get-rss.ts +8 -1
- package/examples/index.ts +10 -15
- package/index.html +42 -0
- package/llm/dynamic-condition/example.ts +36 -0
- package/llm/dynamic-condition/index.ts +108 -0
- package/llm/interpreter/context.ts +5 -12
- package/llm/interpreter/index.ts +20 -16
- package/llm/memory-manager/context.ts +4 -6
- package/llm/memory-manager/index.ts +32 -80
- package/llm/orchestrator/context.ts +5 -8
- package/llm/orchestrator/index.ts +62 -102
- package/llm/orchestrator/types.ts +2 -2
- package/package.json +3 -1
- package/script.js +167 -0
- package/services/{scheduler.ts → agenda.ts} +20 -35
- package/services/cache.ts +298 -0
- package/services/queue.ts +3 -3
- package/services/workflow.ts +491 -0
- package/t.ts +21 -129
- package/tsconfig.json +2 -1
- package/types.ts +91 -12
- package/utils/generate-object.ts +24 -12
- package/utils/inject-actions.ts +3 -3
- package/utils/state-manager.ts +25 -0
- package/bull.ts +0 -5
- package/services/redis-cache.ts +0 -128
- package/t.spec +0 -38
package/llm/interpreter/context.ts
CHANGED
@@ -11,18 +11,9 @@ export type Character = {
   }[];
 };
 
-export const generalInterpreterCharacter: Character = {
-  role: "You are the general assistant. Your role is to provide a clear and factual analysis of the results.",
-  language: "user_request",
-  guidelines: {
-    important: [],
-    warnings: [],
-  },
-};
-
 export const securityInterpreterCharacter: Character = {
   role: "You are the security expert. Your role is to provide a clear and factual analysis of the security of the token/coin.",
-  language: "
+  language: "same_as_request",
   guidelines: {
     important: [
       "Start with a clear security analysis of the token/coin.",
@@ -52,7 +43,8 @@ export const securityInterpreterCharacter: Character = {
 
       ### Bad:
       Speak about the bad points of the security check. If there is no bad point, say "No bad point found"
-
+
+      TRANSLATE ALL THE TEXT TO LANGUAGE OF THE USER REQUEST
       STOP AFTER SECURITY CHECK SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
       --------------------------------
       `,
@@ -62,7 +54,7 @@ export const securityInterpreterCharacter: Character = {
 
 export const marketInterpreterCharacter: Character = {
   role: "You are the market expert. Your role is to provide a clear and factual analysis of the market sentiment of the token/coin.",
-  language: "
+  language: "same_as_request",
   guidelines: {
     important: [
       "Start with a clear market sentiment (Market sentiment: Bullish/Bearish/Neutral 📈📉📊) without any additional comments before.",
@@ -93,6 +85,7 @@ export const marketInterpreterCharacter: Character = {
       ### Technical analysis (No sub-sections):
       Speak about key price levels, trading volume, technical indicators, market activity..etc
 
+      TRANSLATE ALL THE TEXT TO LANGUAGE OF THE USER REQUEST
       STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
       --------------------------------
       `,
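All three interpreter characters now share the same Character contract, with language pinned to "same_as_request". A hedged sketch of defining a custom character against that contract follows; the import path and the optional fields are assumptions inferred from the hunks above, not confirmed package API:

// Hypothetical sketch — not part of the published diff.
import { Character } from "./llm/interpreter/context"; // path assumed from the file list

const newsInterpreterCharacter: Character = {
  role: "You are the news expert. Your role is to provide a clear and factual summary of the results.",
  language: "same_as_request", // the 0.5.0 convention introduced in these hunks
  guidelines: {
    important: ["Start with a one-line summary of the key findings."],
    warnings: ["Never invent sources."],
    // `steps` is also destructured by the Interpreter and is assumed optional here
  },
};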
package/llm/interpreter/index.ts
CHANGED
@@ -1,8 +1,9 @@
 import { LanguageModel, streamText, StreamTextResult } from "ai";
 import { z } from "zod";
-import { Behavior,
+import { Behavior, MyContext, SharedState } from "../../types";
 import { generateObject } from "../../utils/generate-object";
 import { LLMHeaderBuilder } from "../../utils/header-builder";
+import { State } from "../orchestrator/types";
 
 const interpreterSchema = z.object({
   requestLanguage: z
@@ -46,8 +47,7 @@ export class Interpreter {
     this.character = character;
   }
 
-  private buildContext(
-    const { userRequest, results } = state;
+  private buildContext() {
     const { role, language, guidelines } = this.character;
     const { important, warnings, steps } = guidelines;
 
@@ -68,14 +68,11 @@ export class Interpreter {
     if (warnings.length > 0) {
       context.addHeader("NEVER", warnings);
     }
-
-    context.addHeader("CURRENT_RESULTS", results);
     return context;
   }
 
   async process(
-
-    state: State,
+    state: SharedState<MyContext>,
     onFinish?: (event: any) => void
   ): Promise<
     | {
@@ -88,17 +85,24 @@
     | StreamTextResult<Record<string, any>>
   > {
     try {
-      console.log("\n🎨
-
-
-
-
-
+      console.log("\n🎨 Starting interpretation process");
+
+      const context = this.buildContext();
+      let prompt = LLMHeaderBuilder.create();
+      if (state.messages) {
+        prompt.addHeader(
+          "REQUEST",
+          state.messages[state.messages.length - 2].content.toString()
+        );
+      }
+      if (state.context.results) {
+        prompt.addHeader("RESULTS", JSON.stringify(state.context.results));
+      }
       const result = await generateObject<InterpretationResult>({
         model: this.model,
-        prompt,
+        prompt: prompt.toString(),
         system: context.toString(),
-        temperature:
+        temperature: 0.5,
         schema: interpreterSchema,
       });
 
@@ -118,7 +122,7 @@
       console.log("\n🎨 Starting streaming interpretation");
       console.log("Prompt:", prompt);
 
-      const context = this.buildContext(
+      const context = this.buildContext();
 
       const result = await streamText({
         model: this.model,
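These hunks move Interpreter.process from the old State to SharedState<MyContext>, reading the request from state.messages and the results from state.context.results. A hedged sketch of the new call shape; the state literal below is an assumption, and only the fields the hunks actually read are meaningful:

// Hypothetical driver, inferred from the diff — not confirmed package API.
const state = {
  messages: [
    { role: "user", content: "Is this token safe?" },        // read as REQUEST (length - 2)
    { role: "assistant", content: "Running security checks..." },
  ],
  context: { results: JSON.stringify({ audit: "ok" }) },      // read as RESULTS
} as SharedState<MyContext>;

const result = await interpreter.process(state, (event) => {
  console.log("Interpretation finished:", event);
});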
package/llm/memory-manager/context.ts
CHANGED
@@ -1,15 +1,13 @@
 import { Character } from "../interpreter/context";
 
 export const memoryManagerInstructions: Character = {
-  role: "You are the memory curator. Your role is to extract
-  language: "
+  role: "You are the memory curator. Your role is to extract relevant memories from interactions.",
+  language: "same_as_request",
   guidelines: {
     important: [
-      "Generate
-      "
-      "Should be short-term memories only if it's ephemeral but relevant and reusable",
+      "Generate query for requested data as the user could ask for it later (Eg: 'What is the price of Bitcoin today?')s",
+      "Short-term memories need to be necessary and reusable",
       "Only store as long-term: User information, User preferences, Important facts that don't change often, Historical milestones",
-      "Make memory data concise and clear",
       "Set appropriate TTL based on data volatility",
     ],
     warnings: [
package/llm/memory-manager/index.ts
CHANGED
@@ -2,10 +2,9 @@ import { LanguageModelV1 } from "ai";
 import { z } from "zod";
 import { CacheMemory } from "../../memory/cache";
 import { PersistentMemory } from "../../memory/persistent";
-import {
+import { MyContext, SharedState } from "../../types";
 import { generateObject } from "../../utils/generate-object";
 import { LLMHeaderBuilder } from "../../utils/header-builder";
-import { State } from "../orchestrator/types";
 import { memoryManagerInstructions } from "./context";
 
 interface MemoryResponse {
@@ -27,7 +26,7 @@ interface MemoryResponse {
 }
 export class MemoryManager {
   private readonly model: LanguageModelV1;
-
+  public readonly memory?: {
     cache?: CacheMemory;
     persistent?: PersistentMemory;
   };
@@ -43,19 +42,39 @@ export class MemoryManager {
     this.memory = config.memory;
   }
 
-  buildContext(
+  buildContext() {
     const context = LLMHeaderBuilder.create()
       .addHeader("ROLE", memoryManagerInstructions.role)
       .addHeader("LANGUAGE", memoryManagerInstructions.language)
       .addHeader("IMPORTANT", memoryManagerInstructions.guidelines.important)
-      .addHeader("WARNINGS", memoryManagerInstructions.guidelines.warnings)
-      .addHeader("CURRENT_CONTEXT", state.currentContext)
-      .addHeader("RESULTS", JSON.stringify(state.results));
+      .addHeader("WARNINGS", memoryManagerInstructions.guidelines.warnings);
     return context.toString();
   }
 
-  async process(
-
+  async process(
+    state: SharedState<MyContext>,
+    callbacks?: {
+      onMemoriesGenerated?: (event: any) => void;
+    }
+  ) {
+    const context = this.buildContext();
+    let prompt = LLMHeaderBuilder.create();
+    if (state.messages) {
+      prompt.addHeader(
+        "REQUEST",
+        state.messages[state.messages.length - 2].content.toString()
+      );
+    }
+    if (state.messages && state.messages.length > 0) {
+      prompt.addHeader("RECENT_MESSAGES", JSON.stringify(state.messages));
+    }
+
+    if (state.context.actions) {
+      prompt.addHeader(
+        "PREVIOUS_ACTIONS",
+        JSON.stringify(state.context.actions)
+      );
+    }
 
     const memories = await generateObject<MemoryResponse>({
       model: this.model,
@@ -79,85 +98,18 @@ export class MemoryManager {
         })
       ),
     }),
-      prompt: state.currentContext,
       system: context.toString(),
       temperature: 1,
+      prompt: prompt.toString(),
     });
 
-    console.log("Memories:", memories.object.memories);
-
     if (!this.memory) {
       return;
     }
 
-
-
-    // Store short-term memories in cache
-    ...memories.object.memories
-      .filter((m: any) => m.type === "short-term")
-      .map(async (memoryItem: any) => {
-        if (!this.memory?.cache) {
-          return;
-        }
-
-        const existingCacheMemories =
-          await this.memory.cache.findSimilarActions(memoryItem.data, {
-            similarityThreshold: 85,
-            maxResults: 3,
-            scope: MemoryScope.GLOBAL,
-          });
-
-        if (existingCacheMemories.length > 0) {
-          console.log(
-            "⚠️ Similar memory already exists in cache:",
-            memoryItem.data
-          );
-          return;
-        }
-
-        await this.memory.cache.createMemory({
-          query: memoryItem.queryForMemory,
-          data: memoryItem.data,
-          ttl: memoryItem.ttl, // Use TTL from LLM
-        });
-        console.log("✅ Memory stored in cache:", memoryItem.data);
-      }),
-
-    // Store long-term memories in persistent storage
-    ...memories.object.memories
-      .filter((m) => m.type === "long-term")
-      .map(async (memoryItem) => {
-        if (!this.memory?.persistent) {
-          return;
-        }
-
-        const existingPersistentMemories =
-          await this.memory.persistent.findRelevantDocuments(
-            memoryItem.data,
-            {
-              similarityThreshold: 85,
-            }
-          );
-
-        if (existingPersistentMemories.length > 0) {
-          console.log(
-            "⚠️ Similar memory already exists in persistent storage:",
-            memoryItem.data
-          );
-          return;
-        }
+    if (callbacks?.onMemoriesGenerated)
+      callbacks.onMemoriesGenerated(memories.object);
 
-
-      query: memoryItem.queryForMemory,
-      data: memoryItem.data,
-      category: memoryItem.category,
-      tags: memoryItem.tags,
-      roomId: "global",
-      createdAt: new Date(),
-      id: crypto.randomUUID(),
-      });
-      console.log("✅ Memory stored in persistent storage:", memoryItem);
-      }),
-    ]);
+    return memories.object;
   }
 }
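The 0.4.0 dedup-and-store logic is gone: process now returns the generated memories and surfaces them through a callback, leaving persistence to the caller. A hedged sketch of the new flow; the event shape is assumed to mirror memories.object from the hunk above:

// Hypothetical usage — storage wiring is the caller's job in 0.5.0.
const memories = await memoryManager.process(state, {
  onMemoriesGenerated: (event) => {
    // e.g. write short-term items to CacheMemory and long-term items to
    // PersistentMemory via memoryManager.memory — assumed, not shown in the diff
    console.log("Generated memories:", event.memories);
  },
});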
package/llm/orchestrator/context.ts
CHANGED
@@ -1,16 +1,13 @@
 import { Character } from "../interpreter/context";
 
 export const orchestratorInstructions: Character = {
-  role: "
-  language: "
+  role: "Your role is to evaluate the current state and determine next actions.",
+  language: "same_as_request",
   guidelines: {
     important: [
-      "
-      "
-      "
-      "Social responses can be partial while gathering more data",
-      "Set shouldContinue to false if no more actions are needed",
-      "Once all actions are completed, choose the right interpreter to interpret the results",
+      "If no actions are needed, just answer",
+      "If required, you can schedule actions in cron expression to be executed later",
+      "If required, you choose one interpreter to interpret the results when you have a complete picture of the goal",
     ],
     warnings: [
       "Never use a tool if it's not related to the user request",
package/llm/orchestrator/index.ts
CHANGED
@@ -1,30 +1,17 @@
-import { LanguageModelV1 } from "ai";
+import { generateObject, LanguageModelV1 } from "ai";
 import { z } from "zod";
 import { CacheMemory } from "../../memory/cache";
 import { PersistentMemory } from "../../memory/persistent";
-import {
-import { CacheConfig, RedisCache } from "../../services/redis-cache";
-import { TaskScheduler } from "../../services/scheduler";
-import {
-  ActionSchema,
-  GenerateObjectResponse,
-  MemoryScope,
-  QueueCallbacks,
-} from "../../types";
-import { generateObject } from "../../utils/generate-object";
+import { ActionSchema, MemoryScope, MyContext, SharedState } from "../../types";
 import { LLMHeaderBuilder } from "../../utils/header-builder";
 import { injectActions } from "../../utils/inject-actions";
 import { Interpreter } from "../interpreter";
 import { orchestratorInstructions } from "./context";
-import { State } from "./types";
 
-export class
+export class Orchestrator {
   private readonly model: LanguageModelV1;
   private readonly tools: ActionSchema[];
   private readonly interpreters: Interpreter[];
-  private readonly queueManager: ActionQueueManager;
-  private readonly scheduler: TaskScheduler;
-  private readonly cache: RedisCache;
   private memory?: {
     persistent?: PersistentMemory;
     cache?: CacheMemory;
@@ -34,23 +21,18 @@ export class AgentRuntime {
     model: LanguageModelV1,
     tools: ActionSchema[],
     interpreters: Interpreter[],
-    redisConfig: CacheConfig,
     memory?: {
       persistent?: PersistentMemory;
       cache?: CacheMemory;
-    }
-    callbacks?: QueueCallbacks
+    }
   ) {
     this.model = model;
     this.tools = tools;
     this.interpreters = interpreters;
-    this.queueManager = new ActionQueueManager(tools, callbacks);
     this.memory = memory;
-    this.cache = new RedisCache(redisConfig);
-    this.scheduler = new TaskScheduler(this, this.cache);
   }
 
-  private async buildContext(state:
+  private async buildContext(state: SharedState<MyContext>): Promise<string> {
     console.log("🧠 Building context with RAG and CAG...");
     const context = LLMHeaderBuilder.create();
 
@@ -65,18 +47,10 @@ export class AgentRuntime {
     // Add tools to context
     context.addHeader("TOOLS", injectActions(this.tools));
 
-    // Add previous actions if any
-    if (state.previousActions?.length) {
-      context.addHeader(
-        "PREVIOUS_ACTIONS",
-        JSON.stringify(state.previousActions)
-      );
-    }
-
     // Get recent similar actions (CAG)
-    if (this.memory?.cache) {
+    if (this.memory?.cache && state.messages) {
       const cacheMemories = await this.memory.cache.findSimilarActions(
-        state.
+        state.messages[state.messages.length - 1].content.toString(),
         {
           similarityThreshold: 80,
           maxResults: 3,
@@ -90,10 +64,10 @@ export class AgentRuntime {
     }
 
     // Get relevant knowledge (RAG)
-    if (this.memory?.persistent) {
+    if (this.memory?.persistent && state.messages) {
       const persistentMemory =
         await this.memory.persistent.findRelevantDocuments(
-          state.
+          state.messages[state.messages.length - 1].content.toString(),
           {
             similarityThreshold: 80,
           }
@@ -109,15 +83,25 @@ export class AgentRuntime {
 
     // Add available interpreters
     context.addHeader(
-      "
+      "INTERPRETERS (choose one)",
       JSON.stringify(this.interpreters.map((i) => i.name))
+        .replace("[", "")
+        .replace("]", "")
     );
-    console.log("Context built with memories", context.toString());
     return context.toString();
   }
 
-  async process(
-
+  async process(
+    state: SharedState<MyContext>,
+    callbacks?: {
+      onStart?: () => void;
+      onFinish?: (event: any) => void;
+    }
+  ): Promise<{
+    processing: {
+      stop: boolean;
+      stopReason?: string;
+    };
     actions: Array<{
       name: string;
       parameters: Array<{
@@ -126,37 +110,40 @@ export class AgentRuntime {
     }>;
     scheduler?: {
       isScheduled: boolean;
-
-      interval?: string;
+      cronExpression?: string;
       reason?: string;
     };
   }>;
-
-
-    response?: string;
-    isPartialResponse?: boolean;
-    };
-  interpreter?: string;
+    response: string;
+    interpreter?: string | null;
     results?: string;
   }> {
-
-    console.dir(state, { depth: null });
-    if (state.previousActions?.length) {
-      console.log(
-        "📊 Previous actions:",
-        state.previousActions
-          .map((a) => (typeof a === "string" ? a : a.name))
-          .join(", ")
-      );
-    }
+    if (callbacks?.onStart) callbacks.onStart();
 
     const context = await this.buildContext(state);
+    let prompt = LLMHeaderBuilder.create();
+    if (state.messages) {
+      prompt.addHeader(
+        "REQUEST",
+        state.messages[state.messages.length - 1].content.toString()
+      );
+
+      if (state.messages.length > 1) {
+        prompt.addHeader("RECENT_MESSAGES", JSON.stringify(state.messages));
+      }
+    }
+    if (state.context.results) {
+      prompt.addHeader("ACTIONS_DONE", JSON.stringify(state.context.results));
+    }
 
-    console.log("\n🧠 Generating response from
-    const response = await generateObject
+    console.log("\n🧠 Generating response from Orchestrator...");
+    const response = await generateObject({
       model: this.model,
       schema: z.object({
-
+        processing: z.object({
+          stop: z.boolean(),
+          reason: z.string(),
+        }),
         actions: z.array(
           z.object({
             name: z.string(),
@@ -166,67 +153,40 @@ export class AgentRuntime {
             value: z.any(),
           })
         ),
-        scheduler: z
-          .
-
-
-
-          })
-          .optional(),
+        scheduler: z.object({
+          isScheduled: z.boolean(),
+          cronExpression: z.string(),
+          reason: z.string(),
+        }),
         })
       ),
-
-
-        shouldRespond: z.boolean(),
-        response: z.string().optional(),
-        isPartialResponse: z.boolean().optional(),
-        })
-        .optional(),
-      interpreter: z.string().optional(),
+        response: z.string(),
+        interpreter: z.string().or(z.null()),
       }),
-      prompt: state.currentContext,
       system: context.toString(),
       temperature: 0,
+      prompt: prompt.toString(),
     });
     console.log("🔄 Orchestrator response:");
    console.dir(response.object, { depth: null });
 
     // Force shouldContinue to false if no actions are planned
     if (response.object.actions.length === 0) {
-      response.object.
-      console.log("⚠️ No actions planned, forcing
+      response.object.processing.stop = true;
+      console.log("⚠️ No actions planned, forcing isProcessing to false");
     }
 
     // Handle social interactions and actions in a single block
-    if (response.object.
+    if (response.object.response) {
       console.log("\n💬 Processing social response");
-      if (response.object.
-        console.log("📢 Response:", response.object.
+      if (response.object.response) {
+        console.log("📢 Response:", response.object.response);
         // Ensure all parameters have a value property
       }
     }
 
-
-    for (const action of response.object.actions) {
-      if (action.scheduler?.isScheduled) {
-        await this.scheduler.scheduleRequest({
-          originalRequest: state.currentContext,
-          cronExpression: action.scheduler.cronExpression,
-        });
-      }
-    }
-
-    // Store actions in Redis cache
-    if (response.object.actions.length > 0) {
-      const requestId = crypto.randomUUID();
-      await this.cache.storePreviousActions(requestId, response.object.actions);
-    }
-
-    // Store message in recent messages
-    await this.cache.storeRecentMessage(state.currentContext, {
-      socialResponse: response.object.socialResponse,
-    });
+    if (callbacks?.onFinish) callbacks.onFinish(response.object);
 
-    return response.object;
+    return response.object as any;
   }
 }
package/llm/orchestrator/types.ts
CHANGED
@@ -1,11 +1,11 @@
+import { CoreMessage } from "ai";
 import { QueueResult } from "../../types";
 
 export interface State {
   currentContext: string;
   previousActions: (string | QueueResult)[];
-  reward?: number;
-  userRequest?: string;
   results?: string;
+  recentMessages: CoreMessage[];
 }
 
 export interface Action {
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai.ntellect/core",
-  "version": "0.4.0",
+  "version": "0.5.0",
   "description": "",
   "main": "dist/index.js",
   "scripts": {
@@ -16,7 +16,9 @@
     "@ai-sdk/openai": "1.0.6",
     "@types/node-cron": "^3.0.11",
     "ai": "^3.0.0",
+    "chalk": "^5.4.1",
     "ethers": "^6.13.5",
+    "ioredis": "^5.4.2",
     "langchain": "^0.3.11",
     "node-cron": "^3.0.3",
     "readline": "^1.3.0",