@ai.ntellect/core 0.1.85 → 0.1.90

package/agent/index.ts CHANGED
@@ -186,7 +186,6 @@ export class Agent {
     const sanitizedResults = ResultSanitizer.sanitize(this.accumulatedResults);
     const summaryData = JSON.stringify({
       result: sanitizedResults,
-      initialPrompt: actionsResult.initialPrompt,
     });

     this.accumulatedResults = [];
@@ -200,8 +199,13 @@ export class Agent {
     });

     return this.stream
-      ? (await synthesizer.streamProcess(summaryData)).toDataStreamResponse()
-      : await synthesizer.process(summaryData);
+      ? (
+          await synthesizer.streamProcess(
+            actionsResult.initialPrompt,
+            summaryData
+          )
+        ).toDataStreamResponse()
+      : await synthesizer.process(actionsResult.initialPrompt, summaryData);
   }

   private transformActions(actions: ActionSchema[]) {
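The two hunks above change how the agent hands data to the synthesizer: the initial prompt is no longer serialized into `summaryData`, it is passed to the synthesizer as a separate first argument. A minimal sketch of the new call shape, using a hypothetical structural type in place of the package's Synthesizer class (only the argument order and the optional second parameter are taken from the diff):

```ts
// Hypothetical stand-in for the package's Synthesizer; only the argument
// order and the optional summaryData parameter come from the diff above.
type SynthesizerLike = {
  process(initialPrompt: string, summaryData?: string): Promise<unknown>;
  streamProcess(
    initialPrompt: string,
    summaryData?: string
  ): Promise<{ toDataStreamResponse(): unknown }>;
};

async function summarize(
  synthesizer: SynthesizerLike,
  initialPrompt: string,
  results: unknown[],
  stream: boolean
) {
  // The prompt no longer travels inside summaryData; it is a separate argument.
  const summaryData = JSON.stringify({ result: results });
  return stream
    ? (await synthesizer.streamProcess(initialPrompt, summaryData)).toDataStreamResponse()
    : synthesizer.process(initialPrompt, summaryData);
}
```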
@@ -9,6 +9,7 @@ export class Orchestrator implements BaseLLM {
   private readonly model = openai("gpt-4o");
   public tools: ActionSchema[];
   private memory: PersistentMemory;
+
   constructor(tools: ActionSchema[], memory: PersistentMemory) {
     this.memory = memory;
     this.tools = [
@@ -24,6 +25,7 @@ export class Orchestrator implements BaseLLM {
         const memories = await this.memory.searchSimilarQueries(query, {
           similarityThreshold: 95,
         });
+
         return memories;
       },
     },
@@ -1,5 +1,5 @@
 export const synthesizerContext = {
-  role: "You are the synthesizer agent. Your role is to provide a clear and factual analysis of the results.",
+  role: "You are the synthesizer agent. Your role is to provide a clear and factual analysis of the results. You are also the expert in the field of security analysis.",
   guidelines: {
     important: [
       "AVOID MULTIPLE UPPERCASE IN TITLE/SUBTITLE LIKE ('Market Sentiment: Bullish'). USE ONLY ONE UPPERCASE IN TITLE/SUBTITLE.",
@@ -25,12 +25,14 @@ export const synthesizerContext = {
       "NEVER explain technical errors or issues. Just say retry later.",
     ],
   },
-  compose: (results: string) => {
+  compose: (initialPrompt: string, summaryData?: string) => {
     return `
     ${JSON.stringify(synthesizerContext.guidelines)}
-    Results: ${results}

-    1. FOR ALL ANALYSIS OF SPECIFIC TOKEN, RESPECT THE FOLLOWING FORMAT:
+    Initial prompt: ${initialPrompt} (Speak in the same language as the initial prompt)
+    Results: ${summaryData}
+
+    1. FOR BASIC ANALYSIS OF COINS/TOKENS, USE THE FOLLOWING FORMAT:
     --------------------------------
     ## Analysis of x/y:

@@ -44,8 +46,21 @@ export const synthesizerContext = {

     STOP AFTER TECHNICAL ANALYSIS SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
     --------------------------------
+
+    2. FOR SECURITY ANALYSIS, USE THE FOLLOWING FORMAT:
+    --------------------------------
+    ## Security analysis of x/y:
+
+    ### Good:
+    Speak about the good points of the security check. If there is no good point, say "No good point found"
+
+    ### Bad:
+    Speak about the bad points of the security check. If there is no bad point, say "No bad point found"
+
+    STOP AFTER SECURITY CHECK SECTION WITHOUT ANY CONCLUDING STATEMENT OR DISCLAIMER OR ADDITIONAL COMMENTS
+    --------------------------------

-    2. OTHERWISE FOR OTHER REQUESTS, USE THE FORMAT YOU WANT.
+    3. OTHERWISE FOR OTHER REQUESTS, USE THE FORMAT YOU WANT.
     `;
   },
 };
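The `compose` helper now takes the user's initial prompt plus an optional results payload; the generated prompt asks the model to answer in the language of that initial prompt and adds a dedicated security-analysis output format. A hedged sketch of a call to the new signature (the `declare` stands in for an import whose path is not shown in this diff, and both arguments are illustrative):

```ts
// Stand-in declaration for the synthesizerContext defined above; the real
// import path is not part of this diff.
declare const synthesizerContext: {
  compose: (initialPrompt: string, summaryData?: string) => string;
};

const prompt = synthesizerContext.compose(
  "Fais une analyse de sécurité du token XYZ", // user prompt, any language
  JSON.stringify({ result: [{ check: "ownership renounced", ok: true }] }) // illustrative results
);
// The returned string embeds the guidelines, the initial prompt (with the
// same-language instruction), and the results; the model then picks format
// 1 (basic analysis), 2 (security analysis), or 3 (free form).
```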
@@ -9,6 +9,7 @@ export class Synthesizer implements BaseLLM {

   async process(
     prompt: string,
+    summaryData?: string,
     onFinish?: (event: any) => void
   ): Promise<
     | {
@@ -32,7 +33,7 @@ export class Synthesizer implements BaseLLM {
         ),
         response: z.string(),
       }),
-      prompt: synthesizerContext.compose(prompt),
+      prompt: synthesizerContext.compose(prompt, summaryData || ""),
       system: synthesizerContext.role,
     });
     console.log("Synthesizer");
@@ -43,11 +44,12 @@ export class Synthesizer implements BaseLLM {

   async streamProcess(
     prompt: string,
+    summaryData?: string,
     onFinish?: (event: any) => void
   ): Promise<StreamTextResult<Record<string, any>>> {
     const result = await streamText({
       model: this.model,
-      prompt: synthesizerContext.compose(prompt),
+      prompt: synthesizerContext.compose(prompt, summaryData || ""),
       onFinish: onFinish,
       system: synthesizerContext.role,
     });
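Both `process` and `streamProcess` gain an optional `summaryData` parameter that is forwarded to `compose` as `summaryData || ""`. A hypothetical usage sketch (the Synthesizer constructor and import are not part of this diff, so the instance is only declared here):

```ts
// Declared stand-in for a constructed Synthesizer; construction details are
// not shown in this diff.
declare const synthesizer: {
  process(prompt: string, summaryData?: string, onFinish?: (event: any) => void): Promise<unknown>;
  streamProcess(prompt: string, summaryData?: string, onFinish?: (event: any) => void): Promise<unknown>;
};

async function demo() {
  const initialPrompt = "Analyze the security of token XYZ";
  const summaryData = JSON.stringify({ result: [] }); // illustrative payload

  // Non-streaming: compose() receives both the prompt and the summary data.
  await synthesizer.process(initialPrompt, summaryData);

  // Streaming: summaryData may be omitted; compose() then receives "".
  await synthesizer.streamProcess(initialPrompt, undefined, (event) => {
    console.log("stream finished", event);
  });
}
```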
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@ai.ntellect/core",
-  "version": "0.1.85",
+  "version": "0.1.90",
   "description": "",
   "main": "dist/index.js",
   "scripts": {