@elizaos/plugin-research 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +400 -0
- package/dist/index.cjs +9366 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.js +9284 -0
- package/dist/index.js.map +1 -0
- package/package.json +80 -0
- package/src/__tests__/action-chaining.test.ts +532 -0
- package/src/__tests__/actions.test.ts +118 -0
- package/src/__tests__/cache-rate-limiter.test.ts +303 -0
- package/src/__tests__/content-extractors.test.ts +26 -0
- package/src/__tests__/deepresearch-bench-integration.test.ts +520 -0
- package/src/__tests__/deepresearch-bench-simplified.e2e.test.ts +290 -0
- package/src/__tests__/deepresearch-bench.e2e.test.ts +376 -0
- package/src/__tests__/e2e.test.ts +1870 -0
- package/src/__tests__/multi-benchmark-runner.ts +427 -0
- package/src/__tests__/providers.test.ts +156 -0
- package/src/__tests__/real-world.e2e.test.ts +788 -0
- package/src/__tests__/research-scenarios.test.ts +755 -0
- package/src/__tests__/research.e2e.test.ts +704 -0
- package/src/__tests__/research.test.ts +174 -0
- package/src/__tests__/search-providers.test.ts +174 -0
- package/src/__tests__/single-benchmark-runner.ts +735 -0
- package/src/__tests__/test-search-providers.ts +171 -0
- package/src/__tests__/verify-apis.test.ts +82 -0
- package/src/actions.ts +1677 -0
- package/src/benchmark/deepresearch-benchmark.ts +369 -0
- package/src/evaluation/research-evaluator.ts +444 -0
- package/src/examples/api-integration.md +498 -0
- package/src/examples/browserbase-integration.md +132 -0
- package/src/examples/debug-research-query.ts +162 -0
- package/src/examples/defi-code-scenarios.md +536 -0
- package/src/examples/defi-implementation-guide.md +454 -0
- package/src/examples/eliza-research-example.ts +142 -0
- package/src/examples/fix-renewable-energy-research.ts +209 -0
- package/src/examples/research-scenarios.md +408 -0
- package/src/examples/run-complete-renewable-research.ts +303 -0
- package/src/examples/run-deep-research.ts +352 -0
- package/src/examples/run-logged-research.ts +304 -0
- package/src/examples/run-real-research.ts +151 -0
- package/src/examples/save-research-output.ts +133 -0
- package/src/examples/test-file-logging.ts +199 -0
- package/src/examples/test-real-research.ts +67 -0
- package/src/examples/test-renewable-energy-research.ts +229 -0
- package/src/index.ts +28 -0
- package/src/integrations/cache.ts +128 -0
- package/src/integrations/content-extractors/firecrawl.ts +314 -0
- package/src/integrations/content-extractors/pdf-extractor.ts +350 -0
- package/src/integrations/content-extractors/playwright.ts +420 -0
- package/src/integrations/factory.ts +419 -0
- package/src/integrations/index.ts +18 -0
- package/src/integrations/rate-limiter.ts +181 -0
- package/src/integrations/search-providers/academic.ts +290 -0
- package/src/integrations/search-providers/exa.ts +205 -0
- package/src/integrations/search-providers/npm.ts +330 -0
- package/src/integrations/search-providers/pypi.ts +211 -0
- package/src/integrations/search-providers/serpapi.ts +277 -0
- package/src/integrations/search-providers/serper.ts +358 -0
- package/src/integrations/search-providers/stagehand-google.ts +87 -0
- package/src/integrations/search-providers/tavily.ts +187 -0
- package/src/processing/relevance-analyzer.ts +353 -0
- package/src/processing/research-logger.ts +450 -0
- package/src/processing/result-processor.ts +372 -0
- package/src/prompts/research-prompts.ts +419 -0
- package/src/providers/cacheProvider.ts +164 -0
- package/src/providers.ts +173 -0
- package/src/service.ts +2588 -0
- package/src/services/swe-bench.ts +286 -0
- package/src/strategies/research-strategies.ts +790 -0
- package/src/types/pdf-parse.d.ts +34 -0
- package/src/types.ts +551 -0
- package/src/verification/claim-verifier.ts +443 -0
|
@@ -0,0 +1,419 @@
|
|
|
1
|
+
import { ModelType } from '@elizaos/core';
|
|
2
|
+
|
|
3
|
+
/**
 * Advanced prompt templates for deep research
 * These prompts are designed to elicit comprehensive, evidence-based responses
 *
 * Each template is a plain string containing `{placeholder}` tokens that are
 * substituted at call time via `formatPrompt` below.
 */
export const RESEARCH_PROMPTS = {
  /**
   * Query Analysis - Extract deep understanding of research intent
   * Placeholders: {query}. Expects the model to reply with structured JSON.
   */
  QUERY_ANALYSIS: `Analyze this research query with academic rigor:

Query: "{query}"

Provide a comprehensive analysis including:

1. PRIMARY RESEARCH QUESTION
   - Core inquiry (what is being asked)
   - Implicit assumptions
   - Scope boundaries

2. KEY CONCEPTS & ENTITIES
   - Primary concepts (with definitions)
   - Secondary concepts
   - Named entities (people, organizations, places)
   - Technical terms requiring clarification

3. RESEARCH DIMENSIONS
   - Temporal scope (historical/current/predictive)
   - Geographic scope
   - Disciplinary lens
   - Theoretical vs. practical focus

4. METHODOLOGICAL REQUIREMENTS
   - Type of evidence needed (empirical/theoretical/mixed)
   - Required data types (quantitative/qualitative)
   - Appropriate research methods

5. POTENTIAL BIASES & LIMITATIONS
   - Query biases to be aware of
   - Potential blind spots
   - Alternative framings to consider

6. SUCCESS CRITERIA
   - What constitutes a complete answer
   - Required depth of analysis
   - Expected deliverables

Format as structured JSON with all sections.`,

  /**
   * Sub-query Generation - Create comprehensive search strategy
   * Placeholders: {query}, {domain}, {taskType}, {temporalFocus}.
   */
  SUB_QUERY_GENERATION: `Generate a comprehensive search strategy for this research:

Main Query: "{query}"
Domain: {domain}
Task Type: {taskType}
Temporal Focus: {temporalFocus}

Create 7-10 highly specific sub-queries that:
1. Cover all aspects of the main query
2. Target different types of sources (academic, industry, government)
3. Include methodological variations
4. Address potential counter-arguments
5. Seek quantitative data and statistics
6. Find case studies and real-world examples
7. Identify theoretical frameworks
8. Explore historical context
9. Investigate future implications

For EACH sub-query provide:
- QUERY: The exact search query (optimized for search engines)
- PURPOSE: Why this query is essential (2-3 sentences)
- EXPECTED_SOURCES: Types of sources likely to have this information
- KEYWORDS: Additional keywords and synonyms
- PRIORITY: critical/high/medium/low
- DEPENDENCIES: Other queries that must be completed first
- VERIFICATION_NEEDS: What claims will need fact-checking

Use advanced search operators where appropriate (site:, filetype:, intitle:, etc.)`,

  /**
   * Finding Extraction - Deep content analysis
   * Placeholders: {title}, {url}, {query}, {contentLength}, {content}.
   * Expects a JSON array of findings in the response.
   */
  FINDING_EXTRACTION: `Extract comprehensive research findings from this source:

Source: {title} ({url})
Original Query: "{query}"
Content Length: {contentLength} characters

Content:
{content}

Extract ALL significant findings following this structure:

For EACH finding:
1. CORE CLAIM
   - Main assertion (precise statement)
   - Supporting evidence (quotes with context)
   - Confidence level (0-1) with justification

2. CONTEXT & NUANCE
   - Conditions under which claim holds
   - Exceptions or limitations mentioned
   - Conflicting viewpoints presented

3. METHODOLOGY (if applicable)
   - How was this finding derived?
   - Sample size/data sources
   - Statistical significance
   - Potential methodological weaknesses

4. CONNECTIONS
   - How this relates to the main research query
   - Connections to other findings
   - Implications for further research

5. VERIFICATION REQUIREMENTS
   - What needs to be cross-checked
   - Other sources that might confirm/refute
   - Data that should be verified

Extract 5-15 findings, prioritizing:
- Direct answers to research question
- Surprising or counterintuitive insights
- Methodologically robust claims
- Recent developments
- Quantitative data
- Expert opinions with credentials

Format as JSON array with all fields populated.`,

  /**
   * Category Synthesis - Deep thematic analysis
   * Placeholders: {category}, {query}, {findingCount}, {findings}.
   * Expects a 1000-1500 word narrative synthesis in the response.
   */
  CATEGORY_SYNTHESIS: `Synthesize findings within this thematic category:

Category: {category}
Original Query: "{query}"
Number of Findings: {findingCount}

Findings:
{findings}

Create a comprehensive synthesis (1000-1500 words) that:

1. THEMATIC OVERVIEW
   - Define the category's scope
   - Explain relevance to research question
   - Identify major themes and sub-themes

2. EVIDENCE INTEGRATION
   - Synthesize findings into coherent narrative
   - Identify patterns across sources
   - Note frequency of similar claims
   - Highlight strongest evidence

3. CRITICAL ANALYSIS
   - Evaluate quality of evidence
   - Identify methodological strengths/weaknesses
   - Discuss conflicting findings
   - Assess generalizability

4. THEORETICAL FRAMEWORK
   - Connect to established theories
   - Identify new theoretical contributions
   - Discuss paradigm shifts

5. GAPS & LIMITATIONS
   - What questions remain unanswered
   - Methodological gaps in literature
   - Geographic/demographic blind spots
   - Temporal limitations

6. PRACTICAL IMPLICATIONS
   - Real-world applications
   - Policy recommendations
   - Industry implications
   - Future research directions

Use academic writing style with:
- Topic sentences for each paragraph
- Evidence-based arguments
- Balanced perspective
- Clear transitions
- Precise language`,

  /**
   * Report Enhancement - Second pass deep dive
   * Placeholders: {sectionTitle}, {originalLength}, {originalContent}, {detailedSources}.
   */
  REPORT_ENHANCEMENT: `Enhance this research section using detailed source analysis:

Section: {sectionTitle}
Original Content ({originalLength} words):
{originalContent}

Top 10 Source Excerpts (50,000 characters total):
{detailedSources}

Your task is to create a dramatically improved section (1500-2000 words) that:

1. DEPTH ENHANCEMENT
   - Add specific examples from sources
   - Include relevant statistics and data
   - Cite exact studies with methodologies
   - Add historical context
   - Include expert quotes with credentials

2. ANALYTICAL RIGOR
   - Compare conflicting viewpoints
   - Evaluate strength of different arguments
   - Discuss methodological approaches
   - Identify consensus vs. debate areas
   - Address potential biases

3. EVIDENCE INTEGRATION
   - Weave source material naturally
   - Use variety of integration techniques
   - Balance paraphrasing and direct quotes
   - Maintain clear attribution
   - Build arguments progressively

4. CRITICAL INSIGHTS
   - Identify patterns not obvious in sources
   - Make connections between disparate findings
   - Propose new interpretations
   - Highlight surprising discoveries
   - Challenge conventional wisdom where warranted

5. SCHOLARLY APPARATUS
   - Use proper academic citations
   - Include footnotes for tangential points
   - Define technical terms
   - Provide context for specialized knowledge
   - Acknowledge limitations

Maintain sophisticated academic tone while ensuring clarity. Every major claim must be supported by evidence from the provided sources.`,

  /**
   * Verification Check - Fact verification system
   * Placeholders: {claim}, {sourceUrl}, {evidence}, {sourceContent}.
   * Expects a VERIFIED/PARTIALLY_VERIFIED/UNVERIFIED/CONTRADICTED verdict.
   */
  CLAIM_VERIFICATION: `Verify this factual claim against source evidence:

Claim: "{claim}"
Claimed Source: {sourceUrl}
Supporting Evidence Provided: "{evidence}"

Source Content (relevant excerpt):
{sourceContent}

Perform rigorous verification:

1. EXACT MATCH ANALYSIS
   - Does the source contain this exact claim? (quote if yes)
   - Is the claim a reasonable interpretation? (explain)
   - Are there qualifiers in source not in claim?

2. CONTEXT EVALUATION
   - What is the broader context in the source?
   - Could the claim be misleading out of context?
   - Are there contradicting statements nearby?

3. PRECISION CHECK
   - Are numbers/dates/names exactly correct?
   - Is the scope accurately represented?
   - Are correlations presented as causations?

4. SOURCE CREDIBILITY
   - Is this a primary or secondary source?
   - What are the author's credentials?
   - Is this peer-reviewed/officially published?
   - Are there potential conflicts of interest?

5. VERIFICATION RESULT
   - Status: VERIFIED/PARTIALLY_VERIFIED/UNVERIFIED/CONTRADICTED
   - Confidence: 0-1 with detailed justification
   - Corrections needed (if any)
   - Additional sources needed for confirmation

Be extremely rigorous - default to "unverified" unless evidence is clear.`,

  /**
   * Gap Analysis - Identify research gaps
   * Placeholders: {query}, {researchSummary}, {categories}, {sourceCount}, {keyFindings}.
   */
  GAP_ANALYSIS: `Analyze research completeness and identify gaps:

Original Query: "{query}"
Research Summary: {researchSummary}
Categories Covered: {categories}
Sources Analyzed: {sourceCount}
Key Findings: {keyFindings}

Perform comprehensive gap analysis:

1. COVERAGE ASSESSMENT
   - Which aspects of the query are well-covered?
   - Which aspects have limited coverage?
   - What perspectives are missing?
   - Geographic/temporal gaps?

2. EVIDENCE QUALITY GAPS
   - Where is evidence weak or outdated?
   - Which claims lack sufficient support?
   - Where are better methodologies needed?
   - What quantitative data is missing?

3. THEORETICAL GAPS
   - Missing theoretical frameworks?
   - Unaddressed assumptions?
   - Alternative explanations not considered?
   - Interdisciplinary perspectives needed?

4. PRACTICAL GAPS
   - Real-world applications not explored?
   - Implementation challenges not addressed?
   - Cost-benefit analyses missing?
   - Stakeholder perspectives absent?

5. PRIORITIZED RECOMMENDATIONS
For each gap, specify:
- Specific searches needed
- Types of sources to target
- Experts to consult
- Data to acquire
- Estimated impact on research quality

Format as actionable gap-filling strategy.`,

  /**
   * Claim Extraction - Extract verifiable claims from text
   * Placeholders: {text}, {sourceCount}. Expects a JSON object with a "claims" array.
   */
  CLAIM_EXTRACTION: `Extract specific, verifiable claims from the following text.

Text: "{text}"

Number of available sources: {sourceCount}

Extract claims that are:
1. SPECIFIC and factual (not vague statements)
2. VERIFIABLE against sources
3. IMPORTANT to the topic

For each claim, provide:
- statement: The exact claim being made
- confidence: Confidence level (0-1)
- sources: URLs of sources that might support this
- evidence: Key supporting evidence snippets
- category: Category of the claim

Return as JSON:
{
  "claims": [
    {
      "statement": "specific factual claim",
      "confidence": 0.8,
      "sources": ["url1", "url2"],
      "evidence": ["supporting snippet 1", "supporting snippet 2"],
      "category": "category name"
    }
  ]
}

Extract at least 5-10 key claims from the text.`,
};
|
|
367
|
+
|
|
368
|
+
/**
|
|
369
|
+
* Helper function to format prompts with variables
|
|
370
|
+
*/
|
|
371
|
+
export function formatPrompt(template: string, variables: Record<string, any>): string {
|
|
372
|
+
let formatted = template;
|
|
373
|
+
for (const [key, value] of Object.entries(variables)) {
|
|
374
|
+
formatted = formatted.replace(new RegExp(`{${key}}`, 'g'), String(value));
|
|
375
|
+
}
|
|
376
|
+
return formatted;
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
/**
|
|
380
|
+
* Get prompt configuration for different research phases
|
|
381
|
+
*/
|
|
382
|
+
export function getPromptConfig(phase: string): {
|
|
383
|
+
temperature: number;
|
|
384
|
+
maxTokens: number;
|
|
385
|
+
modelType: string;
|
|
386
|
+
} {
|
|
387
|
+
switch (phase) {
|
|
388
|
+
case 'analysis':
|
|
389
|
+
return {
|
|
390
|
+
temperature: 0.3,
|
|
391
|
+
maxTokens: 2000,
|
|
392
|
+
modelType: ModelType.TEXT_LARGE,
|
|
393
|
+
};
|
|
394
|
+
case 'extraction':
|
|
395
|
+
return {
|
|
396
|
+
temperature: 0.2,
|
|
397
|
+
maxTokens: 4000,
|
|
398
|
+
modelType: ModelType.TEXT_LARGE,
|
|
399
|
+
};
|
|
400
|
+
case 'synthesis':
|
|
401
|
+
return {
|
|
402
|
+
temperature: 0.7,
|
|
403
|
+
maxTokens: 4000,
|
|
404
|
+
modelType: ModelType.TEXT_LARGE,
|
|
405
|
+
};
|
|
406
|
+
case 'verification':
|
|
407
|
+
return {
|
|
408
|
+
temperature: 0.1,
|
|
409
|
+
maxTokens: 1500,
|
|
410
|
+
modelType: ModelType.TEXT_LARGE,
|
|
411
|
+
};
|
|
412
|
+
default:
|
|
413
|
+
return {
|
|
414
|
+
temperature: 0.5,
|
|
415
|
+
maxTokens: 2000,
|
|
416
|
+
modelType: ModelType.TEXT_LARGE,
|
|
417
|
+
};
|
|
418
|
+
}
|
|
419
|
+
}
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
import { Provider, IAgentRuntime, Memory, State, ProviderResult } from '@elizaos/core';
|
|
2
|
+
import { validateActionKeywords, validateActionRegex } from "@elizaos/core";
|
|
3
|
+
|
|
4
|
+
// Cache manager interface
// Minimal async string key/value store contract used by cacheProvider and
// cacheMiddleware below.
interface CacheManager {
  // Returns the stored string, or null when the key is absent.
  get: (key: string) => Promise<string | null>;
  // Stores a string value. expirationTTL is an optional time-to-live
  // (callers below pass 300 for a "5 minute TTL", so presumably seconds —
  // TODO confirm unit against the concrete implementation).
  set: (key: string, value: string, expirationTTL?: number) => Promise<void>;
  // Removes a single entry.
  delete: (key: string) => Promise<void>;
  // Removes all entries.
  clear: () => Promise<void>;
}
|
|
11
|
+
|
|
12
|
+
// Extend IAgentRuntime to include cacheManager
// Both members are optional; all code below degrades gracefully when they
// are absent (warn + fall through, or optional-chained logging).
interface ExtendedRuntime extends IAgentRuntime {
  // Optional cache backend used for state caching and cache statistics.
  cacheManager?: CacheManager;
  // Optional structured logger; only warn/error/debug are used below.
  logger?: any;
}
|
|
17
|
+
|
|
18
|
+
/**
 * Provider that adds caching context to agent conversations.
 *
 * Relevance-gated: only emits text when the current or recent messages
 * mention one of the keywords below. When relevant, reads the JSON blob
 * stored under the reserved '_stats' cache key and reports hit/miss/size
 * counters; returns empty text otherwise so no context is injected.
 */
export const cacheProvider: Provider = {
  name: 'cacheProvider',
  description: 'Provides caching context and capabilities',

  dynamic: true,
  relevanceKeywords: [
    "cacheprovider",
    "plugin",
    "research",
    "status",
    "state",
    "context",
    "info",
    "details",
    "chat",
    "conversation",
    "agent",
    "room",
    "channel",
    "user",
  ],
  get: async (
    runtime: ExtendedRuntime,
    message: Memory,
    state?: State
  ): Promise<ProviderResult> => { const __providerKeywords = ["cacheprovider", "plugin", "research", "status", "state", "context", "info", "details", "chat", "conversation", "agent", "room", "channel", "user"];
    // NOTE(review): this inline list duplicates relevanceKeywords above —
    // the two must be kept in sync manually.
    const __providerRegex = new RegExp(`\\b(${__providerKeywords.join("|")})\\b`, "i");
    const __recentMessages = state?.recentMessagesData || [];
    // Relevance gate: keyword match OR whole-word regex match against the
    // message and recent conversation history.
    const __isRelevant =
      validateActionKeywords(message, __recentMessages, __providerKeywords) ||
      validateActionRegex(message, __recentMessages, __providerRegex);
    if (!__isRelevant) {
      return { text: "" };
    }


    // No cache backend configured — report unavailability instead of failing.
    if (!runtime.cacheManager) {
      runtime.logger?.warn('Cache manager not available');
      return {
        text: 'Cache service is not available.',
        values: {},
      };
    }

    // Get cache statistics (JSON string stored under the reserved '_stats' key).
    let cacheStats;
    try {
      cacheStats = await runtime.cacheManager.get('_stats');
    } catch (error) {
      runtime.logger?.error('Error getting cache stats:', error);
    }

    // Fall back to zeroed counters when the key is missing or the read failed.
    // NOTE(review): JSON.parse of the cached blob is unvalidated — a corrupt
    // '_stats' value would throw here.
    const stats = cacheStats ? JSON.parse(cacheStats) : { hits: 0, misses: 0, size: 0 };

    return {
      text: `Cache Status:
- Cache hits: ${stats.hits}
- Cache misses: ${stats.misses}
- Cache size: ${stats.size} items
- Cache is operational and ready for use`,
      values: {
        cacheAvailable: true,
        cacheStats: stats,
      },
    };
  },
};
|
|
88
|
+
|
|
89
|
+
/**
 * Middleware to add caching functionality to runtime functions.
 *
 * Monkey-patches two runtime methods in place:
 *  - composeState: caches the composed state per (agentId, roomId, entityId)
 *    as JSON with a 5-minute TTL, and maintains hit/miss/size counters under
 *    the reserved '_stats' key;
 *  - processActions: deletes the cached state for the same key after actions
 *    run, so the next composeState recomputes fresh state.
 * No-op when the runtime has no cacheManager.
 */
export function cacheMiddleware(runtime: ExtendedRuntime) {
  if (!runtime.cacheManager) {
    runtime.logger?.warn('Cache middleware initialized without cache manager');
    return;
  }

  // Capture the original implementations so the wrappers can delegate.
  const originalComposeState = runtime.composeState;
  const originalProcessAction = runtime.processActions;

  // Wrap composeState with caching
  runtime.composeState = async (message: Memory, includeList?: string[], onlyInclude?: boolean) => {
    // Re-check at call time in case the cache manager was removed later.
    if (!runtime.cacheManager) {
      return originalComposeState.call(runtime, message, includeList, onlyInclude);
    }

    const cacheKey = `state:${message.agentId}:${message.roomId}:${message.entityId}`;

    try {
      const cached = await runtime.cacheManager.get(cacheKey);
      if (cached) {
        runtime.logger?.debug('Cache hit for state composition');
        // Bump the hit counter stored under the reserved '_stats' key.
        const stats = await runtime.cacheManager.get('_stats');
        const currentStats = stats ? JSON.parse(stats) : { hits: 0, misses: 0, size: 0 };
        currentStats.hits++;
        await runtime.cacheManager.set('_stats', JSON.stringify(currentStats));
        // NOTE(review): JSON round-trip means the returned state loses any
        // non-JSON-serializable members (functions, Dates become strings).
        return JSON.parse(cached);
      }
    } catch (error) {
      // Treat any cache-read failure as a miss and fall through to recompute.
      runtime.logger?.error('Error reading from cache:', error);
    }

    // Cache miss - compute and cache
    const state = await originalComposeState.call(runtime, message, includeList, onlyInclude);

    try {
      await runtime.cacheManager.set(cacheKey, JSON.stringify(state), 300); // 5 minute TTL
      runtime.logger?.debug('State cached successfully');

      // Record the miss. NOTE(review): size++ happens even when overwriting
      // an existing key, so the stored size can overcount distinct entries.
      const stats = await runtime.cacheManager.get('_stats');
      const currentStats = stats ? JSON.parse(stats) : { hits: 0, misses: 0, size: 0 };
      currentStats.misses++;
      currentStats.size++;
      await runtime.cacheManager.set('_stats', JSON.stringify(currentStats));
    } catch (error) {
      // A failed write is non-fatal: the freshly computed state is still returned.
      runtime.logger?.error('Error writing to cache:', error);
    }

    return state;
  };

  // Wrap processActions with cache invalidation
  runtime.processActions = async (
    message: Memory,
    responses: Memory[],
    state?: State,
    callback?: any
  ) => {
    const result = await originalProcessAction.call(runtime, message, responses, state, callback);

    if (runtime.cacheManager) {
      // Invalidate relevant caches after processing actions
      const cacheKey = `state:${message.agentId}:${message.roomId}:${message.entityId}`;
      try {
        await runtime.cacheManager.delete(cacheKey);
        runtime.logger?.debug('Cache invalidated after action processing');
      } catch (error) {
        // Invalidation failure is logged but does not block the action result.
        runtime.logger?.error('Error invalidating cache:', error);
      }
    }

    return result;
  };
}
|