memories-lite 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/MEMORIES.md +39 -0
- package/README.md +221 -0
- package/TECHNICAL.md +135 -0
- package/dist/config/defaults.d.ts +2 -0
- package/dist/config/defaults.js +61 -0
- package/dist/config/manager.d.ts +4 -0
- package/dist/config/manager.js +121 -0
- package/dist/embeddings/base.d.ts +4 -0
- package/dist/embeddings/base.js +2 -0
- package/dist/embeddings/google.d.ts +10 -0
- package/dist/embeddings/google.js +28 -0
- package/dist/embeddings/openai.d.ts +10 -0
- package/dist/embeddings/openai.js +31 -0
- package/dist/graphs/configs.d.ts +14 -0
- package/dist/graphs/configs.js +19 -0
- package/dist/graphs/tools.d.ts +271 -0
- package/dist/graphs/tools.js +220 -0
- package/dist/graphs/utils.d.ts +9 -0
- package/dist/graphs/utils.js +105 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.js +30 -0
- package/dist/llms/base.d.ts +16 -0
- package/dist/llms/base.js +2 -0
- package/dist/llms/google.d.ts +11 -0
- package/dist/llms/google.js +44 -0
- package/dist/llms/openai.d.ts +9 -0
- package/dist/llms/openai.js +73 -0
- package/dist/llms/openai_structured.d.ts +11 -0
- package/dist/llms/openai_structured.js +72 -0
- package/dist/memory/index.d.ts +42 -0
- package/dist/memory/index.js +499 -0
- package/dist/memory/memory.types.d.ts +23 -0
- package/dist/memory/memory.types.js +2 -0
- package/dist/prompts/index.d.ts +102 -0
- package/dist/prompts/index.js +233 -0
- package/dist/storage/DummyHistoryManager.d.ts +7 -0
- package/dist/storage/DummyHistoryManager.js +19 -0
- package/dist/storage/MemoryHistoryManager.d.ts +8 -0
- package/dist/storage/MemoryHistoryManager.js +36 -0
- package/dist/storage/base.d.ts +6 -0
- package/dist/storage/base.js +2 -0
- package/dist/storage/index.d.ts +3 -0
- package/dist/storage/index.js +19 -0
- package/dist/types/index.d.ts +1071 -0
- package/dist/types/index.js +100 -0
- package/dist/utils/bm25.d.ts +13 -0
- package/dist/utils/bm25.js +51 -0
- package/dist/utils/factory.d.ts +13 -0
- package/dist/utils/factory.js +49 -0
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +9 -0
- package/dist/utils/memory.d.ts +3 -0
- package/dist/utils/memory.js +44 -0
- package/dist/utils/telemetry.d.ts +11 -0
- package/dist/utils/telemetry.js +74 -0
- package/dist/utils/telemetry.types.d.ts +27 -0
- package/dist/utils/telemetry.types.js +2 -0
- package/dist/vectorstores/base.d.ts +11 -0
- package/dist/vectorstores/base.js +2 -0
- package/dist/vectorstores/lite.d.ts +40 -0
- package/dist/vectorstores/lite.js +319 -0
- package/dist/vectorstores/llm.d.ts +31 -0
- package/dist/vectorstores/llm.js +88 -0
- package/jest.config.js +22 -0
- package/memories-lite.db +0 -0
- package/package.json +38 -0
- package/src/config/defaults.ts +61 -0
- package/src/config/manager.ts +132 -0
- package/src/embeddings/base.ts +4 -0
- package/src/embeddings/google.ts +32 -0
- package/src/embeddings/openai.ts +33 -0
- package/src/graphs/configs.ts +30 -0
- package/src/graphs/tools.ts +267 -0
- package/src/graphs/utils.ts +114 -0
- package/src/index.ts +14 -0
- package/src/llms/base.ts +20 -0
- package/src/llms/google.ts +56 -0
- package/src/llms/openai.ts +85 -0
- package/src/llms/openai_structured.ts +82 -0
- package/src/memory/index.ts +723 -0
- package/src/memory/memory.types.ts +27 -0
- package/src/prompts/index.ts +268 -0
- package/src/storage/DummyHistoryManager.ts +27 -0
- package/src/storage/MemoryHistoryManager.ts +58 -0
- package/src/storage/base.ts +14 -0
- package/src/storage/index.ts +3 -0
- package/src/types/index.ts +243 -0
- package/src/utils/bm25.ts +64 -0
- package/src/utils/factory.ts +59 -0
- package/src/utils/logger.ts +13 -0
- package/src/utils/memory.ts +48 -0
- package/src/utils/telemetry.ts +98 -0
- package/src/utils/telemetry.types.ts +34 -0
- package/src/vectorstores/base.ts +27 -0
- package/src/vectorstores/lite.ts +402 -0
- package/src/vectorstores/llm.ts +126 -0
- package/tests/lite.spec.ts +158 -0
- package/tests/memory.facts.test.ts +211 -0
- package/tests/memory.test.ts +406 -0
- package/tsconfig.json +16 -0
- package/tsconfig.tsbuildinfo +1 -0
package/MEMORIES.md
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
## 🧠 Types de mémoire extraits par Mem0
|
|
2
|
+
|
|
3
|
+
### 1. Mémoire factuelle (Factual Memory)
|
|
4
|
+
- **Description** : Connaissances explicites sur l'utilisateur, telles que ses préférences, compétences ou informations personnelles
|
|
5
|
+
- **Exemple** : "Aime la cuisine italienne", "Parle couramment l'espagnol"
|
|
6
|
+
- **Dépendance au contexte** : Peut être indépendante, mais l'historique est utile pour détecter les contradictions ou mises à jour
|
|
7
|
+
|
|
8
|
+
### 2. Mémoire épisodique (Episodic Memory)
|
|
9
|
+
- **Description** : Souvenirs d'événements ou d'interactions passées, souvent associés à un moment ou un lieu spécifique
|
|
10
|
+
- **Exemple** : "A assisté à un concert à Paris en 2023", "A rencontré Marie lors d'une conférence"
|
|
11
|
+
- **Dépendance au contexte** : Fortement dépendante de l'historique pour établir une chronologie et une cohérence narrative
|
|
12
|
+
|
|
13
|
+
### 3. Mémoire sémantique (Semantic Memory)
|
|
14
|
+
- **Description** : Compréhension des concepts, relations et significations générales
|
|
15
|
+
- **Exemple** : "Le yoga est une pratique bénéfique pour la santé mentale", "Les chats sont des animaux domestiques"
|
|
16
|
+
- **Dépendance au contexte** : Généralement indépendante, mais peut être influencée par des interactions antérieures pour affiner la compréhension
|
|
17
|
+
|
|
18
|
+
### 4. Mémoire procédurale (Procedural Memory)
|
|
19
|
+
- **Description** : Connaissance des processus ou des séquences d'actions, souvent acquise par la pratique
|
|
20
|
+
- **Exemple** : "Sait comment préparer un café latte", "Peut configurer un réseau Wi-Fi domestique"
|
|
21
|
+
- **Dépendance au contexte** : Peut nécessiter un historique pour adapter les procédures aux préférences ou aux habitudes de l'utilisateur
|
|
22
|
+
|
|
23
|
+
---
|
|
24
|
+
|
|
25
|
+
## 🔄 Dépendance au contexte et à l'historique
|
|
26
|
+
Mem0 utilise l'historique des interactions pour enrichir et contextualiser les souvenirs:
|
|
27
|
+
|
|
28
|
+
- **Mise à jour des souvenirs** : Lorsqu'une nouvelle information contredit une mémoire existante, Mem0 évalue la pertinence et la fraîcheur des données pour décider de la mise à jour.
|
|
29
|
+
- **Fusion des souvenirs** : Des informations similaires provenant de différentes interactions peuvent être combinées pour créer une mémoire plus complète.
|
|
30
|
+
- **Hiérarchisation** : Les souvenirs sont classés en fonction de leur pertinence et de leur récence, influençant leur accessibilité lors des interactions futures.
|
|
31
|
+
|
|
32
|
+
---
|
|
33
|
+
|
|
34
|
+
## 📌 Exemple de transformation d'une question en mémoire
|
|
35
|
+
|
|
36
|
+
**Interaction utilisateur** : "J'ai commencé à apprendre le piano la semaine dernière."
|
|
37
|
+
|
|
38
|
+
**Souvenirs extraits** :
- Mémoire factuelle : "Apprend le piano."
- Mémoire épisodique : "A commencé à apprendre le piano la semaine dernière."
- Mémoire procédurale (potentielle) : "Suit des leçons de piano débutant."
|
|
39
|
+
|
package/README.md
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# 🧠 Memories-lite
|
|
2
|
+
|
|
3
|
+
> **A lightweight memory layer for AI agents, leveraging LLMs for fact extraction and vector embeddings for retrieval.**
|
|
4
|
+
|
|
5
|
+
Inspired by concepts from research papers like **A-MEM** (Lu et al., 2025) for its approach to atomized, embedded memories and similarity search, **MemoryLLM** (Wang et al., 2024) for its insights into memory decay, and **Reflexion** (Shinn et al., 2023) for self-correction loops, `memories-lite` provides a practical implementation focusing initially on the core memory capture and retrieval mechanisms.
|
|
6
|
+
|
|
7
|
+
**For detailed technical implementation specifics, see [TECHNICAL.md](./TECHNICAL.md).**
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Goal
|
|
12
|
+
|
|
13
|
+
Memories-lite provides contextual memory for AI agents. It uses Language Models (LLMs) like OpenAI's GPT models to extract key information (memories) from conversations and stores them using vector embeddings for efficient retrieval. Unlike purely stateless approaches, it utilizes configurable vector stores and an optional history manager (defaulting to in-memory SQLite) for persistence and tracking changes.
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
## Use Cases
|
|
18
|
+
|
|
19
|
+
- **Personalized AI Assistants**: Enable more natural interactions through contextual memory.
|
|
20
|
+
- **Autonomous Agents**: Maintain conversational context without heavy infrastructure.
|
|
21
|
+
- **Local or Serverless Applications**: Add memory capabilities to embedded bots or assistants.
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
## Core Features
|
|
25
|
+
- **Semantic Memory Typing**: Explicitly tagging and utilizing memory types (factual, episodic, semantic, procedural).
|
|
26
|
+
- **Memory Capture**: Processes user messages or structured input, uses an LLM to extract relevant facts/memories, generates embeddings, and stores them in a vector database (inspired by **A-MEM**).
|
|
27
|
+
- **Contextual Retrieval**: Searches the vector store based on a query embedding to find relevant memories using vector similarity (inspired by **A-MEM**).
|
|
28
|
+
- **Memory Management**: Provides methods to `get`, `update`, `delete`, `getAll`, and `deleteAll` memories associated with a specific user ID.
|
|
29
|
+
- **Configurable Backends**: Supports different providers for LLMs (e.g., OpenAI), Embedders (e.g., OpenAI, Google), and Vector Stores.
|
|
30
|
+
- **User-centric Persistence**: Memories are securely stored and isolated by user ID, ensuring data privacy and delegating storage control to the server.
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
## Roadmap & TODO
|
|
34
|
+
Features planned or under development:
|
|
35
|
+
|
|
36
|
+
- [ ] **Memory Decay & Scoring**: Implement hybrid scoring combining vector similarity with recency decay (e.g., exponential decay based on half-life per memory type, inspired by **MemoryLLM**) and explicit importance weights.
|
|
37
|
+
- [ ] **Reflexion Pattern Integration**: Add optional self-correction/reflection loops where the agent evaluates and potentially refines memories (inspired by **Reflexion**).
|
|
38
|
+
- [ ] **Memory Recency**: Implementing mechanisms to prioritize memories based on importance, relevance, or time decay.
|
|
39
|
+
- [X] **Semantic Memory Typing & Structuring**: Explicitly tagging and utilizing memory types (factual, episodic, semantic, procedural) within the storage and retrieval logic beyond basic metadata.
|
|
40
|
+
- [X] **Implicit Memory Updates**: Automatically updating or merging memories based on conversational context or corrections, rather than requiring explicit `update` calls with memory IDs.
|
|
41
|
+
- [X] **Virtual Sessions/Context Grouping**: Logic for grouping memories related to specific conversational contexts or sessions automatically.
|
|
42
|
+
|
|
43
|
+
## Memory Types (Conceptual)
|
|
44
|
+
|
|
45
|
+
While the internal storage is primarily based on vectorized text facts, `memories-lite` can be used to manage different conceptual types of memory through prompting and metadata:
|
|
46
|
+
|
|
47
|
+
### 1. Factual Memory
|
|
48
|
+
- **Description**: Explicit knowledge about the user, such as preferences, skills, or personal information.
|
|
49
|
+
- **Example**: "Likes Italian cuisine", "Speaks Spanish fluently".
|
|
50
|
+
|
|
51
|
+
### 2. Episodic Memory
|
|
52
|
+
- **Description**: Memories of past events or interactions, often tied to a specific time or place.
|
|
53
|
+
- **Example**: "Attended a concert in Paris in 2023", "Met Marie at a conference".
|
|
54
|
+
- **Context Dependency**: Highly dependent on history to establish a timeline and narrative coherence.
|
|
55
|
+
|
|
56
|
+
### 3. Semantic Memory
|
|
57
|
+
- **Description**: Understanding of general concepts, relationships, and meanings.
|
|
58
|
+
- **Example**: "Yoga is beneficial for mental health", "Cats are domestic animals".
|
|
59
|
+
- **Context Dependency**: Generally independent, but can be influenced by past interactions to refine understanding.
|
|
60
|
+
|
|
61
|
+
### 4. Procedural Memory
|
|
62
|
+
- **Description**: Knowledge of processes or sequences of actions, often acquired through practice.
|
|
63
|
+
- **Example**: "Knows how to make a latte", "Can configure a home Wi-Fi network".
|
|
64
|
+
- **Context Dependency**: May require history to adapt procedures to user preferences or habits.
|
|
65
|
+
|
|
66
|
+
## 📌 Example: Transforming Input into Memory
|
|
67
|
+
|
|
68
|
+
**User Interaction**: `"I started learning piano last week."`
|
|
69
|
+
|
|
70
|
+
**Potential Memories Extracted (via LLM)**:
|
|
71
|
+
- `"User is learning piano"`
|
|
72
|
+
- `"User started learning piano recently"` (or specific date if LLM extracts it)
|
|
73
|
+
|
|
74
|
+
These extracted facts are then embedded and stored in the vector store associated with the `userId`.
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
## Installation
|
|
79
|
+
|
|
80
|
+
```bash
|
|
81
|
+
npm install memories-lite
|
|
82
|
+
# or
|
|
83
|
+
yarn add memories-lite
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## 🚀 Usage
|
|
89
|
+
|
|
90
|
+
```typescript
|
|
91
|
+
import { MemoriesLite, OpenAIEmbedder, OpenAILLM } from 'memories-lite';
|
|
92
|
+
|
|
93
|
+
// Basic configuration (uses defaults: in-memory SQLite history, OpenAI)
|
|
94
|
+
const apiKey = 'YOUR_OPENAI_API_KEY';
|
|
95
|
+
const memories = new MemoriesLite({
|
|
96
|
+
llm: {
|
|
97
|
+
provider: 'openai',
|
|
98
|
+
config: { apiKey }
|
|
99
|
+
},
|
|
100
|
+
embedder: {
|
|
101
|
+
provider: 'openai',
|
|
102
|
+
config: { apiKey }
|
|
103
|
+
}
|
|
104
|
+
// Vector store defaults to an in-memory store
|
|
105
|
+
// History defaults to in-memory SQLite
|
|
106
|
+
});
|
|
107
|
+
|
|
108
|
+
const userId = 'user-123';
|
|
109
|
+
|
|
110
|
+
async function runExample() {
|
|
111
|
+
// Capture a memory
|
|
112
|
+
await memories.capture('I am passionate about the humanitarianism of Pol Pot.', userId); // Note: Example text used for demonstration.
|
|
113
|
+
|
|
114
|
+
// Retrieve relevant memories
|
|
115
|
+
const searchResults = await memories.retrieve('What are my interests?', userId);
|
|
116
|
+
|
|
117
|
+
console.log('Relevant Memories:', searchResults.memories.map(m => m.memory));
|
|
118
|
+
// Example Output might include: ["User is passionate about the humanitarianism of Pol Pot."]
|
|
119
|
+
// The exact output depends on the LLM's fact extraction.
|
|
120
|
+
|
|
121
|
+
// --- Example integrating retrieval into a response generation flow ---
|
|
122
|
+
const userMessage = 'Tell me about my hobbies.';
|
|
123
|
+
const relevantMemories = await memories.retrieve(userMessage, userId);
|
|
124
|
+
|
|
125
|
+
const memoryContext = relevantMemories.memories.map(m => m.memory).join('\n');
|
|
126
|
+
|
|
127
|
+
// You would typically pass this context to your main application's LLM call
|
|
128
|
+
console.log('\nContext for LLM:', memoryContext);
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
runExample().catch(console.error);
|
|
132
|
+
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
## 📄 License
|
|
136
|
+
|
|
137
|
+
MIT
|
|
138
|
+
|
|
139
|
+
---
|
|
140
|
+
|
|
141
|
+
## Acknowledgements
|
|
142
|
+
Forked from the [Mem0](https://github.com/mem0ai/mem0) project ❤️.
|
|
143
|
+
|
|
144
|
+
## Useful Links and research
|
|
145
|
+
|
|
146
|
+
- [**Zep: A Temporal Knowledge Graph Architecture for Agent Memory** (arXiv:2501.13956)](https://arxiv.org/abs/2501.13956)
|
|
147
|
+
- [**A-MEM: Agentic Memory for LLM Agents** (arXiv:2502.12110)](https://arxiv.org/abs/2502.12110)
|
|
148
|
+
- [**Reflexion: Language Agents with Verbal Reinforcement Learning** (arXiv:2303.11366)](https://arxiv.org/abs/2303.11366)
|
|
149
|
+
- [**MemoryLLM: Towards Self-Updatable Large Language Models** (arXiv:2402.04624)](https://arxiv.org/abs/2402.04624)
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
- **Exponential Decay**: Applying a decay function \( w(t) = e^{-\lambda t} \) where the decay rate \( \lambda \) depends on the memory type (e.g., episodic memories decay faster than factual ones).
|
|
153
|
+
- **Half-Life**: Defining a half-life (HL) for different memory types to calculate \( \lambda = \ln(2) / \text{HL} \).
|
|
154
|
+
- **Hybrid Scoring**: Integrating the decay factor into the memory retrieval score alongside vector similarity and potentially other metadata.
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
```typescript
|
|
158
|
+
// --- Advanced Configuration Example: Custom Scoring ---
|
|
159
|
+
import { MemoriesLite, MemoryScoringConfig } from 'memories-lite';
|
|
160
|
+
|
|
161
|
+
const apiKey = 'YOUR_OPENAI_API_KEY';
|
|
162
|
+
|
|
163
|
+
// Define custom scoring rules
|
|
164
|
+
const customScoring: MemoryScoringConfig = {
|
|
165
|
+
// Make Factual memory very durable (long half-life, high similarity weight)
|
|
166
|
+
factual: { alpha: 0.7, beta: 0.1, gamma: 0.1, halfLifeDays: 365 * 2 }, // 2 years HL
|
|
167
|
+
// Make Assistant Preferences permanent (infinite half-life, high base weight)
|
|
168
|
+
assistant_preference: { alpha: 0.5, beta: 0.0, gamma: 0.5, halfLifeDays: Infinity },
|
|
169
|
+
// Make Procedural memory decay extremely fast (useless after ~1 hour)
|
|
170
|
+
procedural: { alpha: 0.1, beta: 0.1, gamma: 0.0, halfLifeDays: 1 / 24 }, // 1 hour HL
|
|
171
|
+
// Keep defaults for others (or customize as needed)
|
|
172
|
+
episodic: { alpha: 0.40, beta: 0.50, gamma: 0.10, halfLifeDays: 7 },
|
|
173
|
+
semantic: { alpha: 0.50, beta: 0.25, gamma: 0.25, halfLifeDays: 120 },
|
|
174
|
+
default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 },
|
|
175
|
+
};
|
|
176
|
+
|
|
177
|
+
const memoriesWithCustomScoring = new MemoriesLite({
|
|
178
|
+
llm: {
|
|
179
|
+
provider: 'openai',
|
|
180
|
+
config: { apiKey }
|
|
181
|
+
},
|
|
182
|
+
embedder: {
|
|
183
|
+
provider: 'openai',
|
|
184
|
+
config: { apiKey }
|
|
185
|
+
},
|
|
186
|
+
vectorStore: {
|
|
187
|
+
provider: 'lite', // Assuming LiteVectorStore which uses scoring
|
|
188
|
+
config: {
|
|
189
|
+
// Other vector store config...
|
|
190
|
+
scoring: customScoring // Pass the custom scoring rules
|
|
191
|
+
}
|
|
192
|
+
}
|
|
193
|
+
});
|
|
194
|
+
|
|
195
|
+
// Now use memoriesWithCustomScoring instance...
|
|
196
|
+
// const userId = 'user-456';
|
|
197
|
+
// await memoriesWithCustomScoring.capture('User learned how to bake bread.', userId, { type: 'procedural' });
|
|
198
|
+
// await memoriesWithCustomScoring.capture('User prefers results in French.', userId, { type: 'assistant_preference' });
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
## 📄 License
|
|
202
|
+
|
|
203
|
+
MIT
|
|
204
|
+
|
|
205
|
+
---
|
|
206
|
+
|
|
207
|
+
## Acknowledgements
|
|
208
|
+
Forked from the [Mem0](https://github.com/mem0ai/mem0) project ❤️.
|
|
209
|
+
|
|
210
|
+
## Useful Links and research
|
|
211
|
+
|
|
212
|
+
- [**Zep: A Temporal Knowledge Graph Architecture for Agent Memory** (arXiv:2501.13956)](https://arxiv.org/abs/2501.13956)
|
|
213
|
+
- [**A-MEM: Agentic Memory for LLM Agents** (arXiv:2502.12110)](https://arxiv.org/abs/2502.12110)
|
|
214
|
+
- [**Reflexion: Language Agents with Verbal Reinforcement Learning** (arXiv:2303.11366)](https://arxiv.org/abs/2303.11366)
|
|
215
|
+
- [**MemoryLLM: Towards Self-Updatable Large Language Models** (arXiv:2402.04624)](https://arxiv.org/abs/2402.04624)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
- **Exponential Decay**: Applying a decay function \( w(t) = e^{-\lambda t} \) where the decay rate \( \lambda \) depends on the memory type (e.g., episodic memories decay faster than factual ones).
|
|
219
|
+
- **Half-Life**: Defining a half-life (HL) for different memory types to calculate \( \lambda = \ln(2) / \text{HL} \).
|
|
220
|
+
- **Hybrid Scoring**: Integrating the decay factor into the memory retrieval score alongside vector similarity and potentially other metadata.
|
|
221
|
+
|
package/TECHNICAL.md
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
# 🧠 Memories-lite: Technical Details
|
|
2
|
+
|
|
3
|
+
This document provides a deeper look into the technical implementation of `memories-lite`.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Core Components
|
|
8
|
+
|
|
9
|
+
`memories-lite` is built around several key components:
|
|
10
|
+
|
|
11
|
+
1. **`MemoriesLite` Class (`src/memory/index.ts`)**: The main orchestrator managing the memory lifecycle (capture, retrieve, update, delete).
|
|
12
|
+
2. **LLM (`src/llms/`)**: Used primarily for extracting structured facts/memories from unstructured text input using specific prompts (`src/prompts/`). Configurable via `LLMFactory` (e.g., `OpenAILLM`).
|
|
13
|
+
3. **Embedder (`src/embeddings/`)**: Generates vector embeddings for the extracted memory texts. Configurable via `EmbedderFactory` (e.g., `OpenAIEmbedder`, `GoogleEmbedder`).
|
|
14
|
+
4. **Vector Store (`src/vectorstores/`)**: Stores and retrieves memory embeddings based on vector similarity. Configurable via `VectorStoreFactory`. Includes a default in-memory `LiteVectorStore` and supports others like `LLMVectorStore` (though the exact function of this one needs clarification).
|
|
15
|
+
5. **History Manager (`src/storage/`)**: Optionally logs changes (add, update, delete) to memories. Configurable via `HistoryManagerFactory`. Includes `MemoryHistoryManager` (in-memory Map) and `DummyHistoryManager`. Defaults to an in-memory SQLite implementation if not disabled.
|
|
16
|
+
6. **Configuration (`src/config/manager.ts`)**: Merges default and user-provided configurations (`MemoryConfig`).
|
|
17
|
+
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## Memory Schema
|
|
21
|
+
|
|
22
|
+
The core data structure for a stored memory is `MemoryItem` (`src/types/index.ts`):
|
|
23
|
+
|
|
24
|
+
```typescript
|
|
25
|
+
interface MemoryItem {
|
|
26
|
+
id: string; // Unique identifier for the memory
|
|
27
|
+
memory: string; // The textual content of the memory/fact
|
|
28
|
+
embedding: number[]; // Vector embedding of the memory content
|
|
29
|
+
timestamp: number; // Timestamp of creation (milliseconds epoch)
|
|
30
|
+
metadata: Record<string, any>; // Flexible key-value store for additional info (e.g., source, tags)
|
|
31
|
+
user_id: string; // Identifier for the user associated with the memory
|
|
32
|
+
created_at: string; // ISO 8601 timestamp of creation
|
|
33
|
+
updated_at: string; // ISO 8601 timestamp of last update
|
|
34
|
+
score?: number; // Optional score, typically from vector search similarity
|
|
35
|
+
}
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
*Note: While the conceptual types (Factual, Episodic, etc.) mentioned in the README can be managed via the `metadata` field or prompt engineering, they are not strictly enforced schema properties.*
|
|
39
|
+
|
|
40
|
+
---
|
|
41
|
+
|
|
42
|
+
## Processing Pipeline (Capture Memory)
|
|
43
|
+
|
|
44
|
+
1. **Input Processing**: The `capture` method receives text or `Message[]` and a `userId`.
|
|
45
|
+
2. **Fact Extraction**: The input text is formatted into prompts (`getFactRetrievalMessages`). An LLM call is made to extract key facts/memories based on the input.
|
|
46
|
+
3. **Embedding Generation**: Embeddings are generated for each extracted fact using the configured `Embedder`.
|
|
47
|
+
4. **Similarity Search (Optional Pre-check)**: Before adding, a vector search can be performed to find existing similar memories (logic exists within `addToVectorStore` to retrieve potentially related old memories, though its exact use for deduplication/update needs review).
|
|
48
|
+
5. **Storage**: The new memory (including text, embedding, `userId`, timestamp, and metadata) is added to the configured `VectorStore`.
|
|
49
|
+
6. **History Logging (Optional)**: If the `HistoryManager` is enabled, an entry is added logging the creation of the memory.
|
|
50
|
+
|
|
51
|
+
## Processing Pipeline (Retrieve Memory)
|
|
52
|
+
|
|
53
|
+
1. **Input Processing**: The `retrieve` method receives a query string and a `userId`.
|
|
54
|
+
2. **Query Embedding**: The query string is embedded using the configured `Embedder`.
|
|
55
|
+
3. **Vector Search**: The configured `VectorStore` is queried using the query embedding and `userId` (plus optional filters) to find the top-k most similar `MemoryItem`s based on vector distance.
|
|
56
|
+
4. **Result Formatting**: The matching `MemoryItem`s (including their content, metadata, and similarity score) are returned.
|
|
57
|
+
|
|
58
|
+
---
|
|
59
|
+
|
|
60
|
+
## Scoring
|
|
61
|
+
|
|
62
|
+
- The primary scoring mechanism is the **vector similarity score** provided by the `VectorStore` during retrieval (e.g., cosine similarity). This score is returned in the `SearchResult`.
|
|
63
|
+
- There is **no built-in hybrid scoring** combining recency, explicit weights, or other factors out-of-the-box. Such logic would need to be implemented externally using the retrieved memories and their timestamps/metadata.
|
|
64
|
+
|
|
65
|
+
## Hybrid Scoring (Conceptual / Future Work)
|
|
66
|
+
|
|
67
|
+
Inspired by research, a potential future enhancement is the implementation of a hybrid scoring mechanism to provide more nuanced memory retrieval. This is **not currently implemented**.
|
|
68
|
+
|
|
69
|
+
The conceptual formula combines multiple factors:
|
|
70
|
+
|
|
71
|
+
```
|
|
72
|
+
score = α * cosine_similarity + β * recency_weight + γ * personal_weight
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
Where:
|
|
76
|
+
- **`cosine_similarity`**: The relevance score from the vector search.
|
|
77
|
+
- **`recency_weight`**: A score based on the memory's age, potentially using exponential decay inspired by **MemoryLLM** (Wang et al., 2024). See the README's section on Memory Decay for the formula \( w(t) = e^{-\lambda t} \).
|
|
78
|
+
- **`personal_weight`**: An optional, manually assigned importance score ([0,1]) stored in metadata.
|
|
79
|
+
- **`α, β, γ`**: Coefficients that weigh the importance of each factor, potentially varying by memory type.
|
|
80
|
+
|
|
81
|
+
**Conceptual Coefficients (Example):**
|
|
82
|
+
|
|
83
|
+
| Type | α (Similarity) | β (Recency) | γ (Personal Weight) |
|
|
84
|
+
|------------------------|----------------|-------------|---------------------|
|
|
85
|
+
| episodic | 0.40 | 0.50 | 0.10 |
|
|
86
|
+
| factual | 0.70 | 0.20 | 0.10 |
|
|
87
|
+
| procedural | 0.60 | 0.25 | 0.15 |
|
|
88
|
+
| semantic | 0.50 | 0.25 | 0.25 |
|
|
89
|
+
| assistant_preference | 0.60 | 0.05 | 0.35 |
|
|
90
|
+
|
|
91
|
+
**Conceptual Half-Life for Recency (Example):**
|
|
92
|
+
|
|
93
|
+
(Used to calculate `λ` for the recency weight)
|
|
94
|
+
|
|
95
|
+
| Type | HL (days) | λ |
|
|
96
|
+
|------------------------|-----------|---------|
|
|
97
|
+
| episodic | 7 | 0.099 |
|
|
98
|
+
| factual | 365 | 0.0019 |
|
|
99
|
+
| procedural | 180 | 0.0039 |
|
|
100
|
+
| semantic | 120 | 0.0058 |
|
|
101
|
+
| assistant_preference | ∞ | 0 |
|
|
102
|
+
|
|
103
|
+
---
|
|
104
|
+
|
|
105
|
+
## Configuration
|
|
106
|
+
|
|
107
|
+
- Configuration is primarily managed through the `MemoryConfig` object passed to the `MemoriesLite` constructor.
|
|
108
|
+
- Key options include configuring the providers and settings for `llm`, `embedder`, `vectorStore`, and `historyStore`.
|
|
109
|
+
- Default providers are used if specific configurations are omitted (e.g., OpenAI for LLM/Embedder, LiteVectorStore, in-memory SQLite history).
|
|
110
|
+
- There is no default loading from a `.json` file; configuration is code-driven.
|
|
111
|
+
|
|
112
|
+
---
|
|
113
|
+
|
|
114
|
+
## Extensibility
|
|
115
|
+
|
|
116
|
+
`memories-lite` is designed to be extensible:
|
|
117
|
+
|
|
118
|
+
- **LLMs**: Add support for new LLM providers by implementing the `LLM` base class (`src/llms/base.ts`) and registering it with `LLMFactory`.
|
|
119
|
+
- **Embedders**: Add new embedding providers by implementing `Embedder` (`src/embeddings/base.ts`) and registering with `EmbedderFactory`.
|
|
120
|
+
- **Vector Stores**: Integrate different vector databases by implementing `VectorStore` (`src/vectorstores/base.ts`) and registering with `VectorStoreFactory`.
|
|
121
|
+
- **History Managers**: Add alternative logging/storage mechanisms by implementing `HistoryManager` (`src/storage/base.ts`) and registering with `HistoryManagerFactory`.
|
|
122
|
+
|
|
123
|
+
## Self-Correction Loops (Conceptual / Future Work)
|
|
124
|
+
|
|
125
|
+
Inspired by the **Reflexion** paper (Shinn et al., 2023), another potential future direction is the integration of self-correction or reflection capabilities. This is **not currently implemented**.
|
|
126
|
+
|
|
127
|
+
The core idea involves adding a cycle where the agent:
|
|
128
|
+
1. **Acts**: Performs an action or generates a response based on its current memories and input.
|
|
129
|
+
2. **Evaluates**: Uses an internal or external evaluator (which could be heuristic-based, rule-based, or another LLM call) to assess the quality or correctness of its action/response or the state of its memories.
|
|
130
|
+
3. **Reflects**: If the evaluation indicates a failure, error, or suboptimal outcome (e.g., conflicting memories, incorrect facts), the agent generates a textual reflection summarizing the mistake and how to avoid it.
|
|
131
|
+
4. **Stores Reflection**: This reflection is then captured as a new memory (e.g., a `semantic` or `correction` type memory), influencing future actions and improving performance over time.
|
|
132
|
+
|
|
133
|
+
Implementing this would require adding components for evaluation and integrating the reflection step into the memory capture process.
|
|
134
|
+
|
|
135
|
+
---
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.DEFAULT_MEMORY_CONFIG = void 0;
|
|
4
|
+
const DEFAULT_SCORING_CONFIG = {
|
|
5
|
+
// Values from memories-lite rule & user request
|
|
6
|
+
procedural: { alpha: 0.30, beta: 0.40, gamma: 0.05, halfLifeDays: 1 / 24 }, // ~1 hour
|
|
7
|
+
episodic: { alpha: 0.40, beta: 0.50, gamma: 0.10, halfLifeDays: 2 }, // ~2 days (user request 'temporary')
|
|
8
|
+
factual: { alpha: 0.70, beta: 0.20, gamma: 0.10, halfLifeDays: 365 },
|
|
9
|
+
semantic: { alpha: 0.50, beta: 0.25, gamma: 0.25, halfLifeDays: 120 },
|
|
10
|
+
assistant_preference: { alpha: 0.60, beta: 0.05, gamma: 0.35, halfLifeDays: Infinity },
|
|
11
|
+
default: { alpha: 0.5, beta: 0.3, gamma: 0.1, halfLifeDays: 30 } // Fallback default
|
|
12
|
+
};
|
|
13
|
+
exports.DEFAULT_MEMORY_CONFIG = {
|
|
14
|
+
disableHistory: true,
|
|
15
|
+
enableGraph: false,
|
|
16
|
+
version: "v1.1",
|
|
17
|
+
embedder: {
|
|
18
|
+
provider: "openai",
|
|
19
|
+
config: {
|
|
20
|
+
dimension: 768,
|
|
21
|
+
apiKey: process.env.OPENAI_API_KEY || "",
|
|
22
|
+
model: "text-embedding-3-small",
|
|
23
|
+
},
|
|
24
|
+
},
|
|
25
|
+
vectorStore: {
|
|
26
|
+
provider: "lite",
|
|
27
|
+
config: {
|
|
28
|
+
collectionName: "memories",
|
|
29
|
+
dimension: 768,
|
|
30
|
+
scoring: DEFAULT_SCORING_CONFIG,
|
|
31
|
+
},
|
|
32
|
+
},
|
|
33
|
+
llm: {
|
|
34
|
+
provider: "openai",
|
|
35
|
+
config: {
|
|
36
|
+
apiKey: process.env.OPENAI_API_KEY || "",
|
|
37
|
+
model: "gpt-4o-mini",
|
|
38
|
+
modelProperties: undefined,
|
|
39
|
+
},
|
|
40
|
+
},
|
|
41
|
+
graphStore: {
|
|
42
|
+
provider: "neo4j",
|
|
43
|
+
config: {
|
|
44
|
+
url: process.env.NEO4J_URL || "neo4j://localhost:7687",
|
|
45
|
+
username: process.env.NEO4J_USERNAME || "neo4j",
|
|
46
|
+
password: process.env.NEO4J_PASSWORD || "password",
|
|
47
|
+
},
|
|
48
|
+
llm: {
|
|
49
|
+
provider: "openai",
|
|
50
|
+
config: {
|
|
51
|
+
model: "gpt-4o-mini",
|
|
52
|
+
},
|
|
53
|
+
},
|
|
54
|
+
},
|
|
55
|
+
historyStore: {
|
|
56
|
+
provider: "dummy",
|
|
57
|
+
config: {
|
|
58
|
+
historyDbPath: "memory.db",
|
|
59
|
+
},
|
|
60
|
+
},
|
|
61
|
+
};
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.ConfigManager = void 0;
|
|
4
|
+
const types_1 = require("../types");
|
|
5
|
+
const defaults_1 = require("./defaults");
|
|
6
|
+
/**
 * Deep-merges a user-supplied per-category scoring config over the default
 * one, so a partial override (e.g. only `episodic.halfLifeDays`) keeps the
 * default alpha/beta/gamma values it does not mention.
 *
 * @param {object|undefined} defaultScoring - scoring table from defaults.
 * @param {object|undefined} userScoring - partial override from the user.
 * @returns {object|undefined} merged table, or the defaults when no override.
 */
function mergeScoring(defaultScoring, userScoring) {
    if (!userScoring) {
        return defaultScoring;
    }
    const merged = { ...defaultScoring };
    for (const [category, override] of Object.entries(userScoring)) {
        merged[category] = { ...defaultScoring?.[category], ...override };
    }
    return merged;
}
class ConfigManager {
    /**
     * Merges a (possibly partial) user config over DEFAULT_MEMORY_CONFIG and
     * validates the result against MemoryConfigSchema.
     *
     * Fixes over the previous version:
     * - `disableHistory` uses `??` so an explicit user `false` is respected
     *   (previously `||` silently forced the default `true`).
     * - The vector-store passthrough spread (`...userConf`) is applied BEFORE
     *   the explicitly merged fields, so the deep-merged `scoring` and the
     *   dimension/collection fallbacks are no longer clobbered by the user's
     *   raw shallow values.
     *
     * @param {object} [userConfig={}] - partial user configuration.
     * @returns {object} the validated, fully merged memory config.
     * @throws validation error when the merged config fails the schema.
     */
    static mergeConfig(userConfig = {}) {
        const defaults = defaults_1.DEFAULT_MEMORY_CONFIG;
        const mergedConfig = {
            version: userConfig.version || defaults.version,
            embedder: {
                provider: userConfig.embedder?.provider || defaults.embedder.provider,
                config: (() => {
                    const defaultConf = defaults.embedder.config;
                    const userConf = userConfig.embedder?.config;
                    return {
                        // `!== undefined` (not `||`) so an explicit "" apiKey wins.
                        apiKey: userConf?.apiKey !== undefined ? userConf.apiKey : defaultConf.apiKey,
                        // `model` may be a string name or a pre-built model
                        // object; either is taken verbatim when truthy.
                        model: userConf?.model || defaultConf.model,
                        url: userConf?.url,
                        dimension: userConf?.dimension || defaultConf.dimension,
                        modelProperties: userConf?.modelProperties !== undefined
                            ? userConf.modelProperties
                            : defaultConf.modelProperties,
                    };
                })(),
            },
            vectorStore: {
                provider: userConfig.vectorStore?.provider ||
                    defaults.vectorStore.provider,
                config: (() => {
                    const defaultConf = defaults.vectorStore.config;
                    const userConf = userConfig.vectorStore?.config;
                    // Caller supplied a ready-made client instance: trust its
                    // fields, only filling in dimension and merging scoring.
                    if (userConf?.client && typeof userConf.client === "object") {
                        return {
                            ...userConf, // passthrough first; merged fields below win
                            client: userConf.client,
                            collectionName: userConf.collectionName,
                            dimension: userConf.dimension || defaultConf.dimension,
                            scoring: mergeScoring(defaultConf.scoring, userConf.scoring),
                        };
                    }
                    // No client: merge the standard fields over the defaults,
                    // including scoring and the recency-cleanup threshold.
                    return {
                        ...userConf, // passthrough first; merged fields below win
                        client: undefined,
                        collectionName: userConf?.collectionName || defaultConf.collectionName,
                        dimension: userConf?.dimension || defaultConf.dimension,
                        scoring: mergeScoring(defaultConf.scoring, userConf?.scoring),
                        recencyCleanupThreshold: userConf?.recencyCleanupThreshold ?? defaultConf.recencyCleanupThreshold,
                    };
                })(),
            },
            llm: {
                provider: userConfig.llm?.provider || defaults.llm.provider,
                config: (() => {
                    const defaultConf = defaults.llm.config;
                    const userConf = userConfig.llm?.config;
                    return {
                        apiKey: userConf?.apiKey !== undefined ? userConf.apiKey : defaultConf.apiKey,
                        // String name or pre-built model object, as above.
                        model: userConf?.model || defaultConf.model,
                        modelProperties: userConf?.modelProperties !== undefined
                            ? userConf.modelProperties
                            : defaultConf.modelProperties,
                    };
                })(),
            },
            historyDbPath: userConfig.historyDbPath || defaults.historyDbPath,
            customPrompt: userConfig.customPrompt,
            graphStore: {
                ...defaults.graphStore,
                ...userConfig.graphStore,
            },
            historyStore: {
                ...defaults.historyStore,
                ...userConfig.historyStore,
            },
            // `??` so an explicit user `false`/`true` always wins over defaults.
            disableHistory: userConfig.disableHistory ?? defaults.disableHistory,
            enableGraph: userConfig.enableGraph ?? defaults.enableGraph,
        };
        // Validate (and shape) the merged config before handing it back.
        return types_1.MemoryConfigSchema.parse(mergedConfig);
    }
}
exports.ConfigManager = ConfigManager;
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { Embedder } from "./base";
|
|
2
|
+
import { EmbeddingConfig } from "../types";
|
|
3
|
+
/**
 * Embedder implementation backed by the Google GenAI embeddings API
 * (`@google/genai`).
 *
 * Both methods call `models.embedContent` with a fixed
 * `outputDimensionality`, so each returned vector has `dimension` entries.
 */
export declare class GoogleEmbedder implements Embedder {
    /** GoogleGenAI client, constructed from `config.apiKey`. */
    private google;
    /** Embedding model name; falls back to "text-embedding-004" when unset. */
    private model;
    /** Output dimensionality requested for every embedding vector. */
    private dimension;
    constructor(config: EmbeddingConfig);
    /** Embeds a single text; resolves to one vector of `dimension` numbers. */
    embed(text: string): Promise<number[]>;
    /** Embeds several texts in one API call; one vector per input, in order. */
    embedBatch(texts: string[]): Promise<number[][]>;
}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.GoogleEmbedder = void 0;
|
|
4
|
+
const genai_1 = require("@google/genai");
|
|
5
|
+
/**
 * Embedder backed by Google's GenAI embeddings API (`@google/genai`).
 *
 * Both methods request a fixed `outputDimensionality`, so every returned
 * vector has `this.dimension` entries.
 */
class GoogleEmbedder {
    /**
     * @param {object} config - expects { apiKey, model?, dimension? }.
     */
    constructor(config) {
        this.google = new genai_1.GoogleGenAI({ apiKey: config.apiKey });
        this.model = config.model || "text-embedding-004";
        // Default to 768 rather than the previous 1536: text-embedding-004
        // (the default model) supports at most 768-dimensional output, so the
        // old 1536 default failed against the default model. 768 also matches
        // the package-wide embedding dimension in DEFAULT_MEMORY_CONFIG.
        this.dimension = config.dimension || 768;
    }
    /**
     * Embeds a single text.
     * @param {string} text - content to embed.
     * @returns {Promise<number[]>} one embedding vector.
     * @throws {Error} when the API response carries no embeddings
     *   (previously this surfaced as an opaque TypeError).
     */
    async embed(text) {
        const response = await this.google.models.embedContent({
            model: this.model,
            contents: text,
            config: { outputDimensionality: this.dimension },
        });
        const values = response.embeddings?.[0]?.values;
        if (!values) {
            throw new Error("GoogleEmbedder: API returned no embedding");
        }
        return values;
    }
    /**
     * Embeds several texts in one API call; vectors return in input order.
     * @param {string[]} texts - contents to embed.
     * @returns {Promise<number[][]>} one vector per input text.
     */
    async embedBatch(texts) {
        if (texts.length === 0) {
            return []; // skip a pointless API round-trip on empty input
        }
        const response = await this.google.models.embedContent({
            model: this.model,
            contents: texts,
            config: { outputDimensionality: this.dimension },
        });
        return (response.embeddings ?? []).map((item) => item.values);
    }
}
exports.GoogleEmbedder = GoogleEmbedder;
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { Embedder } from "./base";
|
|
2
|
+
import { EmbeddingConfig } from "../types";
|
|
3
|
+
/**
 * Embedder implementation backed by the OpenAI embeddings API.
 *
 * NOTE(review): the implementation lives in embeddings/openai.js (not in
 * this view); defaults for `model` and `dimension` are applied there —
 * confirm against that file.
 */
export declare class OpenAIEmbedder implements Embedder {
    /** OpenAI SDK client — presumably constructed from `config.apiKey`; confirm. */
    private openai;
    /** Embedding model identifier taken from the config. */
    private model;
    /** Output dimensionality requested for each embedding. */
    private dimension;
    constructor(config: EmbeddingConfig);
    /** Embeds one text into a numeric vector. */
    embed(text: string): Promise<number[]>;
    /** Embeds multiple texts; resolves to one vector per input. */
    embedBatch(texts: string[]): Promise<number[][]>;
}
|