@strav/brain 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,42 @@
1
// Public entry point for @strav/brain — re-exports the manager, provider,
// agent/workflow primitives, built-in LLM providers, utilities, and types.

// Manager (also the package default export)
export { default, default as BrainManager } from './brain_manager.ts'

// Provider
export { default as BrainProvider } from './brain_provider.ts'

// Core primitives
export { brain, AgentRunner, Thread } from './helpers.ts'
export { Agent } from './agent.ts'
export { defineTool, defineToolbox } from './tool.ts'
export { Workflow } from './workflow.ts'

// Built-in LLM providers
export { AnthropicProvider } from './providers/anthropic_provider.ts'
export { OpenAIProvider } from './providers/openai_provider.ts'
export { OpenAIResponsesProvider } from './providers/openai_responses_provider.ts'

// Utilities
export { parseSSE } from './utils/sse_parser.ts'
export { zodToJsonSchema } from './utils/schema.ts'

// Shared types
export type {
  AIProvider,
  BrainConfig,
  ProviderConfig,
  CompletionRequest,
  CompletionResponse,
  Message,
  ContentBlock,
  ToolCall,
  ToolDefinition,
  StreamChunk,
  Usage,
  AgentResult,
  ToolCallRecord,
  AgentEvent,
  WorkflowResult,
  EmbeddingResponse,
  JsonSchema,
  SSEEvent,
  BeforeHook,
  AfterHook,
  SerializedThread,
  OutputSchema,
} from './types.ts'
export type { ChatOptions, GenerateOptions, GenerateResult, EmbedOptions } from './helpers.ts'
export type { WorkflowContext } from './workflow.ts'

// Memory
export * from './memory/index.ts'
@@ -0,0 +1,120 @@
1
+ import type { Message } from '../types.ts'
2
+ import type { MemoryConfig, Fact } from './types.ts'
3
+ import { TokenCounter } from './token_counter.ts'
4
+ import { SemanticMemory } from './semantic_memory.ts'
5
+
6
/** Snapshot of how the context-window token budget is allocated. */
export interface BudgetBreakdown {
  /** Total context window tokens. */
  total: number
  /** Reserved for the model's response. */
  response: number
  /** Tokens used by system prompt + injected facts. */
  system: number
  /** Tokens used by the episodic summary. */
  summary: number
  /** Available budget for working messages (clamped to >= 0). */
  working: number
  /** Tokens currently used by working messages. */
  used: number
  /** Remaining headroom before compaction is triggered (may be negative). */
  remaining: number
}

// Fraction of the context window reserved for the model's response.
const DEFAULT_RESPONSE_RESERVE = 0.25
// Compaction never reduces the working set below this many messages.
const DEFAULT_MIN_WORKING_MESSAGES = 4
25
+
26
+ /**
27
+ * Calculates and tracks how the context window budget is allocated
28
+ * across system prompt, episodic summaries, semantic facts, and
29
+ * working messages.
30
+ */
31
+ export class ContextBudget {
32
+ private readonly maxTokens: number
33
+ private readonly responseReserve: number
34
+ private readonly minWorkingMessages: number
35
+
36
+ constructor(config: MemoryConfig, model: string) {
37
+ this.maxTokens = config.maxContextTokens ?? TokenCounter.contextWindow(model)
38
+ this.responseReserve = config.responseReserve ?? DEFAULT_RESPONSE_RESERVE
39
+ this.minWorkingMessages = config.minWorkingMessages ?? DEFAULT_MIN_WORKING_MESSAGES
40
+ }
41
+
42
+ /** Check whether the current context fits within the token budget. */
43
+ fits(system: string | undefined, summary: string, facts: Fact[], messages: Message[]): boolean {
44
+ const bd = this.breakdown(system, summary, facts, messages)
45
+ return bd.remaining >= 0
46
+ }
47
+
48
+ /** Get a detailed breakdown of token usage. */
49
+ breakdown(
50
+ system: string | undefined,
51
+ summary: string,
52
+ facts: Fact[],
53
+ messages: Message[]
54
+ ): BudgetBreakdown {
55
+ const total = this.maxTokens
56
+ const response = Math.ceil(total * this.responseReserve)
57
+
58
+ const systemTokens = TokenCounter.estimate(system ?? '')
59
+ const factsTokens =
60
+ facts.length > 0 ? TokenCounter.estimate(SemanticMemory.formatFacts(facts)) : 0
61
+ const systemTotal = systemTokens + factsTokens
62
+
63
+ const summaryTokens = TokenCounter.estimate(summary)
64
+ const messageTokens = TokenCounter.estimateMessages(messages)
65
+
66
+ const working = total - response - systemTotal - summaryTokens
67
+ const remaining = working - messageTokens
68
+
69
+ return {
70
+ total,
71
+ response,
72
+ system: systemTotal,
73
+ summary: summaryTokens,
74
+ working: Math.max(working, 0),
75
+ used: messageTokens,
76
+ remaining,
77
+ }
78
+ }
79
+
80
+ /**
81
+ * Determine how many messages from the front of the array
82
+ * need to be compacted so the rest fits within budget.
83
+ *
84
+ * Returns 0 if everything already fits.
85
+ * Respects minWorkingMessages — never compacts below that threshold.
86
+ */
87
+ compactionNeeded(
88
+ system: string | undefined,
89
+ summary: string,
90
+ facts: Fact[],
91
+ messages: Message[]
92
+ ): number {
93
+ if (this.fits(system, summary, facts, messages)) return 0
94
+
95
+ const total = this.maxTokens
96
+ const response = Math.ceil(total * this.responseReserve)
97
+ const systemTokens = TokenCounter.estimate(system ?? '')
98
+ const factsTokens =
99
+ facts.length > 0 ? TokenCounter.estimate(SemanticMemory.formatFacts(facts)) : 0
100
+ const summaryTokens = TokenCounter.estimate(summary)
101
+
102
+ const available = total - response - systemTokens - factsTokens - summaryTokens
103
+ const maxCompactable = Math.max(0, messages.length - this.minWorkingMessages)
104
+
105
+ // Accumulate from the back (most recent) until we exceed the budget
106
+ let kept = 0
107
+ let tokensFromBack = 0
108
+ for (let i = messages.length - 1; i >= 0; i--) {
109
+ const msgTokens = TokenCounter.estimateMessages([messages[i]!])
110
+ if (tokensFromBack + msgTokens > available) break
111
+ tokensFromBack += msgTokens
112
+ kept++
113
+ }
114
+
115
+ const toCompact = messages.length - kept
116
+
117
+ // Never compact below minWorkingMessages
118
+ return Math.min(toCompact, maxCompactable)
119
+ }
120
+ }
@@ -0,0 +1,17 @@
1
// Public surface of the memory subsystem.

export { TokenCounter } from './token_counter.ts'
export { ContextBudget } from './context_budget.ts'
export type { BudgetBreakdown } from './context_budget.ts'
export { MemoryManager } from './memory_manager.ts'
export type { PreparedContext } from './memory_manager.ts'
export { SemanticMemory } from './semantic_memory.ts'
export { InMemoryThreadStore } from './thread_store.ts'
export { SlidingWindowStrategy } from './strategies/sliding_window.ts'
export { SummarizeStrategy } from './strategies/summarize.ts'
export type {
  MemoryConfig,
  CompactionStrategy,
  CompactionResult,
  ThreadStore,
  SerializedMemoryThread,
  Fact,
} from './types.ts'
@@ -0,0 +1,168 @@
1
+ import type { Message } from '../types.ts'
2
+ import type { CompactionStrategy, Fact, MemoryConfig } from './types.ts'
3
+ import { ContextBudget } from './context_budget.ts'
4
+ import { SemanticMemory } from './semantic_memory.ts'
5
+ import { SlidingWindowStrategy } from './strategies/sliding_window.ts'
6
+ import { SummarizeStrategy } from './strategies/summarize.ts'
7
+
8
// NOTE(review): assigned in MemoryManager's constructor but not read anywhere
// in this file — presumably intended to size compaction batches; confirm.
const DEFAULT_COMPACTION_BATCH_SIZE = 10
// Fact extraction during compaction is on unless explicitly disabled.
const DEFAULT_EXTRACT_FACTS = true

/** Result of MemoryManager.prepareContext(). */
export interface PreparedContext {
  /** Original system prompt augmented with summary and facts. */
  system: string | undefined
  /** Working messages (trimmed, possibly after compaction). */
  messages: Message[]
  /** Whether compaction occurred during this preparation. */
  compacted: boolean
}
19
+
20
/**
 * Orchestrates the three-tier memory system:
 * - Working memory (recent messages within context budget)
 * - Episodic memory (compacted summaries)
 * - Semantic memory (extracted facts)
 *
 * Instantiated per-Thread, not via DI. Configured through MemoryConfig.
 */
export class MemoryManager {
  // Pluggable compaction strategy; swappable at runtime via useStrategy().
  private _strategy: CompactionStrategy
  // Tier 3: structured facts injected into every system prompt.
  private _semanticMemory = new SemanticMemory()
  // Tier 2: rolling summary of previously compacted messages.
  private _summary = ''
  // NOTE(review): assigned from config but never read in this file —
  // presumably intended to batch strategy input; confirm before relying on it.
  private _compactionBatchSize: number
  // Whether compaction should also extract structured facts.
  private _extractFacts: boolean

  constructor(
    private config: MemoryConfig,
    private budget: ContextBudget
  ) {
    this._compactionBatchSize = config.compactionBatchSize ?? DEFAULT_COMPACTION_BATCH_SIZE
    this._extractFacts = config.extractFacts ?? DEFAULT_EXTRACT_FACTS
    this._strategy = MemoryManager.createStrategy(config.strategy)
  }

  /**
   * Prepare context for sending to the LLM.
   *
   * This is the core method called by Thread before every completion request.
   * It checks the token budget, triggers compaction if needed, and builds
   * the final system prompt with summary and facts injected.
   *
   * Important: when compaction occurs, the `messages` array passed in is
   * mutated (oldest messages are spliced out). This keeps Thread's internal
   * state consistent with what was actually sent.
   */
  async prepareContext(
    system: string | undefined,
    messages: Message[],
    options: { provider: string; model: string }
  ): Promise<PreparedContext> {
    // Budget check uses the facts known *before* compaction; facts extracted
    // below can grow the prompt without a re-check. NOTE(review): single-pass
    // by design, apparently — confirm that a small overflow here is acceptable.
    const facts = this._semanticMemory.all()
    let compacted = false

    // Check if compaction is needed
    const needed = this.budget.compactionNeeded(system, this._summary, facts, messages)

    if (needed > 0) {
      // Mutates the caller's array: the oldest `needed` messages are removed.
      const toCompact = messages.splice(0, needed)

      const result = await this._strategy.compact(toCompact, {
        provider: options.provider,
        model: options.model,
        existingSummary: this._summary || undefined,
        extractFacts: this._extractFacts,
      })

      // Update episodic summary
      if (result.summary) {
        this._summary = result.summary
      }

      // Merge extracted facts
      if (result.facts) {
        for (const fact of result.facts) {
          this._semanticMemory.set(fact.key, fact.value, fact.source, fact.confidence)
        }
      }

      compacted = true
    }

    // Build augmented system prompt
    const augmentedSystem = this.buildSystemPrompt(system)

    return {
      system: augmentedSystem,
      messages: [...messages],
      compacted,
    }
  }

  /** Access the semantic memory for manual fact management. */
  get facts(): SemanticMemory {
    return this._semanticMemory
  }

  /** Get the current episodic summary. */
  get episodicSummary(): string {
    return this._summary
  }

  /** Replace the current compaction strategy. */
  useStrategy(strategy: CompactionStrategy): void {
    this._strategy = strategy
  }

  /** Serialize the full memory state for persistence. */
  serialize(): { summary: string; facts: Fact[] } {
    return {
      summary: this._summary,
      facts: this._semanticMemory.serialize(),
    }
  }

  /** Restore memory state from persisted data. Missing fields are left untouched. */
  restore(data: { summary?: string; facts?: Fact[] }): void {
    if (data.summary !== undefined) {
      this._summary = data.summary
    }
    if (data.facts) {
      this._semanticMemory.restore(data.facts)
    }
  }

  // ── Private ────────────────────────────────────────────────────────────────

  /** Build the final system prompt with summary and facts injected. */
  private buildSystemPrompt(original: string | undefined): string | undefined {
    const parts: string[] = []

    if (original) {
      parts.push(original)
    }

    if (this._summary) {
      parts.push(
        `<conversation_history_summary>\n${this._summary}\n</conversation_history_summary>`
      )
    }

    const factsBlock = this._semanticMemory.toPromptBlock()
    if (factsBlock) {
      parts.push(factsBlock)
    }

    return parts.length > 0 ? parts.join('\n\n') : undefined
  }

  /** Create a compaction strategy by name. Unknown or absent names fall back to summarize. */
  private static createStrategy(name?: string): CompactionStrategy {
    switch (name) {
      case 'sliding_window':
        return new SlidingWindowStrategy()
      case 'summarize':
      default:
        return new SummarizeStrategy()
    }
  }
}
@@ -0,0 +1,89 @@
1
+ import type { Fact } from './types.ts'
2
+
3
+ /**
4
+ * In-memory structured fact store.
5
+ *
6
+ * Facts are key-value pairs representing stable knowledge about the
7
+ * user and their situation, extracted from conversation or set explicitly.
8
+ * They are injected into the system prompt so the model always has
9
+ * access to critical context regardless of compaction.
10
+ *
11
+ * Platform can persist facts via the ThreadStore's `facts` field.
12
+ */
13
+ export class SemanticMemory {
14
+ private _facts = new Map<string, Fact>()
15
+
16
+ /** Add or update a fact. */
17
+ set(
18
+ key: string,
19
+ value: string,
20
+ source: 'extracted' | 'explicit' = 'explicit',
21
+ confidence: number = 1.0
22
+ ): void {
23
+ const now = new Date().toISOString()
24
+ const existing = this._facts.get(key)
25
+
26
+ this._facts.set(key, {
27
+ key,
28
+ value,
29
+ source,
30
+ confidence,
31
+ createdAt: existing?.createdAt ?? now,
32
+ updatedAt: now,
33
+ })
34
+ }
35
+
36
+ /** Get a specific fact by key. */
37
+ get(key: string): Fact | undefined {
38
+ return this._facts.get(key)
39
+ }
40
+
41
+ /** Get all facts as an array. */
42
+ all(): Fact[] {
43
+ return Array.from(this._facts.values())
44
+ }
45
+
46
+ /** Get the number of stored facts. */
47
+ get size(): number {
48
+ return this._facts.size
49
+ }
50
+
51
+ /** Remove a fact by key. */
52
+ remove(key: string): boolean {
53
+ return this._facts.delete(key)
54
+ }
55
+
56
+ /** Format all facts as a prompt block for injection into the system prompt. */
57
+ toPromptBlock(): string {
58
+ return SemanticMemory.formatFacts(this.all())
59
+ }
60
+
61
+ /**
62
+ * Static formatter — also used by ContextBudget for token estimation
63
+ * without needing a SemanticMemory instance.
64
+ */
65
+ static formatFacts(facts: Fact[]): string {
66
+ if (facts.length === 0) return ''
67
+
68
+ const lines = facts.map(f => `- ${f.key}: ${f.value}`)
69
+ return `<known_facts>\n${lines.join('\n')}\n</known_facts>`
70
+ }
71
+
72
+ /** Serialize facts for persistence. */
73
+ serialize(): Fact[] {
74
+ return this.all()
75
+ }
76
+
77
+ /** Restore facts from persisted data. */
78
+ restore(facts: Fact[]): void {
79
+ this._facts.clear()
80
+ for (const fact of facts) {
81
+ this._facts.set(fact.key, { ...fact })
82
+ }
83
+ }
84
+
85
+ /** Clear all facts. */
86
+ clear(): void {
87
+ this._facts.clear()
88
+ }
89
+ }
@@ -0,0 +1,20 @@
1
+ import type { Message } from '../../types.ts'
2
+ import type { CompactionResult, CompactionStrategy } from '../types.ts'
3
+
4
+ /**
5
+ * Simplest compaction strategy — discards oldest messages
6
+ * without producing a summary. No LLM call required.
7
+ *
8
+ * Use this when you want fast, predictable compaction
9
+ * and don't need continuity from older messages.
10
+ */
11
+ export class SlidingWindowStrategy implements CompactionStrategy {
12
+ readonly name = 'sliding_window'
13
+
14
+ async compact(
15
+ _messages: Message[],
16
+ _options: { provider: string; model: string; existingSummary?: string; extractFacts?: boolean }
17
+ ): Promise<CompactionResult> {
18
+ return { summary: '', facts: [], summaryTokens: 0 }
19
+ }
20
+ }
@@ -0,0 +1,157 @@
1
+ import BrainManager from '../../brain_manager.ts'
2
+ import type { Message } from '../../types.ts'
3
+ import type { CompactionResult, CompactionStrategy, Fact } from '../types.ts'
4
+ import { TokenCounter } from '../token_counter.ts'
5
+
6
// System prompt for the summarization LLM call made by SummarizeStrategy.compact().
const SUMMARIZE_SYSTEM = `You are a conversation summarizer. Your job is to produce a concise summary that preserves all information needed for conversation continuity.

Preserve:
- Key decisions and their reasoning
- Important facts about the user and their situation
- Open questions or pending action items
- Context that would be needed to continue the conversation naturally

Be concise but thorough. Write in third person past tense.`

// Prompt used when there is no existing summary to merge with.
// {{messages}} is replaced with the formatted conversation text.
const SUMMARIZE_PROMPT = `Summarize the following conversation segment. The summary will replace these messages in the conversation context, so it must preserve everything needed for continuity.

<messages>
{{messages}}
</messages>`

// Prompt used when an existing summary must be merged with a new segment.
// {{existingSummary}} and {{messages}} are the template slots.
const MERGE_PROMPT = `Below is an existing conversation summary followed by a new segment of messages. Produce a single updated summary that merges the existing summary with the new information. Do not simply append — integrate and consolidate.

<existing_summary>
{{existingSummary}}
</existing_summary>

<new_messages>
{{messages}}
</new_messages>`

// Appended to the prompt when fact extraction is enabled; instructs the model
// to emit a <facts> JSON block after the summary (parsed by parseResponse()).
const EXTRACT_FACTS_SUFFIX = `

After the summary, output a JSON block with extracted facts. Each fact should be a key-value pair representing a stable piece of information about the user or their situation. Only include facts you are confident about.

Format:
<facts>
[{"key": "fact_key", "value": "fact value", "confidence": 0.9}]
</facts>`
40
+
41
/**
 * Uses the thread's own LLM to produce a natural-language summary
 * of compacted messages. Optionally extracts structured facts.
 *
 * When an existing summary is provided, it merges rather than
 * creating a summary-of-summary chain.
 */
export class SummarizeStrategy implements CompactionStrategy {
  readonly name = 'summarize'

  /**
   * Summarize (and optionally extract facts from) the given messages.
   *
   * Makes a single LLM call via BrainManager.complete with a low
   * temperature (0.3) and a 2048-token cap on the summary response.
   */
  async compact(
    messages: Message[],
    options: { provider: string; model: string; existingSummary?: string; extractFacts?: boolean }
  ): Promise<CompactionResult> {
    const messagesText = SummarizeStrategy.formatMessages(messages)

    // Merge with the existing summary when present; otherwise summarize fresh.
    let prompt: string
    if (options.existingSummary) {
      prompt = MERGE_PROMPT.replace('{{existingSummary}}', options.existingSummary).replace(
        '{{messages}}',
        messagesText
      )
    } else {
      prompt = SUMMARIZE_PROMPT.replace('{{messages}}', messagesText)
    }

    if (options.extractFacts) {
      prompt += EXTRACT_FACTS_SUFFIX
    }

    const response = await BrainManager.complete(options.provider, {
      model: options.model,
      messages: [{ role: 'user', content: prompt }],
      system: SUMMARIZE_SYSTEM,
      maxTokens: 2048,
      temperature: 0.3,
    })

    const { summary, facts } = SummarizeStrategy.parseResponse(
      response.content,
      options.extractFacts
    )

    return {
      summary,
      facts,
      summaryTokens: TokenCounter.estimate(summary),
    }
  }

  /** Format messages into readable text for the summarization prompt. */
  private static formatMessages(messages: Message[]): string {
    const lines: string[] = []

    for (const msg of messages) {
      const role = msg.role.charAt(0).toUpperCase() + msg.role.slice(1)
      // Content may be a plain string or structured blocks; keep text blocks only.
      const content =
        typeof msg.content === 'string'
          ? msg.content
          : msg.content
              .filter(b => b.type === 'text' && b.text)
              .map(b => b.text)
              .join('\n')

      if (content) {
        lines.push(`${role}: ${content}`)
      }

      // Record tool invocations so the summary can preserve which actions were taken.
      if (msg.toolCalls) {
        for (const call of msg.toolCalls) {
          lines.push(`[Tool call: ${call.name}(${JSON.stringify(call.arguments)})]`)
        }
      }
    }

    return lines.join('\n\n')
  }

  /** Parse the LLM response, extracting the summary and optional facts block. */
  private static parseResponse(
    content: string,
    extractFacts?: boolean
  ): { summary: string; facts: Fact[] } {
    if (!extractFacts) {
      return { summary: content.trim(), facts: [] }
    }

    const factsMatch = content.match(/<facts>\s*([\s\S]*?)\s*<\/facts>/)
    const now = new Date().toISOString()

    let facts: Fact[] = []
    if (factsMatch?.[1]) {
      try {
        const parsed = JSON.parse(factsMatch[1]) as Array<{
          key: string
          value: string
          confidence?: number
        }>
        facts = parsed.map(f => ({
          key: f.key,
          value: f.value,
          source: 'extracted' as const,
          // Extracted facts default to moderate confidence when the model omits it.
          confidence: f.confidence ?? 0.7,
          createdAt: now,
          updatedAt: now,
        }))
      } catch {
        // If fact parsing fails, continue with just the summary
      }
    }

    // Remove the facts block from the summary
    const summary = content.replace(/<facts>[\s\S]*?<\/facts>/, '').trim()

    return { summary, facts }
  }
}
@@ -0,0 +1,56 @@
1
+ import type { SerializedMemoryThread, ThreadStore } from './types.ts'
2
+
3
+ /**
4
+ * In-memory thread store for development and testing.
5
+ *
6
+ * Platform will provide a DatabaseThreadStore backed by PostgreSQL.
7
+ * This implementation stores everything in a Map — data is lost
8
+ * when the process exits.
9
+ */
10
+ export class InMemoryThreadStore implements ThreadStore {
11
+ private threads = new Map<string, SerializedMemoryThread>()
12
+
13
+ async save(thread: SerializedMemoryThread): Promise<void> {
14
+ this.threads.set(thread.id, {
15
+ ...thread,
16
+ messages: [...thread.messages],
17
+ facts: thread.facts ? [...thread.facts] : undefined,
18
+ })
19
+ }
20
+
21
+ async load(id: string): Promise<SerializedMemoryThread | null> {
22
+ const thread = this.threads.get(id)
23
+ if (!thread) return null
24
+
25
+ return {
26
+ ...thread,
27
+ messages: [...thread.messages],
28
+ facts: thread.facts ? [...thread.facts] : undefined,
29
+ }
30
+ }
31
+
32
+ async delete(id: string): Promise<void> {
33
+ this.threads.delete(id)
34
+ }
35
+
36
+ async list(options?: { limit?: number; offset?: number }): Promise<SerializedMemoryThread[]> {
37
+ const all = Array.from(this.threads.values()).sort((a, b) =>
38
+ b.updatedAt.localeCompare(a.updatedAt)
39
+ )
40
+
41
+ const offset = options?.offset ?? 0
42
+ const limit = options?.limit ?? all.length
43
+
44
+ return all.slice(offset, offset + limit)
45
+ }
46
+
47
+ /** Get the number of stored threads. For testing. */
48
+ get size(): number {
49
+ return this.threads.size
50
+ }
51
+
52
+ /** Clear all stored threads. For testing. */
53
+ clear(): void {
54
+ this.threads.clear()
55
+ }
56
+ }