agentic-flow 1.8.5 → 1.8.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/reasoningbank/config/reasoningbank-types.ts +57 -0
- package/dist/reasoningbank/config/reasoningbank.yaml +145 -0
- package/dist/reasoningbank/utils/embeddings.js +76 -51
- package/package.json +3 -2
- package/wasm/reasoningbank/reasoningbank_wasm_bg.js +2 -2
- package/wasm/reasoningbank/reasoningbank_wasm_bg.wasm +0 -0
package/dist/reasoningbank/config/reasoningbank-types.ts  ADDED

@@ -0,0 +1,57 @@
+/**
+ * TypeScript configuration types for ReasoningBank
+ */
+
+export interface ReasoningBankConfig {
+  retrieve: {
+    k: number;
+    alpha: number;
+    beta: number;
+    gamma: number;
+    delta: number;
+    recency_half_life_days: number;
+    min_score: number;
+  };
+  judge: {
+    model: string;
+    max_tokens: number;
+    confidence_threshold: number;
+  };
+  distill: {
+    model: string;
+    max_tokens: number;
+    temperature: number;
+  };
+  consolidate: {
+    duplicate_threshold: number;
+    contradiction_threshold: number;
+    trigger_threshold: number;
+    prune_age_days: number;
+    prune_min_confidence: number;
+    min_confidence_keep: number;
+  };
+  matts: {
+    parallel_k: number;
+    sequential_k: number;
+    sequential_r: number;
+    sequential_stop_on_success: boolean;
+    confidence_boost: number;
+  };
+  embeddings: {
+    provider: 'claude' | 'openai';
+    model: string;
+    dims: number;
+    dimensions: number;
+    cache_ttl_seconds: number;
+  };
+  governance: {
+    scrub_pii: boolean;
+    pii_scrubber: boolean;
+    tenant_scoped: boolean;
+  };
+  features?: {
+    enable_pre_task_hook?: boolean;
+    enable_post_task_hook?: boolean;
+    enable_matts_parallel?: boolean;
+  };
+}
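For reference, a value satisfying this interface might look like the sketch below. The values are illustrative only, loosely echoing the defaults in reasoningbank.yaml (shown next), and the import path is an assumption about where the compiled types live. Note that the provider union here ('claude' | 'openai') is narrower than the provider options the YAML comments list ("claude" | "openai" | "huggingface" | "local").

// Illustrative config object; values and import path are assumptions, not shipped defaults.
import type { ReasoningBankConfig } from './dist/reasoningbank/config/reasoningbank-types';

const exampleConfig: ReasoningBankConfig = {
  retrieve: { k: 3, alpha: 0.65, beta: 0.15, gamma: 0.2, delta: 0.1, recency_half_life_days: 45, min_score: 0.3 },
  judge: { model: 'claude-sonnet-4-5-20250929', max_tokens: 512, confidence_threshold: 0.5 },
  distill: { model: 'claude-sonnet-4-5-20250929', max_tokens: 1024, temperature: 0.2 },
  consolidate: {
    duplicate_threshold: 0.87,
    contradiction_threshold: 0.6,
    trigger_threshold: 20,
    prune_age_days: 180,
    prune_min_confidence: 0.3,
    min_confidence_keep: 0.3,
  },
  matts: { parallel_k: 6, sequential_k: 3, sequential_r: 3, sequential_stop_on_success: true, confidence_boost: 0.05 },
  embeddings: {
    provider: 'openai',
    model: 'text-embedding-3-large',
    dims: 3072,
    dimensions: 3072,
    cache_ttl_seconds: 3600,
  },
  governance: { scrub_pii: true, pii_scrubber: true, tenant_scoped: false },
  features: { enable_pre_task_hook: true, enable_post_task_hook: true, enable_matts_parallel: true },
};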
package/dist/reasoningbank/config/reasoningbank.yaml  ADDED

@@ -0,0 +1,145 @@
+reasoningbank:
+  version: "1.0.0"
+  enabled: true
+
+  # ============================================================================
+  # Retrieval Configuration (Algorithm 1)
+  # ============================================================================
+  retrieve:
+    k: 3                          # Top-k memories to inject into system prompt
+    alpha: 0.65                   # Weight: semantic similarity (cosine)
+    beta: 0.15                    # Weight: recency (exponential decay)
+    gamma: 0.20                   # Weight: reliability (confidence * usage)
+    delta: 0.10                   # Weight: diversity penalty (MMR)
+    recency_half_life_days: 45    # Exponential decay half-life for age
+    duplicate_threshold: 0.87     # Cosine similarity threshold for deduplication
+    min_score: 0.3                # Don't inject memories below this score
+    max_age_days: 365             # Ignore memories older than this
+
+  # ============================================================================
+  # Embedding Configuration
+  # ============================================================================
+  embeddings:
+    provider: "local"                       # "claude" | "openai" | "huggingface" | "local"
+    model: "Xenova/all-MiniLM-L6-v2"        # local transformers.js model
+    # model: "claude-sonnet-4-5-20250929"   # for Claude provider
+    # model: "text-embedding-3-large"       # for OpenAI provider
+    dimensions: 384                         # vector dimensions (local: 384, OpenAI: 1536/3072)
+    cache_ttl_seconds: 3600                 # cache embeddings for 1 hour
+    batch_size: 16                          # batch embeddings for efficiency
+
+  # ============================================================================
+  # Judge Configuration (Algorithm 2)
+  # ============================================================================
+  judge:
+    model: "claude-sonnet-4-5-20250929"
+    temperature: 0                # deterministic evaluation
+    max_tokens: 512
+    timeout_ms: 10000
+    cache_verdicts: true          # cache judgments by trajectory hash
+    retry_on_parse_error: true
+    fallback_label: "Failure"     # conservative fallback on judge error
+    fallback_confidence: 0.5
+
+  # ============================================================================
+  # Distillation Configuration (Algorithm 3)
+  # ============================================================================
+  distill:
+    max_items_per_trajectory: 3       # extract up to N memories per trajectory
+    success_confidence_prior: 0.75    # initial confidence for success-derived memories
+    failure_confidence_prior: 0.60    # initial confidence for failure-derived guardrails
+    redact_pii: true                  # scrub PII before storing
+    redact_patterns:
+      - '\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'   # emails
+      - '\b(?:\d{3}-\d{2}-\d{4}|\d{9})\b'                       # SSN
+      - '\b(?:sk-[a-zA-Z0-9]{48}|ghp_[a-zA-Z0-9]{36})\b'        # API keys (Anthropic, GitHub)
+      - '\b(?:xoxb-[a-zA-Z0-9\-]+)\b'                           # Slack tokens
+      - '\b(?:\d{13,19})\b'                                     # Credit card numbers
+    min_content_length: 20            # reject memories with content too short
+    max_content_length: 2000          # truncate if too long
+
+  # ============================================================================
+  # Consolidation Configuration (Algorithm 4)
+  # ============================================================================
+  consolidate:
+    enabled: true
+    run_every_new_items: 20               # consolidate after N new memories
+    contradiction_threshold: 0.60         # NLI probability threshold for contradictions
+    prune_age_days: 180                   # hard delete if unused and old
+    min_confidence_keep: 0.30             # prune if confidence drops below this
+    max_contradictions_allowed: 5         # quarantine if contradicts > N high-usage items
+    dedup_similarity_threshold: 0.87      # merge if cosine > this
+    merge_strategy: "keep_highest_usage"  # "keep_highest_usage" | "keep_most_recent"
+
+  # ============================================================================
+  # MaTTS Configuration (Algorithm 5)
+  # ============================================================================
+  matts:
+    enabled: true
+
+    # Parallel mode: k independent rollouts with self-contrast aggregation
+    parallel:
+      k: 6                          # number of parallel rollouts
+      diversity_temperature: 0.9    # sampling temperature for diversity
+      max_concurrent: 3             # concurrent executions (rate limiting)
+      aggregation_model: "claude-sonnet-4-5-20250929"
+      aggregation_max_tokens: 2048
+
+    # Sequential mode: r iterative refinements with check-and-correct
+    sequential:
+      r: 3                          # number of refinement iterations
+      check_instruction: "Review your previous attempt. Identify errors or missing steps. Correct and continue."
+      max_iterations: 5             # hard cap to prevent infinite loops
+      stop_on_success: true         # stop if judge labels Success before r iterations
+
+  # ============================================================================
+  # Governance and Compliance
+  # ============================================================================
+  governance:
+    pii_scrubber: true                        # enable PII redaction
+    tenant_scoped: false                      # set true for multi-tenant deployments
+    audit_trail: true                         # log all memory operations to events table
+    max_memory_age_days: 365                  # absolute max age before forced deletion
+    require_approval_for_high_impact: false   # flag high-impact memories for review
+
+  # ============================================================================
+  # Performance and Observability
+  # ============================================================================
+  performance:
+    log_metrics: true                 # write to performance_metrics table
+    export_csv_interval_days: 7       # export metrics CSV weekly
+    alert_on_degradation: true        # alert if success rate drops
+    success_rate_threshold: 0.70      # baseline threshold for alerting
+    max_retrieve_latency_ms: 500      # alert if retrieval takes longer
+    max_judge_latency_ms: 5000        # alert if judge takes longer
+
+  # ============================================================================
+  # Learning Rate and Confidence Updates
+  # ============================================================================
+  learning:
+    eta: 0.05                     # learning rate for confidence updates
+    success_boost: 1.0            # confidence += eta * success_boost when used in success
+    failure_penalty: -0.5         # confidence += eta * failure_penalty when used in failure
+    usage_boost_sigmoid: true     # apply sigmoid to usage_count for reliability score
+
+  # ============================================================================
+  # Feature Flags
+  # ============================================================================
+  features:
+    enable_pre_task_hook: true        # retrieve and inject memories before task
+    enable_post_task_hook: true       # judge, distill, consolidate after task
+    enable_matts_parallel: true       # allow parallel MaTTS
+    enable_matts_sequential: true     # allow sequential MaTTS
+    enable_contradiction_detection: true
+    enable_auto_pruning: true
+    enable_memory_merging: true
+
+  # ============================================================================
+  # Development and Debugging
+  # ============================================================================
+  debug:
+    verbose_logging: false
+    save_trajectories: true           # persist all trajectories to task_trajectories
+    save_embeddings: true             # persist embeddings for inspection
+    log_retrieval_scores: false       # log detailed scoring breakdown
+    dry_run: false                    # don't actually upsert memories (testing only)
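The retrieve weights above describe a blended ranking: cosine similarity, exponential recency decay, a reliability term built from confidence and usage, and an MMR-style diversity penalty, with min_score as a floor. The sketch below shows one way such weights are typically combined, together with the confidence update spelled out in the learning.* comments; it is an illustration of the configured semantics, not the package's actual scoring code, and the helper names are hypothetical.

// Hypothetical sketch of the weighted retrieval score and confidence update
// implied by the retrieve.* and learning.* settings above; not package source.
interface MemoryCandidate {
  similarity: number;   // cosine similarity to the query, in [0, 1]
  ageDays: number;      // age of the memory in days
  confidence: number;   // stored confidence, in [0, 1]
  usageCount: number;   // times this memory has been used
  redundancy: number;   // max similarity to already-selected memories (MMR-style)
}

const weights = { alpha: 0.65, beta: 0.15, gamma: 0.2, delta: 0.1, halfLifeDays: 45, minScore: 0.3 };

function retrievalScore(c: MemoryCandidate, w = weights): number {
  const recency = Math.pow(0.5, c.ageDays / w.halfLifeDays);         // exponential decay with configured half-life
  const reliability = c.confidence / (1 + Math.exp(-c.usageCount));  // confidence * sigmoid(usage), per usage_boost_sigmoid
  return w.alpha * c.similarity + w.beta * recency + w.gamma * reliability - w.delta * c.redundancy;
}

// learning.*: confidence += eta * success_boost on success, eta * failure_penalty on failure
// (clamped to [0, 1] here as a safety assumption).
function updateConfidence(confidence: number, succeeded: boolean, eta = 0.05): number {
  return Math.min(1, Math.max(0, confidence + eta * (succeeded ? 1.0 : -0.5)));
}

Candidates scoring below min_score (0.3) would simply not be injected into the system prompt.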
package/dist/reasoningbank/utils/embeddings.js  CHANGED

@@ -1,82 +1,107 @@
 /**
  * Embedding generation for semantic similarity
- *
+ * Uses local transformers.js - no API key required!
  */
+import { pipeline } from '@xenova/transformers';
 import { loadConfig } from './config.js';
+let embeddingPipeline = null;
+let isInitializing = false;
 const embeddingCache = new Map();
 /**
- *
-
+ * Initialize the embedding pipeline (lazy load)
+ */
+async function initializeEmbeddings() {
+    if (embeddingPipeline)
+        return;
+    if (isInitializing) {
+        // Wait for initialization to complete
+        while (isInitializing) {
+            await new Promise(resolve => setTimeout(resolve, 100));
+        }
+        return;
+    }
+    isInitializing = true;
+    console.log('[Embeddings] Initializing local embedding model (Xenova/all-MiniLM-L6-v2)...');
+    console.log('[Embeddings] First run will download ~23MB model...');
+    try {
+        embeddingPipeline = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2', { quantized: true } // Smaller, faster
+        );
+        console.log('[Embeddings] Local model ready! (384 dimensions)');
+    }
+    catch (error) {
+        console.error('[Embeddings] Failed to initialize:', error?.message || error);
+        console.warn('[Embeddings] Falling back to hash-based embeddings');
+    }
+    finally {
+        isInitializing = false;
+    }
+}
+/**
+ * Compute embedding for text using local model
  */
 export async function computeEmbedding(text) {
     const config = loadConfig();
     // Check cache
-    const cacheKey =
+    const cacheKey = `local:${text}`;
     if (embeddingCache.has(cacheKey)) {
         return embeddingCache.get(cacheKey);
     }
     let embedding;
-
-
-
-
-
-
+    // Initialize if needed
+    await initializeEmbeddings();
+    if (embeddingPipeline) {
+        try {
+            // Use transformers.js for real embeddings
+            const output = await embeddingPipeline(text, {
+                pooling: 'mean',
+                normalize: true
+            });
+            embedding = new Float32Array(output.data);
+        }
+        catch (error) {
+            console.error('[Embeddings] Generation failed:', error?.message || error);
+            embedding = hashEmbed(text, 384); // Fallback
+        }
     }
     else {
-        // Fallback to
-
+        // Fallback to hash-based embeddings
+        const dims = config?.embeddings?.dimensions || 384;
+        embedding = hashEmbed(text, dims);
+    }
+    // Cache with LRU (limit 1000 entries)
+    if (embeddingCache.size > 1000) {
+        const firstKey = embeddingCache.keys().next().value;
+        if (firstKey) {
+            embeddingCache.delete(firstKey);
+        }
     }
-    // Cache with TTL
     embeddingCache.set(cacheKey, embedding);
-
+    // Set TTL for cache entry
+    const ttl = config?.embeddings?.cache_ttl_seconds || 3600;
+    setTimeout(() => embeddingCache.delete(cacheKey), ttl * 1000);
     return embedding;
 }
 /**
- *
+ * Batch compute embeddings (more efficient)
  */
-async function
-
-
-
-
-
-
-
-        const response = await fetch('https://api.openai.com/v1/embeddings', {
-            method: 'POST',
-            headers: {
-                'Authorization': `Bearer ${apiKey}`,
-                'Content-Type': 'application/json'
-            },
-            body: JSON.stringify({
-                model: model || 'text-embedding-3-small',
-                input: text
-            })
-        });
-        if (!response.ok) {
-            throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
-        }
-        const json = await response.json();
-        return new Float32Array(json.data[0].embedding);
-    }
-    catch (error) {
-        console.error('[ERROR] OpenAI embedding failed:', error);
-        console.warn('[WARN] Falling back to hash embeddings');
-        const config = loadConfig();
-        return hashEmbed(text, config.embeddings.dimensions);
-    }
+export async function computeEmbeddingBatch(texts) {
+    return Promise.all(texts.map(text => computeEmbedding(text)));
+}
+/**
+ * Get embedding dimensions
+ */
+export function getEmbeddingDimensions() {
+    return 384; // all-MiniLM-L6-v2 uses 384 dimensions
 }
 /**
- * Deterministic hash-based embedding
- * For testing and when API keys are unavailable
+ * Deterministic hash-based embedding (fallback)
  */
 function hashEmbed(text, dims) {
     const hash = simpleHash(text);
     const vec = new Float32Array(dims);
     // Generate deterministic pseudo-random vector from hash
     for (let i = 0; i < dims; i++) {
-        vec[i] = Math.sin(hash * (i + 1) * 0.01)
+        vec[i] = Math.sin(hash * (i + 1) * 0.01) + Math.cos(hash * i * 0.02);
     }
     return normalize(vec);
 }

@@ -87,7 +112,7 @@ function simpleHash(str) {
     let hash = 0;
     for (let i = 0; i < str.length; i++) {
         hash = ((hash << 5) - hash) + str.charCodeAt(i);
-        hash |= 0;
+        hash |= 0;
     }
     return Math.abs(hash);
 }

@@ -108,7 +133,7 @@ function normalize(vec) {
     return vec;
 }
 /**
- * Clear embedding cache
+ * Clear embedding cache
  */
 export function clearEmbeddingCache() {
     embeddingCache.clear();
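The rewritten module keeps the same public surface (computeEmbedding, computeEmbeddingBatch, getEmbeddingDimensions, clearEmbeddingCache) while swapping the OpenAI call for the local transformers.js model. A small usage sketch follows; the import specifier is an assumption about how the compiled dist file is reached, and cosine() is a helper written for the example (the pipeline output is normalized, so a dot product suffices).

// Usage sketch only; the import path assumes direct access to the compiled dist file.
import { computeEmbeddingBatch, getEmbeddingDimensions } from './dist/reasoningbank/utils/embeddings.js';

// Cosine similarity helper for the example; embeddings come back L2-normalized.
function cosine(a: Float32Array, b: Float32Array): number {
    let dot = 0;
    for (let i = 0; i < a.length; i++) dot += a[i] * b[i];
    return dot;
}

const [a, b] = await computeEmbeddingBatch([
    'retry the request with exponential backoff',
    'back off exponentially before retrying failed requests',
]);
console.log(getEmbeddingDimensions()); // 384 for all-MiniLM-L6-v2
console.log(cosine(a, b).toFixed(3));  // high for paraphrases, lower for unrelated text

The first call triggers the ~23MB model download noted in the init logs; if the model fails to load or generate, the module falls back to the deterministic hash embedding.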
package/package.json  CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "agentic-flow",
-  "version": "1.8.5",
+  "version": "1.8.6",
   "description": "Production-ready AI agent orchestration platform with 66 specialized agents, 213 MCP tools, ReasoningBank learning memory, and autonomous multi-agent swarms. Built by @ruvnet with Claude Agent SDK, neural networks, memory persistence, GitHub integration, and distributed consensus protocols.",
   "type": "module",
   "main": "dist/index.js",

@@ -24,7 +24,7 @@
   "scripts": {
     "postinstall": "node scripts/postinstall.js || true",
     "start": "node --enable-source-maps dist/index.js",
-    "build": "npm run build:wasm && tsc -p config/tsconfig.json --skipLibCheck || true && cp -r src/reasoningbank/prompts dist/reasoningbank/",
+    "build": "npm run build:wasm && tsc -p config/tsconfig.json --skipLibCheck || true && cp -r src/reasoningbank/prompts dist/reasoningbank/ && cp -r src/reasoningbank/config dist/reasoningbank/",
     "build:wasm": "cd ../reasoningbank && wasm-pack build --target bundler --out-dir pkg/bundler crates/reasoningbank-wasm && wasm-pack build --target web --out-dir pkg/web crates/reasoningbank-wasm && mkdir -p ../agentic-flow/wasm/reasoningbank && cp -r crates/reasoningbank-wasm/pkg/bundler/* ../agentic-flow/wasm/reasoningbank/ && cp -r crates/reasoningbank-wasm/pkg/web ../agentic-flow/wasm/reasoningbank/",
     "build:wasm:clean": "rm -rf ../reasoningbank/crates/reasoningbank-wasm/pkg && rm -rf wasm/reasoningbank",
     "dev": "tsx src/index.ts",

@@ -143,6 +143,7 @@
     "@anthropic-ai/claude-agent-sdk": "^0.1.5",
     "@anthropic-ai/sdk": "^0.65.0",
     "@google/genai": "^1.22.0",
+    "@xenova/transformers": "^2.17.2",
     "agentdb": "^1.4.3",
     "axios": "^1.12.2",
     "better-sqlite3": "^11.10.0",
package/wasm/reasoningbank/reasoningbank_wasm_bg.js  CHANGED

@@ -258,7 +258,7 @@ export function log(message) {
     wasm.log(ptr0, len0);
 }

-function
+function __wbg_adapter_4(arg0, arg1, arg2) {
     wasm.__wbindgen_export_5(arg0, arg1, addHeapObject(arg2));
 }

@@ -540,7 +540,7 @@ export function __wbindgen_cast_2241b6af4c4b2941(arg0, arg1) {

 export function __wbindgen_cast_8eb6fd44e7238d11(arg0, arg1) {
     // Cast intrinsic for `Closure(Closure { dtor_idx: 62, function: Function { arguments: [Externref], shim_idx: 63, ret: Unit, inner_ret: Some(Unit) }, mutable: true }) -> Externref`.
-    const ret = makeMutClosure(arg0, arg1, 62,
+    const ret = makeMutClosure(arg0, arg1, 62, __wbg_adapter_4);
     return addHeapObject(ret);
 };
package/wasm/reasoningbank/reasoningbank_wasm_bg.wasm  CHANGED

Binary file