@blockrun/franklin 3.15.67 → 3.15.69
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/context.js +1 -0
- package/dist/agent/llm.js +30 -0
- package/dist/agent/loop.d.ts +1 -0
- package/dist/agent/loop.js +110 -3
- package/dist/commands/migrate.js +202 -21
- package/dist/plugins/registry.d.ts +5 -3
- package/dist/plugins/registry.js +8 -5
- package/dist/session/storage.d.ts +9 -0
- package/dist/session/storage.js +17 -5
- package/package.json +1 -1
package/dist/agent/context.js
CHANGED
@@ -315,6 +315,7 @@ function getToolPatternsSection() {
 - **Making changes**: Read the file → Edit with targeted replacement → verify the edit worked (Read again or run tests). Never Edit without Reading first.
 - **Running commands**: Use Bash for shell operations that have no dedicated tool. Chain commands with && when sequential. Use separate Bash calls when you need to inspect intermediate output.
 - **Research**: WebSearch for discovery → WebFetch for specific URLs from search results. Don't WebFetch URLs you invented.
+- **Comparing products / services / APIs** (e.g. "X vs Y, which is better"): start with **WebSearch / ExaSearch / WebFetch** on each vendor's docs/pricing pages. Do NOT \`curl\` the live API as a first move — third-party APIs sit behind WAFs that 401/403/"fault filter abort" on probes, and burning 10+ Bash calls cycling through auth schemes is pure waste. Only hit the live API after public docs have been read AND the user explicitly asked for a hands-on test.
 - **Complex tasks**: Use Agent to spawn sub-agents for 2+ independent research or implementation tasks. Don't do sequentially what can be done in parallel.
 - **Multiple independent lookups**: Call all tools in a single response. NEVER make sequential calls when parallel calls would work.
 - **Long-running iteration (>20 items)**: Use the **Detach** tool, not turn-by-turn loops. Write a script that iterates and persists a checkpoint file (e.g. \`./.franklin/<task>.checkpoint.json\` with cursor + processedCount), then start it via Detach — \`{ label: "scrape stargazers", command: "node fetch.mjs" }\`. Detach returns a runId immediately and the work continues even if Franklin exits. Inspect with \`franklin task tail <runId> --follow\` / \`task wait <runId>\` / \`task cancel <runId>\`. The agent's job is to design and orchestrate, not to be the for-loop. Pattern fits paginated APIs, batch enrichment, large CSV emit, anything where the loop body is deterministic.
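
A worker script following the checkpoint pattern described in the new Detach guidance could look roughly like the sketch below. The checkpoint path and the cursor/processedCount fields come from the prompt text above; the endpoint, response shape, and per-item work are invented placeholders, not part of the package.

// fetch.mjs — minimal sketch of a Detach-able worker (hypothetical endpoint and fields)
import fs from 'node:fs';

const CHECKPOINT = './.franklin/scrape-stargazers.checkpoint.json';

// Resume from the checkpoint if a previous run left one behind.
const state = fs.existsSync(CHECKPOINT)
    ? JSON.parse(fs.readFileSync(CHECKPOINT, 'utf-8'))
    : { cursor: null, processedCount: 0 };

// Placeholder for the real paginated call (GitHub API, batch enrichment, CSV chunk, ...).
async function fetchPage(cursor) {
    const res = await fetch(`https://api.example.com/stargazers?cursor=${cursor ?? ''}`);
    return res.json(); // assumed shape: { items: [...], nextCursor: string | null }
}

let page;
do {
    page = await fetchPage(state.cursor);
    for (const item of page.items) {
        // ...task-specific work per item goes here...
        state.processedCount++;
    }
    state.cursor = page.nextCursor;
    // Persist after every page so a crash or restart loses at most one page.
    fs.mkdirSync('./.franklin', { recursive: true });
    fs.writeFileSync(CHECKPOINT, JSON.stringify(state));
} while (state.cursor);

console.log(`done: ${state.processedCount} items processed`);

Started via Detach ({ label: "scrape stargazers", command: "node fetch.mjs" }), the loop keeps running after Franklin exits, and the checkpoint makes restarts idempotent.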

package/dist/agent/llm.js
CHANGED
@@ -780,6 +780,36 @@ export class ModelClient {
                 collected.push({ type: 'text', text: currentText });
             }
         }
+        // Fallback: some non-Anthropic providers behind the gateway (e.g. zai/glm-5.1)
+        // emit \`message_start\` with \`output_tokens: 1\` as a placeholder and never
+        // send a final \`message_delta\` carrying the real count. The audit log
+        // then records \`outputTokens: 1\` for every call in the session even
+        // though the model produced rich tool_use/text content. Verified
+        // 2026-05-05 in a real session: 50 audit rows, 17 distinct multi-line
+        // bash commands, total \`output_tokens\` summed to 1,154 — most rows
+        // showed 1. We estimate from the collected payload byte length when
+        // the reported count is implausibly low for the actual content.
+        if (usage.outputTokens <= 1 && collected.length > 0) {
+            let bytes = 0;
+            for (const part of collected) {
+                if (part.type === 'text') {
+                    bytes += part.text?.length ?? 0;
+                }
+                else if (part.type === 'tool_use') {
+                    const tu = part;
+                    bytes += (tu.name?.length ?? 0) + JSON.stringify(tu.input ?? {}).length;
+                }
+                else if (part.type === 'thinking') {
+                    bytes += part.thinking?.length ?? 0;
+                }
+            }
+            // ~4 chars/token is a rough but standard tokenizer-agnostic rule.
+            // Only override when the estimate is noticeably larger — otherwise
+            // trust the wire value (a genuinely tiny response should stay tiny).
+            const estimated = Math.ceil(bytes / 4);
+            if (estimated > usage.outputTokens + 5)
+                usage.outputTokens = estimated;
+        }
         return { content: collected, usage, stopReason };
     }
     // ─── Payment ───────────────────────────────────────────────────────────
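
As a worked example of the estimator above (numbers invented for illustration): a response whose only part is a tool_use named Bash (4 chars) with an input that serializes to 116 characters of JSON contributes 4 + 116 = 120 bytes, so the fallback reports ceil(120 / 4) = 30 output tokens, comfortably above the 1 + 5 override threshold. A genuinely tiny 3-character reply estimates to ceil(3 / 4) = 1 token and leaves the wire value untouched.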

package/dist/agent/loop.d.ts
CHANGED
@@ -3,6 +3,7 @@
  * The core reasoning-action cycle: prompt → model → extract capabilities → execute → repeat.
  */
 import type { AgentConfig, ContentPart, Dialogue, StreamEvent } from './types.js';
+export declare function isExternalWallFailure(toolName: string, output: string, isError?: boolean): boolean;
 /**
  * Detect when the gateway leaked an upstream rate-limit / quota error as a
  * 200-OK text content block instead of a real HTTP error. The Anthropic

package/dist/agent/loop.js
CHANGED
@@ -46,6 +46,19 @@ import { createSessionId, appendToSession, updateSessionMeta, pruneOldSessions,
 function replaceHistory(target, replacement) {
     target.splice(0, target.length, ...replacement);
 }
+const EXTERNAL_WALL_FAILURE_PATTERN = /\b(?:401|403|429|5\d{2})\b|\bunauthor|\bforbid|\bWAF\b|\bcloudflare\b|\bfault filter\b|\bblocked\b|\binvalid (?:auth|api|token|key|bearer)\b/i;
+export function isExternalWallFailure(toolName, output, isError) {
+    if (toolName === 'WebFetch') {
+        return isError === true || EXTERNAL_WALL_FAILURE_PATTERN.test(output);
+    }
+    if (toolName === 'Bash') {
+        // Bash is a general-purpose local tool. Non-zero exits from tests,
+        // builds, git, etc. are useful debugging signal, not proof that the
+        // model is thrashing against an external auth/firewall wall.
+        return output.length > 0 && EXTERNAL_WALL_FAILURE_PATTERN.test(output);
+    }
+    return false;
+}
 // ─── Pushback detection ───────────────────────────────────────────────────
 // Formerly a pair of regex lists (PUSHBACK_STRONG / PUSHBACK_WEAK) plus a
 // claim-on-prior-turn check — ~70 lines of keyword heuristics. Replaced by
@@ -696,6 +709,10 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
     const serverErrorsByModel = new Map();
     const SERVER_ERROR_STREAK_BEFORE_SWITCH = 2;
     let compactFailures = 0;
+    // Research-bloat compaction is fire-once per turn. A later turn can hit
+    // the trigger organically after the first compact, but firing twice from
+    // the same threshold would flap on every iteration once crossed.
+    let bloatCompactedThisTurn = false;
     let maxTokensOverride;
     const turnIdleReference = lastSessionActivity;
     lastSessionActivity = Date.now();
@@ -754,6 +771,25 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
     // ── No-progress guardrail: kill infinite tiny-response loops ──
     let consecutiveTinyResponses = 0; // Count of consecutive calls with <10 output tokens
     const MAX_TINY_RESPONSES = 2; // Break after N tiny responses — if 2 calls return near-empty, something is wrong
+    // ── Turn cost accumulator ──
+    // Surfaced in cap-exceeded messages so the user sees what the wasted
+    // turn actually cost ("$0.05 spent before this turn was killed") instead
+    // of just "tool limit exceeded". sessionCostUsd is too coarse — it
+    // includes earlier productive turns the user got real value from.
+    let turnCostUsd = 0;
+    // ── Failed-external-call guardrail ──
+    // The signature loop guard only catches exact-input repeats. It misses
+    // "thrashing exploration": model calls Bash 17 different ways trying to
+    // fix a 401 against the same dead endpoint. Verified 2026-05-05 in a
+    // real session: glm-5.1 burned 50 calls / $0.05 trying every auth
+    // variation against api.querit.ai (Cloudflare WAF blocked them all)
+    // before the signature guard finally fired on the first exact repeat.
+    // We count consecutive Bash/WebFetch calls whose output looks like a
+    // network/auth failure; reset on any non-failed external call. Five
+    // failures in a row is a wall, not exploration.
+    let consecutiveFailedExternal = 0;
+    const MAX_CONSECUTIVE_FAILED_EXTERNAL = 5;
+    const EXTERNAL_TOOL_NAMES = new Set(['Bash', 'WebFetch']);
     // ── Turn analysis (one classifier call, drives routing + prefetch) ──
     // Single LLM pass that answers every routing-adjacent question the
     // harness needs BEFORE the main model runs: tier, ticker intent,
@@ -893,6 +929,45 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
             logger.warn(\`[franklin] Compaction failed (${compactFailures}/3): ${compactErr.message}\`);
         }
     }
+    // ── Research-bloat compaction (fires before context-window) ──
+    // The window-based trigger above only fires near 172K tokens for a
+    // 200K-context model. Research sessions burn money long before that:
+    // verified 2026-05-05 in a real audit, a glm-5.1 session hit
+    // $0.18 / 177 calls / 3.17M cumulative input — average per-call input
+    // grew to 17.9K because every tool result kept replaying. Top-spend
+    // session in the same log: $6.67 on gemini-2.5-flash in 121 calls,
+    // never approached its 1M-token compaction threshold. Compact here
+    // when the turn has accumulated lots of tool calls AND real spend,
+    // even though the context window isn't close to full.
+    if (!bloatCompactedThisTurn &&
+        compactFailures < 3 &&
+        turnToolCalls > 30 &&
+        turnCostUsd > 0.05) {
+        try {
+            const beforeTokens = estimateHistoryTokens(history);
+            const { history: compacted, compacted: didCompact } = await forceCompact(history, config.model, client, config.debug);
+            if (didCompact) {
+                replaceHistory(history, compacted);
+                resetTokenAnchor();
+                bloatCompactedThisTurn = true;
+                const afterTokens = estimateHistoryTokens(history);
+                const pct = beforeTokens > 0
+                    ? Math.round((1 - afterTokens / beforeTokens) * 100)
+                    : 0;
+                onEvent({
+                    kind: 'text_delta',
+                    text: \`\n*🗜 Research-bloat compact: ${turnToolCalls} tool calls / $${turnCostUsd.toFixed(4)} this turn — summarizing ~${(beforeTokens / 1000).toFixed(0)}K → ~${(afterTokens / 1000).toFixed(0)}K tokens (saved ${pct}%)*\n\n\`,
+                });
+                logger.info(\`[franklin] Research-bloat compacted at ${turnToolCalls} calls / $${turnCostUsd.toFixed(4)}: ~${afterTokens} tokens\`);
+            }
+        }
+        catch (compactErr) {
+            // Don't increment compactFailures — that gate is for the
+            // window-based path. A failed bloat compact just means we keep
+            // going at the higher per-call cost; not catastrophic.
+            logger.warn(\`[franklin] Bloat compaction failed: ${compactErr.message}\`);
+        }
+    }
     // Inject ultrathink instruction when mode is active
     const systemParts = [...config.systemInstructions];
     if (config.ultrathink) {
@@ -1432,6 +1507,7 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
     sessionInputTokens += inputTokens;
     sessionOutputTokens += usage.outputTokens;
     sessionCostUsd += costEstimate;
+    turnCostUsd += costEstimate;
     const opusCost = (inputTokens / 1_000_000) * OPUS_PRICING.input
         + (usage.outputTokens / 1_000_000) * OPUS_PRICING.output;
     sessionSavedVsOpus += Math.max(0, opusCost - costEstimate);
@@ -1661,7 +1737,7 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
     }
     // ── Tool call guardrails ──
     turnToolCalls += results.length;
-    for (const [inv] of results) {
+    for (const [inv, result] of results) {
         const name = inv.name;
         turnToolCounts.set(name, (turnToolCounts.get(name) || 0) + 1);
         // Track (tool, input)-signature for the loop detector below.
@@ -1674,6 +1750,16 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
         if (name === 'Read' && inv.input.file_path) {
             readFileCache.add(inv.input.file_path);
         }
+        // Failed-external-call streak: count consecutive Bash/WebFetch calls
+        // whose output indicates a network/auth wall. Reset on any non-failed
+        // external call so legitimate retry-then-succeed paths aren't punished.
+        if (EXTERNAL_TOOL_NAMES.has(name)) {
+            const looksFailed = isExternalWallFailure(name, typeof result.output === 'string' ? result.output : '', result.isError);
+            if (looksFailed)
+                consecutiveFailedExternal++;
+            else
+                consecutiveFailedExternal = 0;
+        }
     }
     // Refresh activity timestamp after tool execution
     lastSessionActivity = Date.now();
@@ -1807,11 +1893,17 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
         toolCapWarned = true;
         logger.warn(\`[franklin] Tool call cap hit: ${turnToolCalls} calls this turn (soft cap ${MAX_TOOL_CALLS_PER_TURN}, hard cap ${HARD_TOOL_CAP})\`);
     }
+    // Format spend-so-far for cap messages — surfacing the dollar amount
+    // tells the user the real impact ("$0.05 wasted") instead of just
+    // "tool limit exceeded" which doesn't convey severity.
+    const spendNote = turnCostUsd > 0
+        ? \`${turnToolCalls} tool calls, $${turnCostUsd.toFixed(4)} spent this turn\`
+        : \`${turnToolCalls} tool calls this turn\`;
     if (turnToolCalls >= HARD_TOOL_CAP) {
         logger.error(\`[franklin] Hard tool cap exceeded (${turnToolCalls}) — ending turn to prevent runaway\`);
         onEvent({
             kind: 'text_delta',
-            text: \`\n\n⚠️
+            text: \`\n\n⚠️ Runaway loop stopped: ${spendNote}, hit hard cap of ${HARD_TOOL_CAP}. Try rephrasing or use \`/model\` to switch.\n\`,
         });
         onEvent({ kind: 'turn_done', reason: 'cap_exceeded' });
         break;
@@ -1829,7 +1921,22 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
         logger.error(\`[franklin] Signature-loop hard stop: \`${toolName}\` called with identical input ${stuckSignature.count} times this turn — ending turn\`);
         onEvent({
             kind: 'text_delta',
-            text: \`\n\n⚠️ ${
+            text: \`\n\n⚠️ Loop stopped: ${spendNote} before \`${toolName}\` repeated the same input ${stuckSignature.count}×. Rephrase what you need, or try \`/model\` to switch.\n\`,
+        });
+        onEvent({ kind: 'turn_done', reason: 'cap_exceeded' });
+        break;
+    }
+    // Thrashing-against-a-wall hard stop (3.15.69). Catches the case
+    // where each call is structurally distinct (different headers, methods,
+    // auth schemes, query params) but every one returns 4xx/5xx/WAF.
+    // Verified 2026-05-05: glm-5.1 burned 50 calls / $0.05 cycling through
+    // ~17 curl variants against Cloudflare-blocked api.querit.ai — every
+    // input distinct so the signature guard above couldn't help.
+    if (consecutiveFailedExternal >= MAX_CONSECUTIVE_FAILED_EXTERNAL) {
+        logger.error(\`[franklin] Failed-external-call streak: ${consecutiveFailedExternal} consecutive Bash/WebFetch calls returned auth/network errors — ending turn\`);
+        onEvent({
+            kind: 'text_delta',
+            text: \`\n\n⚠️ Hitting a wall: ${consecutiveFailedExternal} consecutive external calls returned auth/firewall errors (${spendNote}). The endpoint or credentials likely don't work. Try a different approach, or use \`/model\` to switch.\n\`,
         });
         onEvent({ kind: 'turn_done', reason: 'cap_exceeded' });
         break;
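
To make the new wall heuristic concrete, here is a small classification sketch. The sample strings are invented; the expected results follow directly from the regex and the WebFetch/Bash branches shown in the first hunk above, and the relative import path assumes code living alongside dist/agent/.

import { isExternalWallFailure } from './loop.js';

isExternalWallFailure('Bash', 'curl: (22) The requested URL returned error: 403');          // → true  ("403")
isExternalWallFailure('Bash', 'upstream connect error or disconnect: fault filter abort');  // → true  ("fault filter")
isExternalWallFailure('WebFetch', '', true);                                                // → true  (tool-level error flag)
isExternalWallFailure('Bash', 'Tests: 42 passed, 0 failed');                                // → false (ordinary local output)
isExternalWallFailure('Read', '403 Forbidden');                                             // → false (only Bash/WebFetch count as external)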

package/dist/commands/migrate.js
CHANGED
@@ -16,18 +16,42 @@ function detectSources() {
     const sources = [];
     const home = os.homedir();
     // ── \`~/.claude/\` config dir (used by several agent CLIs) ──
+    // Real Claude Code (2026 layout) writes:
+    //   ~/.claude.json (top-level, mcpServers + global state)
+    //   ~/.claude/CLAUDE.md (global instructions)
+    //   ~/.claude/projects/<slug>/<uuid>.jsonl (one file per session)
+    //   ~/.claude/projects/<slug>/memory/*.md (project memories)
+    // Older agents and pre-3.x Claude Code variants wrote:
+    //   ~/.claude/mcp.json
+    //   ~/.claude/history.jsonl
+    // We support both — prefer the new layout but fall back so users with
+    // legacy state still get their data imported.
     const claudeDir = path.join(home, '.claude');
-
+    const claudeJson = path.join(home, '.claude.json');
+    const hasClaudeData = fs.existsSync(claudeDir) || fs.existsSync(claudeJson);
+    if (hasClaudeData) {
         const items = [];
-        // MCP servers
-
-
+        // MCP servers — prefer top-level ~/.claude.json (new layout); fall back
+        // to legacy ~/.claude/mcp.json. Only add one item; whichever we find
+        // first is what migrateMcp() will read.
+        const newMcpHasServers = fileHasMcpServers(claudeJson);
+        const legacyMcp = path.join(claudeDir, 'mcp.json');
+        if (newMcpHasServers) {
             items.push({
-                label: 'MCP servers',
-                source:
+                label: 'MCP servers (~/.claude.json)',
+                source: claudeJson,
                 target: path.join(BLOCKRUN_DIR, 'mcp.json'),
-                size: fileSize(
-                transform: () => migrateMcp(
+                size: fileSize(claudeJson),
+                transform: () => migrateMcp(claudeJson),
+            });
+        }
+        else if (fs.existsSync(legacyMcp)) {
+            items.push({
+                label: 'MCP servers (legacy ~/.claude/mcp.json)',
+                source: legacyMcp,
+                target: path.join(BLOCKRUN_DIR, 'mcp.json'),
+                size: fileSize(legacyMcp),
+                transform: () => migrateMcp(legacyMcp),
             });
         }
         // Global instructions → learnings
@@ -41,20 +65,33 @@ function detectSources() {
                 transform: () => migrateInstructions(claudeMd),
             });
         }
-        // Session history
-
-
-
+        // Session history — prefer per-project session JSONLs (new layout); fall
+        // back to legacy ~/.claude/history.jsonl. The new layout preserves session
+        // boundaries (one file = one conversation) instead of collapsing every
+        // message into a daily blob.
+        const projectsDir = path.join(claudeDir, 'projects');
+        const sessionFiles = fs.existsSync(projectsDir) ? findClaudeCodeSessionFiles(projectsDir) : [];
+        const legacyHistory = path.join(claudeDir, 'history.jsonl');
+        if (sessionFiles.length > 0) {
             items.push({
-                label: \`Session history (${
-                source:
+                label: \`Session history (${sessionFiles.length.toLocaleString()} sessions)\`,
+                source: projectsDir,
                 target: path.join(BLOCKRUN_DIR, 'sessions'),
-                size:
-                transform: () =>
+                size: \`${sessionFiles.length} files\`,
+                transform: () => migrateClaudeCodeSessions(sessionFiles),
+            });
+        }
+        else if (fs.existsSync(legacyHistory)) {
+            const lines = countLines(legacyHistory);
+            items.push({
+                label: \`Session history (legacy, ${lines.toLocaleString()} messages)\`,
+                source: legacyHistory,
+                target: path.join(BLOCKRUN_DIR, 'sessions'),
+                size: fileSize(legacyHistory),
+                transform: () => migrateSessions(legacyHistory),
             });
         }
         // Project memory files
-        const projectsDir = path.join(claudeDir, 'projects');
         if (fs.existsSync(projectsDir)) {
             const memoryFiles = findMemoryFiles(projectsDir);
             if (memoryFiles.length > 0) {
@@ -95,7 +132,10 @@ function detectSources() {
 function migrateMcp(source) {
     const target = path.join(BLOCKRUN_DIR, 'mcp.json');
     const raw = JSON.parse(fs.readFileSync(source, 'utf-8'));
-    // Source format
+    // Source format (Claude Code ~/.claude.json or legacy mcp.json):
+    //   { mcpServers: { name: { type?, transport?, command, args, env? } } }
+    // ~/.claude.json wraps mcpServers among hundreds of unrelated state keys —
+    // we only read the one field.
     // Franklin format: { mcpServers: { name: { transport, command, args, label } } }
     const servers = {};
     const skipped = [];
@@ -121,7 +161,8 @@ function migrateMcp(source) {
             continue;
         }
         servers[name] = {
-
+            // Claude Code uses \`type\`; older agents used \`transport\`. Accept both.
+            transport: config.transport || config.type || 'stdio',
             command: config.command,
             args: config.args || [],
             label: name,
@@ -199,6 +240,143 @@ function migrateInstructions(source) {
         console.log(chalk.dim(' ○ No extractable preferences found'));
     }
 }
+/**
+ * Import per-session JSONL files written by current Claude Code (2026 layout).
+ * One source file = one Franklin session — we preserve session boundaries
+ * instead of mashing everything into a daily blob like the legacy importer.
+ *
+ * Source line shape:
+ *   { type: "user"|"assistant"|"attachment"|"permission-mode"|...,
+ *     message?: { role, content }, timestamp, sessionId, cwd }
+ * Target Dialogue line shape: { role, content }
+ */
+function migrateClaudeCodeSessions(sessionFiles) {
+    const sessionsDir = path.join(BLOCKRUN_DIR, 'sessions');
+    fs.mkdirSync(sessionsDir, { recursive: true });
+    let imported = 0;
+    let skipped = 0;
+    let totalTurns = 0;
+    for (const file of sessionFiles) {
+        const sessionId = path.basename(file, '.jsonl');
+        const targetJsonl = path.join(sessionsDir, \`${sessionId}.jsonl\`);
+        const targetMeta = path.join(sessionsDir, \`${sessionId}.meta.json\`);
+        // Don't re-import on a second run — the user might have already
+        // resumed and added turns to the imported session.
+        if (fs.existsSync(targetMeta)) {
+            skipped++;
+            continue;
+        }
+        let raw;
+        try {
+            raw = fs.readFileSync(file, 'utf-8');
+        }
+        catch {
+            continue;
+        }
+        const dialogues = [];
+        let firstTs = 0;
+        let lastTs = 0;
+        let workDir = os.homedir();
+        let model = 'claude-code-import';
+        for (const line of raw.split('\n')) {
+            const trimmed = line.trim();
+            if (!trimmed)
+                continue;
+            let entry;
+            try {
+                entry = JSON.parse(trimmed);
+            }
+            catch {
+                continue;
+            }
+            // Track timestamps + cwd from any line that has them.
+            const ts = entry.timestamp;
+            if (typeof ts === 'string') {
+                const t = Date.parse(ts);
+                if (Number.isFinite(t)) {
+                    if (!firstTs || t < firstTs)
+                        firstTs = t;
+                    if (t > lastTs)
+                        lastTs = t;
+                }
+            }
+            if (typeof entry.cwd === 'string' && entry.cwd)
+                workDir = entry.cwd;
+            // Only user/assistant turns become Franklin Dialogue lines. Everything
+            // else (attachments, permission-mode, summary, system) is metadata
+            // we don't replay.
+            if (entry.type !== 'user' && entry.type !== 'assistant')
+                continue;
+            const msg = entry.message;
+            if (!msg || (msg.role !== 'user' && msg.role !== 'assistant'))
+                continue;
+            if (typeof msg.model === 'string')
+                model = msg.model;
+            dialogues.push(JSON.stringify({ role: msg.role, content: msg.content }));
+        }
+        if (dialogues.length === 0) {
+            skipped++;
+            continue;
+        }
+        fs.writeFileSync(targetJsonl, dialogues.join('\n') + '\n');
+        fs.writeFileSync(targetMeta, JSON.stringify({
+            id: sessionId,
+            model,
+            workDir,
+            createdAt: firstTs || Date.now(),
+            updatedAt: lastTs || Date.now(),
+            turnCount: Math.floor(dialogues.length / 2),
+            messageCount: dialogues.length,
+            imported: true,
+        }, null, 2));
+        imported++;
+        totalTurns += dialogues.length;
+    }
+    const skipNote = skipped > 0 ? chalk.dim(\` (${skipped} skipped)\`) : '';
+    console.log(chalk.green(\` ✓ ${imported} session(s) imported, ${totalTurns.toLocaleString()} turns${skipNote}\`));
+}
+/** Walk ~/.claude/projects/<slug>/*.jsonl — one file per Claude Code session. */
+function findClaudeCodeSessionFiles(projectsDir) {
+    const out = [];
+    let projects = [];
+    try {
+        projects = fs.readdirSync(projectsDir);
+    }
+    catch {
+        return out;
+    }
+    for (const project of projects) {
+        const projectPath = path.join(projectsDir, project);
+        let entries = [];
+        try {
+            const stat = fs.statSync(projectPath);
+            if (!stat.isDirectory())
+                continue;
+            entries = fs.readdirSync(projectPath);
+        }
+        catch {
+            continue;
+        }
+        for (const entry of entries) {
+            if (!entry.endsWith('.jsonl'))
+                continue;
+            out.push(path.join(projectPath, entry));
+        }
+    }
+    return out;
+}
+/** True iff the file is JSON with a non-empty mcpServers object. */
+function fileHasMcpServers(p) {
+    try {
+        const raw = JSON.parse(fs.readFileSync(p, 'utf-8'));
+        return !!raw && typeof raw === 'object' &&
+            !!raw.mcpServers && typeof raw.mcpServers === 'object' &&
+            Object.keys(raw.mcpServers).length > 0;
+    }
+    catch {
+        return false;
+    }
+}
 function migrateSessions(source) {
     const sessionsDir = path.join(BLOCKRUN_DIR, 'sessions');
     fs.mkdirSync(sessionsDir, { recursive: true });
@@ -231,7 +409,9 @@ function migrateSessions(source) {
         if (fs.existsSync(sessionFile))
             continue;
         fs.writeFileSync(sessionFile, msgs.join('\n') + '\n');
-        // Create metadata
+        // Create metadata. \`imported: true\` shields these from pruneOldSessions —
+        // a fresh import of 200+ historical sessions would otherwise be deleted
+        // on the next \`franklin\` launch when the agent loop prunes to 20.
         const meta = {
             id: sessionId,
             model: 'imported',
@@ -240,6 +420,7 @@ function migrateSessions(source) {
            updatedAt: Date.now(),
            turnCount: Math.floor(msgs.length / 2),
            messageCount: msgs.length,
+           imported: true,
        };
        fs.writeFileSync(path.join(sessionsDir, \`${sessionId}.meta.json\`), JSON.stringify(meta, null, 2));
        imported++;
@@ -340,7 +521,7 @@ export async function migrateCommand() {
     const sources = detectSources();
     if (sources.length === 0) {
         console.log(chalk.dim(' No other AI tools detected. Nothing to migrate.\n'));
-        console.log(chalk.dim(' Looked for: ~/.claude/, VS Code agent extension, editor agent configs\n'));
+        console.log(chalk.dim(' Looked for: ~/.claude.json, ~/.claude/, VS Code agent extension, editor agent configs\n'));
         return;
     }
     // Show what was found
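
To illustrate what the new importer consumes and emits (sample values invented; field names follow the comment on migrateClaudeCodeSessions above), a single Claude Code session line such as

    {"type":"assistant","sessionId":"3f2a…","cwd":"/home/user/project","timestamp":"2026-05-05T12:00:00Z","message":{"role":"assistant","content":[{"type":"text","text":"Done."}]}}

would be written to the Franklin session JSONL as just the Dialogue portion:

    {"role":"assistant","content":[{"type":"text","text":"Done."}]}

while the timestamp feeds createdAt/updatedAt and the cwd becomes workDir in the generated .meta.json.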

package/dist/plugins/registry.d.ts
CHANGED
@@ -2,10 +2,12 @@
  * Plugin Registry — discovers, loads, and manages plugins.
  *
  * Core stays plugin-agnostic: it knows about the *interface*, not specific plugins.
- * Plugins are discovered from:
- * 1.
+ * Plugins are discovered from (highest priority first):
+ * 1. Local dev: $FRANKLIN_PLUGINS_DIR/* (or legacy $RUNCODE_PLUGINS_DIR/*)
  * 2. User: ~/.blockrun/plugins/*
- * 3.
+ * 3. Bundled: <franklin>/dist/plugins-bundled/* (reserved for plugins
+ *    shipped inside the npm tarball — none today; social/trading/content
+ *    are native subsystems, not plugins)
  */
 import type { Plugin, PluginManifest } from '../plugin-sdk/plugin.js';
 export declare function getBundledPluginsDir(): string;

package/dist/plugins/registry.js
CHANGED
@@ -2,10 +2,12 @@
  * Plugin Registry — discovers, loads, and manages plugins.
  *
  * Core stays plugin-agnostic: it knows about the *interface*, not specific plugins.
- * Plugins are discovered from:
- * 1.
+ * Plugins are discovered from (highest priority first):
+ * 1. Local dev: $FRANKLIN_PLUGINS_DIR/* (or legacy $RUNCODE_PLUGINS_DIR/*)
  * 2. User: ~/.blockrun/plugins/*
- * 3.
+ * 3. Bundled: <franklin>/dist/plugins-bundled/* (reserved for plugins
+ *    shipped inside the npm tarball — none today; social/trading/content
+ *    are native subsystems, not plugins)
  */
 import fs from 'node:fs';
 import path from 'node:path';
@@ -14,8 +16,9 @@ import os from 'node:os';
 const __dirname = path.dirname(fileURLToPath(import.meta.url));
 // ─── Plugin Discovery Paths ───────────────────────────────────────────────
 export function getBundledPluginsDir() {
-    // From dist/plugins/registry.js,
-    //
+    // From dist/plugins/registry.js, look at sibling dist/plugins-bundled/.
+    // Empty today — the build's copy-plugin-assets script populates it from
+    // src/plugins-bundled/ if/when bundled plugins are reintroduced.
     return path.resolve(__dirname, '..', 'plugins-bundled');
 }
 export function getUserPluginsDir() {

package/dist/session/storage.d.ts
CHANGED
@@ -42,6 +42,15 @@ export interface SessionMeta {
      * add any tool inputs or outputs here, just the count per tool name.
      */
     toolCallCounts?: Record<string, number>;
+    /**
+     * Sessions imported from another agent (\`franklin migrate\`). Imports often
+     * exceed MAX_SESSIONS by an order of magnitude (a Claude Code user can
+     * easily have 200+ historical sessions); without this flag, the very
+     * next \`franklin\` launch would prune all but the 20 most recent and
+     * silently destroy the user's history. pruneOldSessions() skips any
+     * meta with imported=true.
+     */
+    imported?: true;
 }
 /** Get the absolute path to a session's JSONL file (for external readers like search). */
 export declare function getSessionFilePath(id: string): string;

package/dist/session/storage.js
CHANGED
@@ -132,6 +132,11 @@ export function updateSessionMeta(sessionId, meta) {
         ...(meta.toolCallCounts !== undefined || existing?.toolCallCounts !== undefined
             ? { toolCallCounts: meta.toolCallCounts ?? existing?.toolCallCounts }
             : {}),
+        // \`imported\` is sticky like \`chain\`: once set by \`franklin migrate\`
+        // it must survive every subsequent update so pruneOldSessions keeps
+        // shielding the session from auto-deletion. Without preservation, the
+        // first turn added via \`--resume\` would silently drop the flag.
+        ...(meta.imported || existing?.imported ? { imported: true } : {}),
     };
     // Atomic write: tmp file + rename. Prevents corruption when parent
     // and sub-agent update the same session meta concurrently.
@@ -234,10 +239,12 @@ export function findLatestSessionByChannel(channel) {
  * Accepts optional activeSessionId to protect from deletion.
  */
 export function pruneOldSessions(activeSessionId) {
-
-
-
-
+    // Only count native sessions toward the MAX_SESSIONS budget. Imported
+    // sessions (from \`franklin migrate\`) are user-owned history and must
+    // never be auto-deleted just because the user ran the agent again.
+    const native = readSessionMetas(false).filter(s => !s.imported);
+    if (native.length > MAX_SESSIONS) {
+        const toDelete = native
             .slice(MAX_SESSIONS)
             .filter(s => s.id !== activeSessionId); // Never delete active session
         for (const s of toDelete) {
@@ -251,11 +258,16 @@ export function pruneOldSessions(activeSessionId) {
             catch { /* ok */ }
         }
     }
-    // Also clean up ghost sessions (0 messages, older than 5 minutes)
+    // Also clean up ghost sessions (0 messages, older than 5 minutes).
+    // Skip imported sessions — they may legitimately have messageCount=0
+    // if the source file had only attachments/system lines.
     const fiveMinAgo = Date.now() - 5 * 60 * 1000;
+    const allSessions = readSessionMetas(true);
     for (const s of allSessions) {
         if (s.id === activeSessionId)
             continue;
+        if (s.imported)
+            continue;
         if (s.messageCount === 0 && s.createdAt < fiveMinAgo) {
             try {
                 fs.unlinkSync(sessionPath(s.id));
package/package.json
CHANGED