deadnet-agent 1.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,227 @@
/**
 * Per-phase user-turn instructions for Oxford-format debates.
 * Keyed by `phase.name` from MatchState; buildMessages appends the matching
 * entry when the last history turn was the agent's own.
 */
export const DEBATE_PHASE_PROMPTS = {
    opening: "Deliver your opening statement. Set your thesis clearly and compellingly. This is your first impression — make it count.",
    rebuttal: "Your rebuttal. Counter your opponent's strongest point directly, then advance a new argument. Be sharp and specific.",
    closing: "Deliver your closing statement. Summarize your strongest case. This is your last word — leave the audience convinced.",
};
/**
 * Rule text injected verbatim into the static system-prompt block, keyed by
 * MatchState.match_type. Unknown match types fall back to "" in buildSystemPrompt.
 */
const MATCH_RULES = {
    debate: `DEBATE RULES (Oxford Format — 3 phases, 10 turns):
- OPENING (turns 1-2): One statement each. Set your thesis. 150 token budget.
- REBUTTAL (turns 3-8): Three exchanges each. Attack, defend, counter. 100 token budget.
- CLOSING (turns 9-10): One statement each. Summarize your case. 150 token budget.
- You are assigned FOR or AGAINST. Argue with total conviction — NEVER concede or agree.
- Address your opponent directly with "you" — talk TO them, not ABOUT them.
- Use specific examples, analogies, data, historical precedent, and rhetorical devices.
- Vary your approach: logic, humor, irony, emotional weight, reductio ad absurdum.
- The audience votes continuously. Persuasion wins, not politeness.
- Matches always run to completion. NEVER use request_end — it is blocked for debate.`,
    freeform: `FREEFORM RULES:
- No assigned positions. Have a genuine, surprising conversation.
- 100 token budget per turn — one sharp idea per response. Quality over quantity.
- Be provocative, thoughtful, and original — the audience rewards novelty.
- If an [AUDIENCE INJECTION] appears in the conversation, engage with it directly.
- Ask unexpected questions, make lateral connections, challenge assumptions.
- Don't just react — steer the conversation somewhere neither of you expected.`,
    story: `STORY RULES:
- You and your opponent alternate writing paragraphs of collaborative fiction.
- Continue DIRECTLY from where they left off — match characters, setting, tone, tense.
- Write exactly one substantial paragraph per turn (4-8 sentences).
- Use markdown: *italics* for internal thoughts, **bold** for dramatic moments.
- Advance the plot: introduce tension, surprise, revelation, or character depth.
- Do NOT break the fourth wall. No meta-commentary. Just write the next paragraph.
- You may use request_end ONLY if the story has reached a natural, satisfying conclusion.`,
};
/**
 * Build the system prompt as three blocks (the original comment said "two",
 * but the function returns three):
 * [0] personality (cache=true) — never changes across sessions.
 * [1] matchBlock (cache=true) — topic, opponent, rules, GIF instructions.
 *     Identical for every turn of the same match → cache hit on turns 2+.
 * [2] dynamicBlock (cache=false) — turn number, time remaining, score, phase,
 *     length constraint. Small (~40 tokens) and changes each turn.
 */
export function buildSystemPrompt(state, personality, gifs = true) {
    const { match_type, topic, your_position, opponent, turn_number, max_turns, turns_remaining, time_remaining_seconds, score, your_side, phase } = state;
    // Optional extras: position line only when a stance is assigned (debates).
    const posLine = your_position ? `\nYour position: ${your_position}` : "";
    const oppLine = `Opponent: ${opponent.name}${opponent.description ? ` — ${opponent.description}` : ""}`;
    const rules = MATCH_RULES[match_type] || "";
    // Two mutually exclusive GIF blocks: how to post them, or how to read the opponent's.
    const gifBlock = gifs ? `GIF EMBEDS:
- Embed a GIF by writing [gif:your search query] anywhere in your text — the backend resolves it automatically.
- Be specific and descriptive so the first result is right (e.g. [gif:michael scott no god please], [gif:explosion mushroom cloud], [gif:mic drop walk away]).
- Use at most once per turn, for comedic timing, dramatic punctuation, or mic-drop moments.
- If your opponent used a GIF (you'll see [gif:URL|title] in their message), the title tells you what they posted.
- GIFs work best in freeform and rebuttal phases. Skip them in opening/closing statements.` : `GIFS:
- You do NOT post GIFs. Never include [gif:...] tags in your response.
- Your opponent may post GIFs. You'll see them as [gif:URL|title] in the conversation history — the title tells you what they posted.`;
    const matchBlock = `You are competing in a live DeadNet ${match_type} match. A live audience watches, votes, and reacts in real time.

MATCH CONTEXT:
- Topic: ${topic}${posLine}
- ${oppLine}
- Match type: ${match_type} (${max_turns} turns total)
${rules}
${gifBlock}

OUTPUT CONSTRAINTS:
- Respond with ONLY your turn content. No preamble, no labels, no wrapping quotes.
- NEVER mention being an AI, the platform, or anything meta about the system.
- ALWAYS end on a complete sentence — never mid-thought.`;
    const budget = state.token_budget_this_turn;
    let phaseLine = "";
    let lengthConstraint = `Write 3-5 sentences. Stay under ${budget} tokens.`;
    // Debate phases tighten the length constraint: rebuttals are shorter than statements.
    if (match_type === "debate" && phase) {
        phaseLine = ` · Phase: ${phase.name.toUpperCase()} (turn ${phase.phase_turn}/${phase.phase_total_turns})`;
        lengthConstraint = phase.name === "rebuttal"
            ? `Write exactly 3 concise sentences. Stay under ${budget} tokens.`
            : `Write exactly 5 sentences. Stay under ${budget} tokens.`;
    }
    const scoreLine = score ? ` · Score: You ${score[your_side]} — Opp ${score[your_side === "A" ? "B" : "A"]}` : "";
    const dynamicBlock = `Turn ${turn_number}/${max_turns} (${turns_remaining} remaining)${phaseLine} · ${time_remaining_seconds}s left${scoreLine}
${lengthConstraint} Make every sentence count.`;
    return [
        { text: personality, cache: true }, // cached once per session — never changes
        { text: matchBlock, cache: true }, // cached once per match — rewritten when topic/opponent changes
        { text: dynamicBlock }, // ~40 tokens, changes every turn
    ];
}
/**
 * Build the game prompt as up to four blocks (strategy is omitted when empty):
 * [0] personality (cache=true) — never changes across sessions
 * [1] strategy (cache=true) — game strategy; omitted if empty; cached per match
 * [2] matchBlock (cache=true) — game name, rules, response format; cached per match
 * [3] dynamicBlock (cache=false) — board render, valid moves, opponent message; changes every move
 *
 * For OpenAI (auto-prefix-cache) and llama.rn/Ollama (KV prefix cache), the ordering
 * of stable-before-dynamic ensures maximum cache hits without explicit markers.
 */
export function buildGamePrompt(gameState, personality, strategy, yourSide, opponentName, opponentLastMessage) {
    // Defensive fallbacks — the server may omit any of these fields.
    const boardRender = gameState.board_render || "(board unavailable)";
    const rawValidMoves = gameState.valid_moves;
    const moveNumber = gameState.move_number || 1;
    const gameName = gameState.game_name || "a strategy game";
    const gameRules = gameState.rules || "";
    // CTF returns valid_moves as {"U1": [...], "U2": [...]} instead of a flat array
    const isCTF = rawValidMoves && !Array.isArray(rawValidMoves) && typeof rawValidMoves === "object";
    if (isCTF) {
        const unitMoves = rawValidMoves;
        // One line per unit: snared units are flagged, empty move lists are dropped.
        const unitLines = Object.entries(unitMoves)
            .map(([label, v]) => {
            if (v && v.snared)
                return `${label}: (snared — will skip this turn)`;
            if (Array.isArray(v) && v.length > 0)
                return `${label}: ${v.join(", ")}`;
            return null;
        })
            .filter(Boolean)
            .join("\n");
        const matchBlock = `You are playing ${gameName} in a live DeadNet match. A live audience watches.
You are Player ${yourSide}. Opponent: ${opponentName}.
${gameRules ? `\nRULES:\n${gameRules}\n` : ""}
Each command is 3 chars: SquareAction (e.g. B2M = move to B2, D4A = attack D4).
Prefix with unit label to form the full command string: U1B2M, U2D4A, etc.
Combine all your unit commands into a single string: e.g. "U1B2MU2D4A".
Snared units are automatically skipped — omit them from your commands string.

RESPONSE FORMAT:
Respond with ONLY a JSON object on a single line:
{"commands": "U1...U2...", "message": "..."}
The message is REQUIRED (max 20 words) — make it dramatic, taunting, or witty. The audience sees it.
Pick the best tactical commands. Respond with ONLY the JSON — no other text.`;
        const dynamicBlock = `Turn ${moveNumber}.${opponentLastMessage ? `\n\nOpponent's last message: "${opponentLastMessage}"` : ''}

CURRENT BOARD:
${boardRender}

VALID COMMANDS PER UNIT (each unit can do one action this turn):
${unitLines}`;
        const blocks = [{ text: personality, cache: true }];
        if (strategy)
            blocks.push({ text: strategy, cache: true });
        blocks.push({ text: matchBlock, cache: true });
        blocks.push({ text: dynamicBlock });
        return blocks;
    }
    // Flat-array games: number each move so the model can answer with an index.
    const validMoves = Array.isArray(rawValidMoves) ? rawValidMoves : [];
    const moveList = validMoves
        .map((m, i) => `${i + 1}. ${JSON.stringify(m)}`)
        .join("\n");
    // NOTE(review): this block says "the list above", but VALID MOVES is rendered
    // below it in dynamicBlock — confirm the wording is intentional.
    const matchBlock = `You are playing ${gameName} in a live DeadNet match. A live audience watches.
You are Player ${yourSide}. Opponent: ${opponentName}.
${gameRules ? `\nRULES:\n${gameRules}\n` : ""}
RESPONSE FORMAT:
Respond with ONLY a JSON object on a single line:
{"move": N, "message": "..."}
N is the number of your chosen move from the list above.
The message is REQUIRED (max 20 words) — make it dramatic, taunting, or witty. The audience sees it.
Pick the strategically best move. Respond with ONLY the JSON — no other text.`;
    const dynamicBlock = `Move ${moveNumber}.${opponentLastMessage ? `\n\nOpponent's last message: "${opponentLastMessage}"` : ''}

CURRENT BOARD:
${boardRender}

VALID MOVES:
${moveList}`;
    const blocks = [{ text: personality, cache: true }];
    if (strategy)
        blocks.push({ text: strategy, cache: true });
    blocks.push({ text: matchBlock, cache: true });
    blocks.push({ text: dynamicBlock });
    return blocks;
}
/**
 * Replace resolved [gif:URL|title] tags with [gif:"title"] so the LLM gets
 * readable context instead of a raw media URL.
 *
 * Unresolved tags ([gif:SOME_ID]) pass through unchanged. The previous second
 * `.replace(/\[gif:([a-zA-Z0-9]+)\]/g, '[gif:$1]')` rewrote them to the
 * byte-identical text — a provable no-op — and has been removed.
 *
 * @param text  Turn content that may contain gif tags.
 * @returns     The text with resolved gif tags humanized.
 */
function humanizeGifTags(text) {
    // Resolved: [gif:https://media.giphy.com/.../giphy.gif|Some Title]
    return text.replace(/\[gif:https?:\/\/[^\]|]+\|([^\]]+)\]/g, '[gif:"$1"]');
}
175
+ export function buildMessages(state, options) {
176
+ const { history, your_side, topic, match_type, your_position, phase } = state;
177
+ if (!history || history.length === 0) {
178
+ if (match_type === "debate") {
179
+ return [{ role: "user", content: `The debate begins now. Topic: "${topic}". You are arguing ${your_position}. Deliver your opening statement — set your thesis clearly and compellingly.` }];
180
+ }
181
+ else if (match_type === "story") {
182
+ return [{ role: "user", content: `The story begins now. Theme: "${topic}". Write the opening paragraph.` }];
183
+ }
184
+ return [{ role: "user", content: `The conversation begins now. Topic: "${topic}". Make your opening remark.` }];
185
+ }
186
+ // Trim history to the most recent N entries when a window is set.
187
+ // We still anchor the first user message so Claude always has match context.
188
+ const window = options?.contextWindow;
189
+ const trimmedHistory = window && history.length > window ? history.slice(-window) : history;
190
+ const isTrimmed = trimmedHistory !== history;
191
+ const messages = [];
192
+ // When we've trimmed, inject a short anchor so the model knows what came before.
193
+ if (isTrimmed) {
194
+ messages.push({ role: "user", content: `The ${match_type} is underway. Topic: "${topic}". Earlier turns have been omitted — focus on what follows.` });
195
+ }
196
+ for (const turn of trimmedHistory) {
197
+ let role;
198
+ let content = humanizeGifTags(turn.content);
199
+ if (turn.agent === your_side) {
200
+ role = "assistant";
201
+ }
202
+ else {
203
+ role = "user";
204
+ if (turn.agent === "SYSTEM") {
205
+ content = `[AUDIENCE INJECTION]: ${content}`;
206
+ }
207
+ }
208
+ if (messages.length > 0 && messages[messages.length - 1].role === role) {
209
+ messages[messages.length - 1].content += `\n\n${content}`;
210
+ }
211
+ else {
212
+ messages.push({ role, content });
213
+ }
214
+ }
215
+ if (messages[0]?.role === "assistant") {
216
+ messages.unshift({ role: "user", content: `The ${match_type} has begun. Topic: "${topic}".` });
217
+ }
218
+ if (messages[messages.length - 1]?.role === "assistant") {
219
+ if (match_type === "debate" && phase) {
220
+ messages.push({ role: "user", content: DEBATE_PHASE_PROMPTS[phase.name] || "Your turn." });
221
+ }
222
+ else {
223
+ messages.push({ role: "user", content: "Your turn." });
224
+ }
225
+ }
226
+ return messages;
227
+ }
@@ -0,0 +1,66 @@
/** Kinds of DeadNet matches an agent can play or queue for. */
export type MatchType = "debate" | "freeform" | "story" | "game" | "random";
/** Which seat the agent occupies in a match. */
export type Side = "A" | "B";
/** Server-provided snapshot of the current match, consumed by the prompt builders. */
export type MatchState = {
    match_id: string;
    status: string;
    match_type: MatchType;
    topic: string;
    your_side: Side;
    // Assigned stance (used in debate prompts); absent for other match types.
    your_position?: string;
    opponent: {
        name: string;
        description?: string;
    };
    turn_number: number;
    max_turns: number;
    turns_remaining: number;
    current_turn: Side;
    // Output budget for this turn — the prompt quotes it; the backend enforces it.
    token_budget_this_turn: number;
    time_remaining_seconds: number;
    score: Record<Side, number>;
    // Debate phase info (opening/rebuttal/closing) — present for debates.
    phase?: {
        name: string;
        phase_turn: number;
        phase_total_turns: number;
    };
    // Prior turns; `agent` is a Side or "SYSTEM" for audience injections.
    history: Array<{
        agent: string;
        content: string;
    }>;
};
/** Full agent configuration as loaded from the config directory / .env. */
export type AgentConfig = {
    deadnetToken: string;
    deadnetApi: string;
    matchType: MatchType;
    autoRequeue: boolean;
    provider: "anthropic" | "openai" | "ollama" | "claude-code";
    model: string;
    // Model for game matches; when equal to `model`, one provider instance is shared.
    gameModel: string;
    effort: string;
    gameEffort: string;
    apiKey: string;
    ollamaHost: string;
    personality: string;
    /** Game-only strategy prompt. Empty string = not set. Max ~500 tokens (2000 chars). */
    strategy: string;
    gifs: boolean;
    // Per-match-type history window; undefined = no trimming (see buildMessages).
    contextWindow: {
        debate: number;
        freeform: number;
        story: number | undefined;
        game: number | undefined;
    };
    debug: boolean;
};
/** A single GIF result as resolved by the backend. */
export type GifResult = {
    id: string;
    title: string;
    url: string;
    preview_url: string;
};
/** One entry in the agent's log stream. */
export type LogEntry = {
    time: string;
    level: "info" | "warn" | "error" | "debug";
    message: string;
};
/** High-level agent lifecycle states. */
export type AgentPhase = "init" | "connecting" | "queuing" | "waiting" | "playing" | "thinking" | "submitting" | "opponent_turn" | "match_end" | "error" | "exiting";
@@ -0,0 +1 @@
1
+ export {};
package/dist/main.d.ts ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ export {};
package/dist/main.js ADDED
@@ -0,0 +1,44 @@
#!/usr/bin/env node
import { jsx as _jsx } from "react/jsx-runtime";
import { render } from "ink";
import { App } from "./components/App.js";
import { PrettyApp } from "./components/PrettyApp.js";
import { loadConfig, getConfigDir } from "./lib/config.js";
import { createProvider, createGameProvider } from "./providers/index.js";
// Parse args: "--" prefixed tokens are flags; the first positional argument
// (if any) is an alternate agent config directory.
const args = process.argv.slice(2);
const flags = args.filter((a) => a.startsWith("--"));
const positional = args.filter((a) => !a.startsWith("--"));
const pretty = flags.includes("--pretty") || process.env.PRETTY === "1";
if (flags.includes("--debug"))
    process.env.DEBUG = "1";
const agentDir = positional[0];
const config = loadConfig(agentDir);
const configDir = agentDir || getConfigDir();
// Validate required config
if (!config.deadnetToken) {
    console.error(`Error: DEADNET_TOKEN not set. Add it to ${configDir}/.env`);
    process.exit(1);
}
// Only anthropic and openai need an API key. Ollama talks to a local host, and
// claude-code authenticates through the Claude Code CLI's own credentials —
// the old check (`provider !== "ollama"`) wrongly rejected claude-code setups
// without an apiKey, contradicting ClaudeCodeProvider's "no API key needed"
// contract and the factory in providers/index.js.
if ((config.provider === "anthropic" || config.provider === "openai") && !config.apiKey) {
    console.error(`Error: API key not set for provider "${config.provider}". Add it to ${configDir}/.env`);
    process.exit(1);
}
const provider = createProvider(config);
// Only instantiate a separate game provider when the model actually differs
const gameProvider = config.gameModel !== config.model ? createGameProvider(config) : provider;
if (pretty) {
    // Clear terminal and take over the full screen
    process.stdout.write("\x1b[2J\x1b[H\x1b[?25l"); // clear + hide cursor
    const instance = render(_jsx(PrettyApp, { config: config, provider: provider, gameProvider: gameProvider }), {
        exitOnCtrlC: false,
    });
    // NOTE(review): the cursor is only restored on a clean exit — a crash or
    // signal leaves it hidden; a process "exit" hook would be more robust.
    instance.waitUntilExit().then(() => {
        process.stdout.write("\x1b[?25h"); // restore cursor
    });
}
else {
    render(_jsx(App, { config: config, provider: provider, gameProvider: gameProvider }), {
        exitOnCtrlC: false,
    });
}
@@ -0,0 +1,11 @@
import { type LLMProvider, type SystemBlock, type GenerateResult } from "./base.js";
/**
 * LLMProvider backed by the Anthropic Messages API, with explicit prompt
 * caching (`cache_control`) on stable system blocks and the history prefix.
 */
export declare class AnthropicProvider implements LLMProvider {
    name: string;
    model: string;
    private client;
    constructor(apiKey: string, model: string);
    generate(system: SystemBlock[], messages: Array<{
        role: "user" | "assistant";
        content: any;
    }>, maxTokens: number): Promise<GenerateResult>;
}
@@ -0,0 +1,54 @@
import Anthropic from "@anthropic-ai/sdk";
/**
 * LLMProvider backed by the Anthropic Messages API.
 * Applies `cache_control: {type: "ephemeral"}` to system blocks flagged
 * cache=true and to the latest non-final user message, so the stable prefix
 * of each request is served from the prompt cache on subsequent turns.
 */
export class AnthropicProvider {
    name = "anthropic";
    model;
    client;
    constructor(apiKey, model) {
        this.model = model;
        this.client = new Anthropic({ apiKey });
    }
    /**
     * Send one request and normalize the response into a GenerateResult.
     * Joins all text content blocks with newlines; non-text blocks are dropped.
     */
    async generate(system, messages, maxTokens) {
        // Map SystemBlocks to Anthropic content blocks — cacheable blocks get cache_control.
        const systemBlocks = system.map((block) => block.cache
            ? { type: "text", text: block.text, cache_control: { type: "ephemeral" } }
            : { type: "text", text: block.text });
        // Cache the conversation history prefix: find the last USER message that isn't
        // the final message and mark it. Anthropic only supports cache_control on user
        // content blocks — never on assistant blocks — so we skip backwards until we
        // find a user message. Requires >=1024 tokens at the cache point to be a cache hit.
        let cacheIdx = -1;
        for (let i = messages.length - 2; i >= 0; i--) {
            if (messages[i].role === "user") {
                cacheIdx = i;
                break;
            }
        }
        const processedMessages = messages.map((msg, i) => {
            if (i !== cacheIdx)
                return msg;
            // cache_control lives on a content *block*, so string content must be
            // wrapped into an explicit text-block array at the cache point.
            const text = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
            return {
                role: msg.role,
                content: [{ type: "text", text, cache_control: { type: "ephemeral" } }],
            };
        });
        const response = await this.client.messages.create({
            model: this.model,
            max_tokens: maxTokens,
            system: systemBlocks,
            messages: processedMessages,
        });
        const textParts = response.content
            .filter((b) => b.type === "text")
            .map((b) => b.text);
        const usage = response.usage;
        return {
            content: textParts.join("\n").trim(),
            inputTokens: usage.input_tokens,
            outputTokens: usage.output_tokens,
            // Cache counters are optional in the SDK response — default to 0.
            cacheReadTokens: usage.cache_read_input_tokens ?? 0,
            cacheWriteTokens: usage.cache_creation_input_tokens ?? 0,
            stopReason: response.stop_reason === "max_tokens" ? "truncated" : "done",
        };
    }
}
@@ -0,0 +1,30 @@
/**
 * A block of system prompt text.
 * - Anthropic: cache=true adds `cache_control: {type: "ephemeral"}` for explicit prompt caching.
 * - OpenAI: cache=true is ignored — OpenAI auto-prefix-caches prompts ≥1024 tokens.
 *   Stable blocks first (cache=true) + dynamic last (cache=false) = maximum prefix cache hits.
 * - Ollama / llama.rn: cache=true blocks are merged into the system message; cache=false blocks
 *   are prepended to the first user message so the stable system prefix never changes between
 *   turns, enabling KV prefix cache hits. Without this split, dynamic board state in the system
 *   message invalidates the cache every move.
 */
export type SystemBlock = {
    text: string;
    cache?: boolean;
};
/** Normalized result of one LLM generation across all providers. */
export type GenerateResult = {
    // Model's text output (providers trim it before returning).
    content: string;
    inputTokens: number;
    outputTokens: number;
    // Prompt-cache counters; 0 when the provider reports none (e.g. claude-code).
    cacheReadTokens: number;
    cacheWriteTokens: number;
    // "truncated" when generation stopped at the max-token limit.
    stopReason: "done" | "truncated";
};
/** Common interface every backing LLM implementation must satisfy. */
export interface LLMProvider {
    name: string;
    model: string;
    generate(system: SystemBlock[], messages: Array<{
        role: "user" | "assistant";
        content: any;
    }>, maxTokens: number): Promise<GenerateResult>;
}
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,21 @@
import { type LLMProvider, type SystemBlock, type GenerateResult } from "./base.js";
/**
 * Runs `claude -p` as a subprocess. No API key needed — auth flows through
 * Claude Code's own credentials (`claude auth login`).
 *
 * Limitations vs direct API providers:
 * - No hard maxTokens enforcement (we add a soft hint in the prompt; the DeadNet
 *   backend still enforces the budget and the engine handles over_token_limit retries).
 * - No prompt caching metrics (cacheReadTokens / cacheWriteTokens always 0).
 * - ~1–2s subprocess startup cost per turn (fine for 60–90s turn windows).
 */
export declare class ClaudeCodeProvider implements LLMProvider {
    name: string;
    model: string;
    // Reasoning-effort level forwarded to the CLI's --effort flag.
    private effort;
    constructor(model: string, effort: string);
    generate(system: SystemBlock[], messages: Array<{
        role: "user" | "assistant";
        content: any;
    }>, maxTokens: number): Promise<GenerateResult>;
}
@@ -0,0 +1,103 @@
import { spawn } from "child_process";
/**
 * Runs `claude -p` as a subprocess. No API key needed — auth flows through
 * Claude Code's own credentials (`claude auth login`).
 *
 * Limitations vs direct API providers:
 * - No hard maxTokens enforcement (we add a soft hint in the prompt; the DeadNet
 *   backend still enforces the budget and the engine handles over_token_limit retries).
 * - No prompt caching metrics (cacheReadTokens / cacheWriteTokens always 0).
 * - ~1–2s subprocess startup cost per turn (fine for 60–90s turn windows).
 */
export class ClaudeCodeProvider {
    name = "claude-code";
    model;
    effort;
    constructor(model, effort) {
        this.model = model;
        this.effort = effort;
    }
    /** Run one generation via the CLI and normalize its JSON output. */
    async generate(system, messages, maxTokens) {
        // The CLI takes a single system string — cache flags don't apply here.
        const systemText = system.map((b) => b.text).join("\n\n");
        const prompt = buildPrompt(messages, maxTokens);
        // NOTE(review): flags like --effort / --bare / --no-session-persistence
        // assume a recent Claude Code CLI — verify against the installed version.
        const args = [
            "-p", prompt,
            "--system-prompt", systemText,
            "--model", this.model,
            "--effort", this.effort,
            "--output-format", "json",
            "--bare", // skip local hooks / MCP / skills
            "--no-session-persistence", // stateless — we own the history
        ];
        const stdout = await runClaude(args);
        let parsed;
        try {
            parsed = JSON.parse(stdout);
        }
        catch {
            throw new Error(`claude -p returned non-JSON output: ${stdout.slice(0, 200)}`);
        }
        const content = (parsed.result ?? "").trim();
        const usage = parsed.usage ?? {};
        return {
            content,
            inputTokens: usage.input_tokens ?? 0,
            outputTokens: usage.output_tokens ?? 0,
            cacheReadTokens: 0, // CLI reports no cache metrics
            cacheWriteTokens: 0,
            stopReason: "done", // CLI gives no truncation signal
        };
    }
}
/**
 * Serialise the structured message array into a flat prompt string for `claude -p`.
 * Every message except the last is rendered as a labelled transcript
 * ("You" = assistant, "Opponent" = user); the final message is the live
 * instruction, followed by a soft token-budget hint (the backend enforces
 * the hard limit).
 */
function buildPrompt(messages, maxTokens) {
    const asText = (content) => (typeof content === "string" ? content : JSON.stringify(content));
    const lines = [];
    // Conversation history (everything except the final user message)
    const transcript = messages.slice(0, -1);
    if (transcript.length > 0) {
        lines.push("[Conversation so far]");
        for (const msg of transcript) {
            const speaker = msg.role === "assistant" ? "You" : "Opponent";
            lines.push(`${speaker}: ${asText(msg.content)}`);
        }
        lines.push("");
    }
    // Current turn instruction (the final user message)
    const current = messages[messages.length - 1];
    if (current) {
        lines.push(asText(current.content));
    }
    // Soft token hint — the backend enforces the hard limit; this nudges the model.
    lines.push(`\n(Keep your response under ${maxTokens} tokens.)`);
    return lines.join("\n");
}
/**
 * Spawn the `claude` CLI with the given args and resolve with its stdout.
 * Rejects with an actionable message when the binary is missing (ENOENT)
 * or when the process exits non-zero (stderr excerpt included).
 */
function runClaude(args) {
    return new Promise((resolve, reject) => {
        const child = spawn("claude", args, { stdio: ["ignore", "pipe", "pipe"] });
        let out = "";
        let errOut = "";
        child.stdout.on("data", (chunk) => { out += chunk.toString(); });
        child.stderr.on("data", (chunk) => { errOut += chunk.toString(); });
        child.on("error", (spawnErr) => {
            // ENOENT = binary not on PATH — point the user at the install docs.
            reject(spawnErr.code === "ENOENT"
                ? new Error("claude not found in PATH — install Claude Code: https://claude.ai/code")
                : spawnErr);
        });
        child.on("close", (code) => {
            if (code === 0) {
                resolve(out);
            }
            else {
                reject(new Error(`claude exited with code ${code}: ${errOut.slice(0, 300)}`));
            }
        });
    });
}
@@ -0,0 +1,5 @@
import type { AgentConfig } from "../lib/types.js";
import type { LLMProvider } from "./base.js";
/** Build the provider used for conversation matches (config.model / config.effort). */
export declare function createProvider(config: AgentConfig): LLMProvider;
/** Build the provider used for game matches (config.gameModel / config.gameEffort). */
export declare function createGameProvider(config: AgentConfig): LLMProvider;
export type { LLMProvider, GenerateResult } from "./base.js";
@@ -0,0 +1,28 @@
1
+ import { AnthropicProvider } from "./anthropic.js";
2
+ import { OpenAIProvider } from "./openai.js";
3
+ import { OllamaProvider } from "./ollama.js";
4
+ import { ClaudeCodeProvider } from "./claude-code.js";
/** Build the provider used for conversation matches. */
export function createProvider(config) {
    return createProviderForModel(config, config.model, config.effort);
}
/** Build the provider used for game matches (may use a different model/effort). */
export function createGameProvider(config) {
    return createProviderForModel(config, config.gameModel, config.gameEffort);
}
/**
 * Instantiate the configured provider for a specific model/effort pair.
 * Only anthropic and openai require an API key; ollama needs a host and
 * claude-code authenticates via its own CLI credentials.
 */
function createProviderForModel(config, model, effort) {
    const { provider, apiKey } = config;
    if (provider === "anthropic") {
        if (!apiKey)
            throw new Error("ANTHROPIC_API_KEY is required");
        return new AnthropicProvider(apiKey, model);
    }
    if (provider === "openai") {
        if (!apiKey)
            throw new Error("OPENAI_API_KEY is required");
        return new OpenAIProvider(apiKey, model);
    }
    if (provider === "ollama") {
        return new OllamaProvider(config.ollamaHost, model);
    }
    if (provider === "claude-code") {
        return new ClaudeCodeProvider(model, effort);
    }
    throw new Error(`Unknown provider: ${provider}`);
}
@@ -0,0 +1,11 @@
import { type LLMProvider, type SystemBlock, type GenerateResult } from "./base.js";
/**
 * LLMProvider implementation that talks to an Ollama host (see ollama.js).
 * Requires no API key — only a reachable host URL.
 */
export declare class OllamaProvider implements LLMProvider {
    name: string;
    model: string;
    private host;
    constructor(host: string, model: string);
    generate(system: SystemBlock[], messages: Array<{
        role: "user" | "assistant";
        content: any;
    }>, maxTokens: number): Promise<GenerateResult>;
}