claude-glm 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,159 @@
1
+ // Main Fastify server that routes requests by provider prefix
2
+ import Fastify from "fastify";
3
+ import { parseProviderModel, warnIfTools } from "./map.js";
4
+ import type { AnthropicRequest, ProviderModel } from "./types.js";
5
+ import { chatOpenAI } from "./providers/openai.js";
6
+ import { chatOpenRouter } from "./providers/openrouter.js";
7
+ import { chatGemini } from "./providers/gemini.js";
8
+ import { passThrough } from "./providers/anthropic-pass.js";
9
+ import { config } from "dotenv";
10
+ import { join } from "path";
11
+ import { homedir } from "os";
12
+
13
// Load .env from ~/.claude-proxy/.env
const envPath = join(homedir(), ".claude-proxy", ".env");
config({ path: envPath });

// Listen port; override with CLAUDE_PROXY_PORT (unset/empty falls back to 17870).
const PORT = Number(process.env.CLAUDE_PROXY_PORT || 17870);

// Most recently routed provider/model. Reported by /healthz and /_status,
// and used as the default for requests whose model has no provider prefix.
let active: ProviderModel | null = null;

// Fastify's built-in logger is disabled; this proxy logs via console instead.
const fastify = Fastify({ logger: false });
22
+
23
+ // Health check endpoint
24
+ fastify.get("/healthz", async () => ({
25
+ ok: true,
26
+ active: active ?? { provider: "glm", model: "auto" }
27
+ }));
28
+
29
+ // Status endpoint (shows current active provider/model)
30
+ fastify.get("/_status", async () => {
31
+ return active ?? { provider: "glm", model: "glm-4.6" };
32
+ });
33
+
34
+ // Main messages endpoint - routes by model prefix
35
+ fastify.post("/v1/messages", async (req, res) => {
36
+ try {
37
+ const body = req.body as AnthropicRequest;
38
+ const defaults = active ?? undefined;
39
+ const { provider, model } = parseProviderModel(body.model, defaults);
40
+
41
+ // Log every request for debugging
42
+ const tools = body.tools?.map((t: any) => t.name).join(",") || "none";
43
+ const hasSystem = !!body.system;
44
+ const msgCount = body.messages?.length || 0;
45
+ console.log(`[ccx] REQUEST: model="${body.model}" → provider="${provider}" model="${model}" | tools=[${tools}] system=${hasSystem} messages=${msgCount}`);
46
+
47
+ // Warn if using tools with providers that may not support them
48
+ warnIfTools(body, provider);
49
+
50
+ active = { provider, model };
51
+
52
+ // Validate API keys BEFORE setting headers
53
+ if (provider === "openai") {
54
+ const key = process.env.OPENAI_API_KEY;
55
+ if (!key) {
56
+ throw apiError(401, "OPENAI_API_KEY not set in ~/.claude-proxy/.env");
57
+ }
58
+ // Set headers only after validation
59
+ res.raw.setHeader("Content-Type", "text/event-stream");
60
+ res.raw.setHeader("Cache-Control", "no-cache, no-transform");
61
+ res.raw.setHeader("Connection", "keep-alive");
62
+ // @ts-ignore
63
+ res.raw.flushHeaders?.();
64
+ return chatOpenAI(res, body, model, key);
65
+ }
66
+
67
+ if (provider === "openrouter") {
68
+ const key = process.env.OPENROUTER_API_KEY;
69
+ if (!key) {
70
+ throw apiError(401, "OPENROUTER_API_KEY not set in ~/.claude-proxy/.env");
71
+ }
72
+ res.raw.setHeader("Content-Type", "text/event-stream");
73
+ res.raw.setHeader("Cache-Control", "no-cache, no-transform");
74
+ res.raw.setHeader("Connection", "keep-alive");
75
+ // @ts-ignore
76
+ res.raw.flushHeaders?.();
77
+ return chatOpenRouter(res, body, model, key);
78
+ }
79
+
80
+ if (provider === "gemini") {
81
+ const key = process.env.GEMINI_API_KEY;
82
+ if (!key) {
83
+ throw apiError(401, "GEMINI_API_KEY not set in ~/.claude-proxy/.env");
84
+ }
85
+ res.raw.setHeader("Content-Type", "text/event-stream");
86
+ res.raw.setHeader("Cache-Control", "no-cache, no-transform");
87
+ res.raw.setHeader("Connection", "keep-alive");
88
+ // @ts-ignore
89
+ res.raw.flushHeaders?.();
90
+ return chatGemini(res, body, model, key);
91
+ }
92
+
93
+ if (provider === "anthropic") {
94
+ const base = process.env.ANTHROPIC_UPSTREAM_URL;
95
+ const key = process.env.ANTHROPIC_API_KEY;
96
+ if (!base || !key) {
97
+ throw apiError(
98
+ 500,
99
+ "ANTHROPIC_UPSTREAM_URL and ANTHROPIC_API_KEY not set in ~/.claude-proxy/.env"
100
+ );
101
+ }
102
+ // Don't set headers here - passThrough will do it after validation
103
+ return passThrough({
104
+ res,
105
+ body,
106
+ model,
107
+ baseUrl: base,
108
+ headers: {
109
+ "Content-Type": "application/json",
110
+ "x-api-key": key,
111
+ "anthropic-version": process.env.ANTHROPIC_VERSION || "2023-06-01"
112
+ }
113
+ });
114
+ }
115
+
116
+ // Default: glm (Z.AI)
117
+ const glmBase = process.env.GLM_UPSTREAM_URL;
118
+ const glmKey = process.env.ZAI_API_KEY || process.env.GLM_API_KEY;
119
+ if (!glmBase || !glmKey) {
120
+ throw apiError(
121
+ 500,
122
+ "GLM_UPSTREAM_URL and ZAI_API_KEY not set in ~/.claude-proxy/.env. Run: ccx --setup"
123
+ );
124
+ }
125
+ // Don't set headers here - passThrough will do it after validation
126
+ return passThrough({
127
+ res,
128
+ body,
129
+ model,
130
+ baseUrl: glmBase,
131
+ headers: {
132
+ "Content-Type": "application/json",
133
+ Authorization: `Bearer ${glmKey}`,
134
+ "anthropic-version": process.env.ANTHROPIC_VERSION || "2023-06-01"
135
+ }
136
+ });
137
+ } catch (e: any) {
138
+ const status = e?.statusCode ?? 500;
139
+ return res.code(status).send({ error: e?.message || "proxy error" });
140
+ }
141
+ });
142
+
143
+ function apiError(status: number, message: string) {
144
+ const e = new Error(message);
145
+ // @ts-ignore
146
+ e.statusCode = status;
147
+ return e;
148
+ }
149
+
150
+ fastify
151
+ .listen({ port: PORT, host: "127.0.0.1" })
152
+ .then(() => {
153
+ console.log(`[ccx] Proxy listening on http://127.0.0.1:${PORT}`);
154
+ console.log(`[ccx] Configure API keys in: ${envPath}`);
155
+ })
156
+ .catch((err) => {
157
+ console.error("[ccx] Failed to start proxy:", err.message);
158
+ process.exit(1);
159
+ });
@@ -0,0 +1,106 @@
1
+ // Provider parsing and message mapping utilities
2
+ import { AnthropicMessage, AnthropicRequest, ProviderKey, ProviderModel } from "./types.js";
3
+
4
// Provider prefixes recognized in "provider:model" / "provider/model" routing.
const PROVIDER_PREFIXES: ProviderKey[] = ["openai", "openrouter", "gemini", "glm", "anthropic"];

// Model shortcuts - add your own aliases here
// Keys are matched case-insensitively (see parseProviderModel); values are
// full "provider:model" strings that go through normal prefix parsing.
// NOTE(review): the model ids below are hard-coded — confirm they match the
// providers' currently published model names.
const MODEL_SHORTCUTS: Record<string, string> = {
  // GLM shortcuts
  "g": "glm:glm-4.7",
  "glm": "glm:glm-4.7",
  "glm47": "glm:glm-4.7",
  "glm45": "glm:glm-4.5",
  "flash": "glm:glm-4-flash",
  // Claude shortcuts (for API users)
  "opus": "anthropic:claude-opus-4-5-20251101",
  "sonnet": "anthropic:claude-sonnet-4-5-20250929",
  "haiku": "anthropic:claude-haiku-4-5-20251001",
  // Add more shortcuts as needed
};
20
+
21
+ /**
22
+ * Parse provider and model from the model field
23
+ * Supports formats: "provider:model" or "provider/model"
24
+ * Falls back to defaults if no valid prefix found
25
+ */
26
+ export function parseProviderModel(modelField: string, defaults?: ProviderModel): ProviderModel {
27
+ if (!modelField) {
28
+ if (defaults) return defaults;
29
+ throw new Error("Missing 'model' in request");
30
+ }
31
+
32
+ // Expand shortcuts first
33
+ const expanded = MODEL_SHORTCUTS[modelField.toLowerCase()] || modelField;
34
+
35
+ // Auto-detect Claude models (start with "claude-") and route to anthropic
36
+ if (expanded.toLowerCase().startsWith("claude-")) {
37
+ return { provider: "anthropic", model: expanded };
38
+ }
39
+
40
+ const sep = expanded.includes(":") ? ":" : expanded.includes("/") ? "/" : null;
41
+ if (!sep) {
42
+ // no prefix: fall back to defaults or assume glm as legacy
43
+ return defaults ?? { provider: "glm", model: expanded };
44
+ }
45
+
46
+ const [maybeProv, ...rest] = expanded.split(sep);
47
+ const prov = maybeProv.toLowerCase() as ProviderKey;
48
+
49
+ if (!PROVIDER_PREFIXES.includes(prov)) {
50
+ // unrecognized prefix -> use defaults or treat full string as model
51
+ return defaults ?? { provider: "glm", model: expanded };
52
+ }
53
+
54
+ return { provider: prov, model: rest.join(sep) };
55
+ }
56
+
57
+ /**
58
+ * Warn if tools are being used with providers that may not support them
59
+ */
60
+ export function warnIfTools(req: AnthropicRequest, provider: ProviderKey): void {
61
+ if (req.tools && req.tools.length > 0) {
62
+ // Only GLM and Anthropic support tools natively
63
+ if (provider !== "glm" && provider !== "anthropic") {
64
+ console.warn(`[proxy] Warning: ${provider} may not fully support Anthropic-style tools. Passing through anyway.`);
65
+ }
66
+ }
67
+ }
68
+
69
+ /**
70
+ * Convert Anthropic content to plain text
71
+ */
72
+ export function toPlainText(content: AnthropicMessage["content"]): string {
73
+ if (typeof content === "string") return content;
74
+ return content
75
+ .map((c) => {
76
+ if (typeof c === "string") return c;
77
+ if (c.type === "text") return c.text;
78
+ if (c.type === "tool_result") {
79
+ // Convert tool results to text representation
80
+ if (typeof c.content === "string") return c.content;
81
+ return JSON.stringify(c.content);
82
+ }
83
+ return "";
84
+ })
85
+ .join("");
86
+ }
87
+
88
+ /**
89
+ * Convert Anthropic messages to OpenAI format
90
+ */
91
+ export function toOpenAIMessages(messages: AnthropicMessage[]) {
92
+ return messages.map((m) => ({
93
+ role: m.role,
94
+ content: toPlainText(m.content)
95
+ }));
96
+ }
97
+
98
+ /**
99
+ * Convert Anthropic messages to Gemini format
100
+ */
101
+ export function toGeminiContents(messages: AnthropicMessage[]) {
102
+ return messages.map((m) => ({
103
+ role: m.role === "assistant" ? "model" : "user",
104
+ parts: [{ text: toPlainText(m.content) }]
105
+ }));
106
+ }
@@ -0,0 +1,66 @@
1
+ // Pass-through adapter for Anthropic-compatible upstreams (Anthropic API and Z.AI GLM)
2
+ import { FastifyReply } from "fastify";
3
+
4
// Arguments for passThrough().
type PassArgs = {
  res: FastifyReply;                // reply whose raw socket receives the piped SSE stream
  body: any;                        // incoming Anthropic-format request body
  model: string;                    // upstream model name (provider prefix already stripped)
  baseUrl: string;                  // upstream base URL (trailing slash tolerated)
  headers: Record<string, string>;  // auth + content-type headers for the upstream call
};
11
+
12
+ /**
13
+ * Pass through requests to Anthropic-compatible APIs
14
+ * This works for both:
15
+ * - Anthropic's official API
16
+ * - Z.AI's GLM API (Anthropic-compatible)
17
+ */
18
+ export async function passThrough({ res, body, model, baseUrl, headers }: PassArgs) {
19
+ const url = `${stripEndSlash(baseUrl)}/v1/messages`;
20
+
21
+ // Replace model with parsed model name (strips provider prefix like "glm:" or "anthropic:")
22
+ body.model = model;
23
+ // Ensure stream is true for Claude Code UX
24
+ body.stream = true;
25
+
26
+ const resp = await fetch(url, {
27
+ method: "POST",
28
+ headers,
29
+ body: JSON.stringify(body)
30
+ });
31
+
32
+ if (!resp.ok || !resp.body) {
33
+ const text = await safeText(resp);
34
+ const err = new Error(`Upstream error (${resp.status}): ${text}`);
35
+ // @ts-ignore
36
+ err.statusCode = resp.status || 502;
37
+ throw err;
38
+ }
39
+
40
+ // Pipe upstream SSE as-is (already in Anthropic format)
41
+ res.raw.setHeader("Content-Type", "text/event-stream");
42
+ res.raw.setHeader("Cache-Control", "no-cache, no-transform");
43
+ res.raw.setHeader("Connection", "keep-alive");
44
+ // @ts-ignore
45
+ res.raw.flushHeaders?.();
46
+
47
+ const reader = resp.body.getReader();
48
+ while (true) {
49
+ const { value, done } = await reader.read();
50
+ if (done) break;
51
+ res.raw.write(value);
52
+ }
53
+ res.raw.end();
54
+ }
55
+
56
+ function stripEndSlash(s: string) {
57
+ return s.endsWith("/") ? s.slice(0, -1) : s;
58
+ }
59
+
60
+ async function safeText(resp: Response) {
61
+ try {
62
+ return await resp.text();
63
+ } catch {
64
+ return "<no-body>";
65
+ }
66
+ }
@@ -0,0 +1,89 @@
1
+ // Gemini adapter using streamGenerateContent (SSE)
2
+ import { FastifyReply } from "fastify";
3
+ import { createParser } from "eventsource-parser";
4
+ import { deltaText, startAnthropicMessage, stopAnthropicMessage } from "../sse.js";
5
+ import { toGeminiContents } from "../map.js";
6
+ import type { AnthropicRequest } from "../types.js";
7
+
8
// Gemini API base; override with GEMINI_BASE_URL (defaults to the public v1beta endpoint).
const G_BASE = process.env.GEMINI_BASE_URL || "https://generativelanguage.googleapis.com/v1beta";
9
+
10
+ export async function chatGemini(
11
+ res: FastifyReply,
12
+ body: AnthropicRequest,
13
+ model: string,
14
+ apiKey?: string
15
+ ) {
16
+ if (!apiKey) {
17
+ throw withStatus(401, "Missing GEMINI_API_KEY. Set it in ~/.claude-proxy/.env");
18
+ }
19
+
20
+ const url = `${G_BASE}/models/${encodeURIComponent(model)}:streamGenerateContent?alt=sse&key=${apiKey}`;
21
+
22
+ const reqBody: any = {
23
+ contents: toGeminiContents(body.messages),
24
+ generationConfig: {
25
+ temperature: body.temperature ?? 0.7,
26
+ maxOutputTokens: body.max_tokens
27
+ }
28
+ };
29
+
30
+ // Note: Gemini has different tool format, just warn for now
31
+ if (body.tools && body.tools.length > 0) {
32
+ console.warn("[gemini] Tools not yet adapted to Gemini format, skipping");
33
+ }
34
+
35
+ const resp = await fetch(url, {
36
+ method: "POST",
37
+ headers: { "Content-Type": "application/json" },
38
+ body: JSON.stringify(reqBody)
39
+ });
40
+
41
+ if (!resp.ok || !resp.body) {
42
+ const text = await safeText(resp);
43
+ throw withStatus(resp.status || 500, `Gemini error: ${text}`);
44
+ }
45
+
46
+ startAnthropicMessage(res, model);
47
+
48
+ const reader = resp.body.getReader();
49
+ const decoder = new TextDecoder();
50
+ const parser = createParser((event) => {
51
+ if (event.type !== "event") return;
52
+ const data = event.data;
53
+ if (!data) return;
54
+ try {
55
+ const json = JSON.parse(data);
56
+ // Gemini response: candidates[0].content.parts[].text
57
+ const text =
58
+ json?.candidates?.[0]?.content?.parts
59
+ ?.map((p: any) => p?.text || "")
60
+ .join("") || "";
61
+ if (text) deltaText(res, text);
62
+ } catch {
63
+ // ignore parse errors
64
+ }
65
+ });
66
+
67
+ while (true) {
68
+ const { value, done } = await reader.read();
69
+ if (done) break;
70
+ parser.feed(decoder.decode(value));
71
+ }
72
+
73
+ stopAnthropicMessage(res);
74
+ }
75
+
76
+ function withStatus(status: number, message: string) {
77
+ const e = new Error(message);
78
+ // @ts-ignore
79
+ e.statusCode = status;
80
+ return e;
81
+ }
82
+
83
+ async function safeText(resp: Response) {
84
+ try {
85
+ return await resp.text();
86
+ } catch {
87
+ return "<no-body>";
88
+ }
89
+ }
@@ -0,0 +1,90 @@
1
+ // OpenAI adapter using chat.completions with SSE streaming
2
+ import { FastifyReply } from "fastify";
3
+ import { createParser } from "eventsource-parser";
4
+ import { deltaText, startAnthropicMessage, stopAnthropicMessage } from "../sse.js";
5
+ import { toOpenAIMessages } from "../map.js";
6
+ import type { AnthropicRequest } from "../types.js";
7
+
8
// OpenAI API base; override with OPENAI_BASE_URL (e.g. for Azure/compatible gateways).
const OPENAI_BASE = process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
9
+
10
+ export async function chatOpenAI(
11
+ res: FastifyReply,
12
+ body: AnthropicRequest,
13
+ model: string,
14
+ apiKey?: string
15
+ ) {
16
+ if (!apiKey) {
17
+ throw withStatus(401, "Missing OPENAI_API_KEY. Set it in ~/.claude-proxy/.env");
18
+ }
19
+
20
+ const url = `${OPENAI_BASE}/chat/completions`;
21
+
22
+ const oaiBody: any = {
23
+ model,
24
+ messages: toOpenAIMessages(body.messages),
25
+ stream: true,
26
+ temperature: body.temperature ?? 0.7,
27
+ max_tokens: body.max_tokens
28
+ };
29
+
30
+ // Pass through tools if provided (note: OpenAI format may differ)
31
+ if (body.tools && body.tools.length > 0) {
32
+ console.warn("[openai] Tools passed through but format may not be compatible");
33
+ oaiBody.tools = body.tools;
34
+ }
35
+
36
+ const resp = await fetch(url, {
37
+ method: "POST",
38
+ headers: {
39
+ Authorization: `Bearer ${apiKey}`,
40
+ "Content-Type": "application/json"
41
+ },
42
+ body: JSON.stringify(oaiBody)
43
+ });
44
+
45
+ if (!resp.ok || !resp.body) {
46
+ const text = await safeText(resp);
47
+ throw withStatus(resp.status || 500, `OpenAI error: ${text}`);
48
+ }
49
+
50
+ // Emit Anthropic SSE start events
51
+ startAnthropicMessage(res, model);
52
+
53
+ const reader = resp.body.getReader();
54
+ const decoder = new TextDecoder();
55
+ const parser = createParser((event) => {
56
+ if (event.type !== "event") return;
57
+ const data = event.data;
58
+ if (!data || data === "[DONE]") return;
59
+ try {
60
+ const json = JSON.parse(data);
61
+ const chunk = json.choices?.[0]?.delta?.content ?? "";
62
+ if (chunk) deltaText(res, chunk);
63
+ } catch {
64
+ // ignore parse errors on keepalives, etc.
65
+ }
66
+ });
67
+
68
+ while (true) {
69
+ const { value, done } = await reader.read();
70
+ if (done) break;
71
+ parser.feed(decoder.decode(value));
72
+ }
73
+
74
+ stopAnthropicMessage(res);
75
+ }
76
+
77
+ function withStatus(status: number, message: string) {
78
+ const e = new Error(message);
79
+ // @ts-ignore
80
+ e.statusCode = status;
81
+ return e;
82
+ }
83
+
84
+ async function safeText(resp: Response) {
85
+ try {
86
+ return await resp.text();
87
+ } catch {
88
+ return "<no-body>";
89
+ }
90
+ }
@@ -0,0 +1,98 @@
1
+ // OpenRouter adapter (OpenAI-compatible API)
2
+ import { FastifyReply } from "fastify";
3
+ import { createParser } from "eventsource-parser";
4
+ import { deltaText, startAnthropicMessage, stopAnthropicMessage } from "../sse.js";
5
+ import { toOpenAIMessages } from "../map.js";
6
+ import type { AnthropicRequest } from "../types.js";
7
+
8
// OpenRouter API base; override with OPENROUTER_BASE_URL.
const OR_BASE = process.env.OPENROUTER_BASE_URL || "https://openrouter.ai/api/v1";
9
+
10
+ export async function chatOpenRouter(
11
+ res: FastifyReply,
12
+ body: AnthropicRequest,
13
+ model: string,
14
+ apiKey?: string
15
+ ) {
16
+ if (!apiKey) {
17
+ throw withStatus(401, "Missing OPENROUTER_API_KEY. Set it in ~/.claude-proxy/.env");
18
+ }
19
+
20
+ const url = `${OR_BASE}/chat/completions`;
21
+ const headers: Record<string, string> = {
22
+ Authorization: `Bearer ${apiKey}`,
23
+ "Content-Type": "application/json"
24
+ };
25
+
26
+ // Add optional OpenRouter headers
27
+ if (process.env.OPENROUTER_REFERER) {
28
+ headers["HTTP-Referer"] = process.env.OPENROUTER_REFERER;
29
+ }
30
+ if (process.env.OPENROUTER_TITLE) {
31
+ headers["X-Title"] = process.env.OPENROUTER_TITLE;
32
+ }
33
+
34
+ const reqBody: any = {
35
+ model,
36
+ messages: toOpenAIMessages(body.messages),
37
+ stream: true,
38
+ temperature: body.temperature ?? 0.7,
39
+ max_tokens: body.max_tokens
40
+ };
41
+
42
+ // Pass through tools if provided
43
+ if (body.tools && body.tools.length > 0) {
44
+ console.warn("[openrouter] Tools passed through but format may not be compatible");
45
+ reqBody.tools = body.tools;
46
+ }
47
+
48
+ const resp = await fetch(url, {
49
+ method: "POST",
50
+ headers,
51
+ body: JSON.stringify(reqBody)
52
+ });
53
+
54
+ if (!resp.ok || !resp.body) {
55
+ const text = await safeText(resp);
56
+ throw withStatus(resp.status || 500, `OpenRouter error: ${text}`);
57
+ }
58
+
59
+ startAnthropicMessage(res, model);
60
+
61
+ const reader = resp.body.getReader();
62
+ const decoder = new TextDecoder();
63
+ const parser = createParser((event) => {
64
+ if (event.type !== "event") return;
65
+ const data = event.data;
66
+ if (!data || data === "[DONE]") return;
67
+ try {
68
+ const json = JSON.parse(data);
69
+ const chunk = json.choices?.[0]?.delta?.content ?? "";
70
+ if (chunk) deltaText(res, chunk);
71
+ } catch {
72
+ // ignore parse errors
73
+ }
74
+ });
75
+
76
+ while (true) {
77
+ const { value, done } = await reader.read();
78
+ if (done) break;
79
+ parser.feed(decoder.decode(value));
80
+ }
81
+
82
+ stopAnthropicMessage(res);
83
+ }
84
+
85
+ function withStatus(status: number, message: string) {
86
+ const e = new Error(message);
87
+ // @ts-ignore
88
+ e.statusCode = status;
89
+ return e;
90
+ }
91
+
92
+ async function safeText(resp: Response) {
93
+ try {
94
+ return await resp.text();
95
+ } catch {
96
+ return "<no-body>";
97
+ }
98
+ }
@@ -0,0 +1,62 @@
1
+ // Server-Sent Events (SSE) utilities for Anthropic-style streaming
2
+ import type { FastifyReply } from "fastify";
3
+
4
+ export function initSSE(res: FastifyReply) {
5
+ res.raw.setHeader("Content-Type", "text/event-stream");
6
+ res.raw.setHeader("Cache-Control", "no-cache, no-transform");
7
+ res.raw.setHeader("Connection", "keep-alive");
8
+ // @ts-ignore
9
+ res.raw.flushHeaders?.();
10
+ }
11
+
12
+ export function sendEvent(res: FastifyReply, event: string, data: unknown) {
13
+ res.raw.write(`event: ${event}\n`);
14
+ res.raw.write(`data: ${JSON.stringify(data)}\n\n`);
15
+ }
16
+
17
+ export function endSSE(res: FastifyReply) {
18
+ res.raw.write("event: done\n");
19
+ res.raw.write("data: {}\n\n");
20
+ res.raw.end();
21
+ }
22
+
23
+ export function startAnthropicMessage(res: FastifyReply, model: string) {
24
+ const id = `msg_${Date.now()}`;
25
+ sendEvent(res, "message_start", {
26
+ type: "message_start",
27
+ message: {
28
+ id,
29
+ type: "message",
30
+ role: "assistant",
31
+ model,
32
+ content: [],
33
+ stop_reason: null,
34
+ stop_sequence: null,
35
+ usage: { input_tokens: 0, output_tokens: 0 }
36
+ }
37
+ });
38
+ sendEvent(res, "content_block_start", {
39
+ type: "content_block_start",
40
+ index: 0,
41
+ content_block: { type: "text", text: "" }
42
+ });
43
+ }
44
+
45
+ export function deltaText(res: FastifyReply, text: string) {
46
+ if (!text) return;
47
+ sendEvent(res, "content_block_delta", {
48
+ type: "content_block_delta",
49
+ index: 0,
50
+ delta: { type: "text_delta", text }
51
+ });
52
+ }
53
+
54
+ export function stopAnthropicMessage(res: FastifyReply) {
55
+ sendEvent(res, "content_block_stop", { type: "content_block_stop", index: 0 });
56
+ sendEvent(res, "message_delta", {
57
+ type: "message_delta",
58
+ delta: { stop_reason: "end_turn", stop_sequence: null },
59
+ usage: { output_tokens: 0 }
60
+ });
61
+ sendEvent(res, "message_stop", { type: "message_stop" });
62
+ }