zencefyl 0.2.4 → 0.2.5

This diff shows the contents of publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -0,0 +1,62 @@
+ #!/usr/bin/env node
+
+ // src/auth/credentials.ts
+ import fs from "fs";
+ import path from "path";
+ function credentialsPath(dataDir) {
+   return path.join(dataDir, "credentials.json");
+ }
+ function loadCredentials(dataDir) {
+   const p = credentialsPath(dataDir);
+   if (!fs.existsSync(p)) return {};
+   try {
+     return JSON.parse(fs.readFileSync(p, "utf8"));
+   } catch {
+     return {};
+   }
+ }
+ function saveCredentials(dataDir, store) {
+   fs.writeFileSync(credentialsPath(dataDir), JSON.stringify(store, null, 2), "utf8");
+ }
+ function saveProviderCredentials(dataDir, providerId, creds) {
+   const store = loadCredentials(dataDir);
+   store[providerId] = creds;
+   saveCredentials(dataDir, store);
+ }
+ var REFRESH_BUFFER_MS = 6e4;
+ function isExpired(creds) {
+   return Date.now() >= creds.expires - REFRESH_BUFFER_MS;
+ }
+ async function getAccessToken(dataDir, providerId) {
+   const store = loadCredentials(dataDir);
+   const creds = store[providerId];
+   if (!creds) {
+     throw new Error(
+       `No credentials for ${providerId}. Run zencefyl setup and choose the subscription option.`
+     );
+   }
+   if (!isExpired(creds)) {
+     return creds.access;
+   }
+   let refreshed;
+   if (providerId === "openai-subscription") {
+     const { refreshOpenAICodexToken } = await import("@mariozechner/pi-ai/oauth");
+     refreshed = await refreshOpenAICodexToken(creds.refresh);
+   } else {
+     const { refreshGoogleCloudToken } = await import("@mariozechner/pi-ai/oauth");
+     const projectId = creds.projectId ?? "";
+     refreshed = await refreshGoogleCloudToken(creds.refresh, projectId);
+     refreshed.projectId = projectId;
+   }
+   saveProviderCredentials(dataDir, providerId, refreshed);
+   return refreshed.access;
+ }
+
+ export {
+   credentialsPath,
+   loadCredentials,
+   saveCredentials,
+   saveProviderCredentials,
+   getAccessToken
+ };
+ //# sourceMappingURL=chunk-OBDSCGMH.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/auth/credentials.ts"],"sourcesContent":["// Credential storage and refresh for OAuth providers.\n//\n// Credentials are stored in ~/.zencefyl/credentials.json — separate from\n// config.json so they are never accidentally committed to git.\n//\n// Structure:\n// {\n// \"openai-subscription\": { access: \"...\", refresh: \"...\", expires: 1234567890 },\n// \"gemini-subscription\": { access: \"...\", refresh: \"...\", expires: 1234567890, projectId: \"...\" }\n// }\n\nimport fs from 'node:fs'\nimport path from 'node:path'\nimport type { OAuthCredentials } from '@mariozechner/pi-ai/oauth'\n\n// ── Types ─────────────────────────────────────────────────────────────────────\n\nexport type ProviderId = 'openai-subscription' | 'gemini-subscription'\n\n// Gemini credentials carry a projectId needed by refreshGoogleCloudToken.\nexport type GeminiCredentials = OAuthCredentials & { projectId?: string }\n\ntype CredentialsStore = {\n 'openai-subscription'?: OAuthCredentials\n 'gemini-subscription'?: GeminiCredentials\n}\n\n// ── File path ─────────────────────────────────────────────────────────────────\n\nexport function credentialsPath(dataDir: string): string {\n return path.join(dataDir, 'credentials.json')\n}\n\n// ── Load / Save ───────────────────────────────────────────────────────────────\n\nexport function loadCredentials(dataDir: string): CredentialsStore {\n const p = credentialsPath(dataDir)\n if (!fs.existsSync(p)) return {}\n try {\n return JSON.parse(fs.readFileSync(p, 'utf8')) as CredentialsStore\n } catch {\n return {}\n }\n}\n\nexport function saveCredentials(dataDir: string, store: CredentialsStore): void {\n fs.writeFileSync(credentialsPath(dataDir), JSON.stringify(store, null, 2), 'utf8')\n}\n\nexport function saveProviderCredentials(\n dataDir: string,\n providerId: ProviderId,\n creds: OAuthCredentials | GeminiCredentials,\n): void {\n const store = loadCredentials(dataDir)\n store[providerId] = creds as GeminiCredentials\n saveCredentials(dataDir, store)\n}\n\n// ── Token refresh ─────────────────────────────────────────────────────────────\n\n// Buffer in ms — refresh 60s before actual expiry so requests never hit an\n// expired token mid-flight.\nconst REFRESH_BUFFER_MS = 60_000\n\nfunction isExpired(creds: OAuthCredentials): boolean {\n return Date.now() >= creds.expires - REFRESH_BUFFER_MS\n}\n\n// Returns a valid access token, refreshing first if needed.\n// Persists updated credentials back to disk on refresh.\nexport async function getAccessToken(\n dataDir: string,\n providerId: 'openai-subscription',\n): Promise<string>\nexport async function getAccessToken(\n dataDir: string,\n providerId: 'gemini-subscription',\n): Promise<string>\nexport async function getAccessToken(dataDir: string, providerId: ProviderId): Promise<string> {\n const store = loadCredentials(dataDir)\n const creds = store[providerId]\n\n if (!creds) {\n throw new Error(\n `No credentials for ${providerId}. ` +\n `Run zencefyl setup and choose the subscription option.`\n )\n }\n\n if (!isExpired(creds)) {\n return creds.access\n }\n\n // Token expired — refresh it\n let refreshed: OAuthCredentials\n\n if (providerId === 'openai-subscription') {\n const { refreshOpenAICodexToken } = await import('@mariozechner/pi-ai/oauth')\n refreshed = await refreshOpenAICodexToken(creds.refresh)\n } else {\n const { refreshGoogleCloudToken } = await import('@mariozechner/pi-ai/oauth')\n const projectId = (creds as GeminiCredentials).projectId ?? 
''\n refreshed = await refreshGoogleCloudToken(creds.refresh, projectId)\n // Preserve projectId through refresh\n ;(refreshed as GeminiCredentials).projectId = projectId\n }\n\n saveProviderCredentials(dataDir, providerId, refreshed)\n return refreshed.access\n}\n"],"mappings":";;;AAWA,OAAO,QAAU;AACjB,OAAO,UAAU;AAiBV,SAAS,gBAAgB,SAAyB;AACvD,SAAO,KAAK,KAAK,SAAS,kBAAkB;AAC9C;AAIO,SAAS,gBAAgB,SAAmC;AACjE,QAAM,IAAI,gBAAgB,OAAO;AACjC,MAAI,CAAC,GAAG,WAAW,CAAC,EAAG,QAAO,CAAC;AAC/B,MAAI;AACF,WAAO,KAAK,MAAM,GAAG,aAAa,GAAG,MAAM,CAAC;AAAA,EAC9C,QAAQ;AACN,WAAO,CAAC;AAAA,EACV;AACF;AAEO,SAAS,gBAAgB,SAAiB,OAA+B;AAC9E,KAAG,cAAc,gBAAgB,OAAO,GAAG,KAAK,UAAU,OAAO,MAAM,CAAC,GAAG,MAAM;AACnF;AAEO,SAAS,wBACd,SACA,YACA,OACM;AACN,QAAM,QAAQ,gBAAgB,OAAO;AACrC,QAAM,UAAU,IAAI;AACpB,kBAAgB,SAAS,KAAK;AAChC;AAMA,IAAM,oBAAoB;AAE1B,SAAS,UAAU,OAAkC;AACnD,SAAO,KAAK,IAAI,KAAK,MAAM,UAAU;AACvC;AAYA,eAAsB,eAAe,SAAiB,YAAyC;AAC7F,QAAM,QAAQ,gBAAgB,OAAO;AACrC,QAAM,QAAQ,MAAM,UAAU;AAE9B,MAAI,CAAC,OAAO;AACV,UAAM,IAAI;AAAA,MACR,sBAAsB,UAAU;AAAA,IAElC;AAAA,EACF;AAEA,MAAI,CAAC,UAAU,KAAK,GAAG;AACrB,WAAO,MAAM;AAAA,EACf;AAGA,MAAI;AAEJ,MAAI,eAAe,uBAAuB;AACxC,UAAM,EAAE,wBAAwB,IAAI,MAAM,OAAO,2BAA2B;AAC5E,gBAAY,MAAM,wBAAwB,MAAM,OAAO;AAAA,EACzD,OAAO;AACL,UAAM,EAAE,wBAAwB,IAAI,MAAM,OAAO,2BAA2B;AAC5E,UAAM,YAAa,MAA4B,aAAa;AAC5D,gBAAmB,MAAM,wBAAwB,MAAM,SAAS,SAAS;AAExE,IAAC,UAAgC,YAAY;AAAA,EAChD;AAEA,0BAAwB,SAAS,YAAY,SAAS;AACtD,SAAO,UAAU;AACnB;","names":[]}
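The chunk above is the compiled form of src/auth/credentials.ts; the embedded source in the map notes that credentials live in ~/.zencefyl/credentials.json, keyed by provider id. A minimal consumer sketch (the dataDir resolution shown here is illustrative; the package computes its own):

    import os from "node:os";
    import path from "node:path";
    import { loadCredentials, getAccessToken } from "./chunk-OBDSCGMH.js";

    // Illustrative data dir, matching the ~/.zencefyl location described in the map.
    const dataDir = path.join(os.homedir(), ".zencefyl");

    // Inspect which providers have stored credentials (tokens themselves stay on disk).
    console.log(Object.keys(loadCredentials(dataDir)));

    // Returns a cached access token, refreshing through pi-ai's oauth helpers
    // when expiry falls within the 60s REFRESH_BUFFER_MS window.
    const token = await getAccessToken(dataDir, "openai-subscription");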
@@ -0,0 +1,224 @@
+ #!/usr/bin/env node
+
+ // src/constants/models.ts
+ var DEFAULT_MODELS = {
+   fast: "claude-haiku-4-5-20251001",
+   // Cheap + fast — background tasks
+   default: "claude-sonnet-4-6",
+   // Main model for most turns
+   deep: "claude-opus-4-6"
+   // Reserved for deep reasoning
+ };
+ var LOCAL_TRANSFORMERS_MODELS = {
+   fast: "tinyllama",
+   // 1.1B params, fast on CPU
+   default: "phi3-mini",
+   // 3.8B params, good quality
+   deep: "stablelm2"
+   // 1.6B params, better reasoning
+ };
+ var MODEL_CONTEXT_WINDOW = {
+   // Anthropic
+   "claude-haiku-4-5-20251001": 2e5,
+   "claude-sonnet-4-6": 2e5,
+   "claude-opus-4-6": 2e5,
+   // OpenAI subscription (via ChatGPT plan — @mariozechner/pi-ai)
+   // Context windows are approximate — Codex endpoint docs not fully public.
+   "gpt-5.4": 128e3,
+   "gpt-5.4-mini": 128e3,
+   "gpt-5.3-codex": 128e3,
+   "gpt-5.3-codex-spark": 128e3,
+   "gpt-5.2": 128e3,
+   "gpt-5.2-codex": 128e3,
+   "gpt-5.1": 128e3,
+   "gpt-5.1-codex-max": 128e3,
+   "gpt-5.1-codex-mini": 128e3,
+   // Gemini subscription (via Google Cloud Code Assist — @mariozechner/pi-ai)
+   "gemini-3.1-pro-preview": 1e6,
+   "gemini-3-pro-preview": 1e6,
+   "gemini-3-flash-preview": 1e6,
+   "gemini-2.5-pro": 1e6,
+   "gemini-2.5-flash": 1e6,
+   "gemini-2.0-flash": 1e6,
+   // Ollama local models (context varies — using typical defaults)
+   "gemma3": 128e3,
+   "gemma3:1b": 32e3,
+   "gemma3:12b": 128e3,
+   "llama3.2": 128e3,
+   "llama3.2:1b": 128e3,
+   "llama3.3": 128e3,
+   "mistral": 32e3,
+   "mistral-nemo": 128e3,
+   "phi4": 128e3,
+   "phi4-mini": 128e3,
+   "tinyllama": 4096,
+   "qwen2.5": 128e3,
+   "qwen2.5:14b": 128e3,
+   "qwen2.5:72b": 128e3,
+   "deepseek-r1": 64e3,
+   "deepseek-r1:14b": 64e3,
+   "deepseek-r1:32b": 64e3,
+   "qwq": 128e3,
+   "codellama": 16e3,
+   "codellama:13b": 16e3,
+   "codegemma": 8e3,
+   "llava": 4096,
+   "llava:13b": 4096,
+   // Local transformers models (approximate — varies by model)
+   "phi3-mini": 4096,
+   "stablelm2": 4096,
+   "gemma-2b": 8192,
+   // Moonshot (Kimi models)
+   "kimi-k2.5": 256e3,
+   "kimi-k1.5": 256e3,
+   "kimi-k1.5-long-context": 2e6
+ };
+ var MODEL_PRICING = {
+   // Anthropic (source: anthropic.com/pricing)
+   "claude-haiku-4-5-20251001": { inputPerM: 0.8, outputPerM: 4 },
+   "claude-sonnet-4-6": { inputPerM: 3, outputPerM: 15 },
+   "claude-opus-4-6": { inputPerM: 15, outputPerM: 75 },
+   // OpenAI subscription models — billed via ChatGPT plan, not per token
+   "gpt-5.4": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.4-mini": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.3-codex": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.3-codex-spark": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.2": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.2-codex": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.1": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.1-codex-max": { inputPerM: 0, outputPerM: 0 },
+   "gpt-5.1-codex-mini": { inputPerM: 0, outputPerM: 0 },
+   // Gemini subscription models — billed via Google Cloud Code Assist plan, not per token
+   "gemini-3.1-pro-preview": { inputPerM: 0, outputPerM: 0 },
+   "gemini-3-pro-preview": { inputPerM: 0, outputPerM: 0 },
+   "gemini-3-flash-preview": { inputPerM: 0, outputPerM: 0 },
+   "gemini-2.5-pro": { inputPerM: 0, outputPerM: 0 },
+   "gemini-2.5-flash": { inputPerM: 0, outputPerM: 0 },
+   "gemini-2.0-flash": { inputPerM: 0, outputPerM: 0 },
+   // Moonshot (Kimi) — approximate pricing (check platform.moonshot.cn for current rates)
+   "kimi-k2.5": { inputPerM: 2, outputPerM: 10 },
+   "kimi-k1.5": { inputPerM: 8, outputPerM: 32 },
+   "kimi-k1.5-long-context": { inputPerM: 4, outputPerM: 16 },
+   // Local transformers models — $0 (runs locally, no API cost)
+   "tinyllama": { inputPerM: 0, outputPerM: 0 },
+   "phi3-mini": { inputPerM: 0, outputPerM: 0 },
+   "stablelm2": { inputPerM: 0, outputPerM: 0 },
+   "gemma-2b": { inputPerM: 0, outputPerM: 0 }
+ };
+ var OPENAI_SUBSCRIPTION_MODELS = {
+   fast: "gpt-5.4-mini",
+   // smaller, faster tier
+   default: "gpt-5.4",
+   // latest flagship
+   deep: "gpt-5.3-codex"
+   // strong coding + reasoning
+ };
+ var GEMINI_SUBSCRIPTION_MODELS = {
+   fast: "gemini-3-flash-preview",
+   // Gemini 3 fast tier
+   default: "gemini-2.5-pro",
+   // stable flagship
+   deep: "gemini-3.1-pro-preview"
+   // latest strongest
+ };
+ var OLLAMA_MODELS_DEFAULT = {
+   fast: "gemma3:1b",
+   // small + fast
+   default: "gemma3",
+   // 4B — recommended general-purpose
+   deep: "qwen2.5"
+   // 7B — stronger for complex tasks
+ };
+ var MOONSHOT_MODELS = {
+   fast: "kimi-k2.5",
+   // fastest, most cost-effective
+   default: "kimi-k2.5",
+   // balanced — the model you're talking to now
+   deep: "kimi-k1.5"
+   // strongest reasoning with thinking
+ };
+ var MODEL_REGISTRY = [
+   // ── Anthropic (claude-code or anthropic provider) ─────────────────────────
+   { id: "claude-sonnet-4-6", label: "Claude Sonnet 4.6", provider: "anthropic" },
+   { id: "claude-opus-4-6", label: "Claude Opus 4.6", provider: "anthropic" },
+   { id: "claude-haiku-4-5-20251001", label: "Claude Haiku 4.5", provider: "anthropic" },
+   // ── OpenAI subscription (via ChatGPT plan — @mariozechner/pi-ai) ─────────
+   // Model IDs must match what pi-ai's openai-codex provider recognises.
+   { id: "gpt-5.4", label: "GPT-5.4", provider: "openai-subscription", tier: "default" },
+   { id: "gpt-5.4-mini", label: "GPT-5.4 Mini", provider: "openai-subscription", tier: "fast" },
+   { id: "gpt-5.3-codex", label: "GPT-5.3 Codex", provider: "openai-subscription", tier: "deep" },
+   { id: "gpt-5.3-codex-spark", label: "GPT-5.3 Codex Spark", provider: "openai-subscription" },
+   { id: "gpt-5.2-codex", label: "GPT-5.2 Codex", provider: "openai-subscription" },
+   { id: "gpt-5.2", label: "GPT-5.2", provider: "openai-subscription" },
+   { id: "gpt-5.1", label: "GPT-5.1", provider: "openai-subscription" },
+   { id: "gpt-5.1-codex-max", label: "GPT-5.1 Codex Max", provider: "openai-subscription" },
+   { id: "gpt-5.1-codex-mini", label: "GPT-5.1 Codex Mini", provider: "openai-subscription" },
+   // ── Gemini subscription (via Google Cloud Code Assist — @mariozechner/pi-ai) ─
+   // Model IDs from pi-ai's google-gemini-cli provider.
+   { id: "gemini-3.1-pro-preview", label: "Gemini 3.1 Pro", provider: "gemini-subscription", tier: "deep" },
+   { id: "gemini-3-pro-preview", label: "Gemini 3 Pro", provider: "gemini-subscription" },
+   { id: "gemini-3-flash-preview", label: "Gemini 3 Flash", provider: "gemini-subscription", tier: "fast" },
+   { id: "gemini-2.5-pro", label: "Gemini 2.5 Pro", provider: "gemini-subscription", tier: "default" },
+   { id: "gemini-2.5-flash", label: "Gemini 2.5 Flash", provider: "gemini-subscription" },
+   { id: "gemini-2.0-flash", label: "Gemini 2.0 Flash", provider: "gemini-subscription" },
+   // ── Ollama (local) — requires `ollama serve` ──────────────────────────────
+   // Fast / small models (< 4 GB)
+   { id: "gemma3:1b", label: "Gemma 3 1B", provider: "ollama", tier: "fast", size: "0.8 GB" },
+   { id: "llama3.2:1b", label: "Llama 3.2 1B", provider: "ollama", tier: "fast", size: "1.3 GB" },
+   { id: "phi4-mini", label: "Phi 4 Mini", provider: "ollama", tier: "fast", size: "2.5 GB" },
+   { id: "tinyllama", label: "TinyLlama 1.1B", provider: "ollama", tier: "fast", size: "0.6 GB" },
+   // Balanced models (2–8 GB) — recommended for daily use
+   { id: "gemma3", label: "Gemma 3 4B", provider: "ollama", tier: "default", size: "3.3 GB" },
+   { id: "gemma3:12b", label: "Gemma 3 12B", provider: "ollama", size: "8.1 GB" },
+   { id: "llama3.2", label: "Llama 3.2 3B", provider: "ollama", tier: "default", size: "2.0 GB" },
+   { id: "llama3.3", label: "Llama 3.3 70B", provider: "ollama", tier: "deep", size: "43 GB" },
+   { id: "mistral", label: "Mistral 7B", provider: "ollama", size: "4.1 GB" },
+   { id: "mistral-nemo", label: "Mistral Nemo 12B", provider: "ollama", size: "7.1 GB" },
+   { id: "phi4", label: "Phi 4 14B", provider: "ollama", size: "9.1 GB" },
+   { id: "qwen2.5", label: "Qwen 2.5 7B", provider: "ollama", tier: "default", size: "4.7 GB" },
+   { id: "qwen2.5:14b", label: "Qwen 2.5 14B", provider: "ollama", size: "9.0 GB" },
+   { id: "qwen2.5:72b", label: "Qwen 2.5 72B", provider: "ollama", tier: "deep", size: "47 GB" },
+   // Reasoning models
+   { id: "deepseek-r1", label: "DeepSeek R1 7B", provider: "ollama", size: "4.7 GB" },
+   { id: "deepseek-r1:14b", label: "DeepSeek R1 14B", provider: "ollama", size: "9.0 GB" },
+   { id: "deepseek-r1:32b", label: "DeepSeek R1 32B", provider: "ollama", tier: "deep", size: "20 GB" },
+   { id: "qwq", label: "QwQ 32B (reasoning)", provider: "ollama", tier: "deep", size: "20 GB" },
+   // Code models
+   { id: "codellama", label: "CodeLlama 7B", provider: "ollama", size: "3.8 GB" },
+   { id: "codellama:13b", label: "CodeLlama 13B", provider: "ollama", size: "7.4 GB" },
+   { id: "codegemma", label: "CodeGemma 7B", provider: "ollama", size: "5.0 GB" },
+   // Multimodal
+   { id: "llava", label: "LLaVA 7B (vision)", provider: "ollama", size: "4.5 GB" },
+   { id: "llava:13b", label: "LLaVA 13B (vision)", provider: "ollama", size: "8.0 GB" },
+   // ── Local Transformers (fully local, no external deps) ───────────────────
+   { id: "tinyllama", label: "TinyLlama 1.1B", provider: "local-transformers", tier: "fast" },
+   { id: "phi3-mini", label: "Phi-3 Mini 3.8B", provider: "local-transformers", tier: "default" },
+   { id: "stablelm2", label: "StableLM 2 Zephyr 1.6B", provider: "local-transformers", tier: "default" },
+   { id: "gemma-2b", label: "Gemma 2B Instruct", provider: "local-transformers", tier: "fast" },
+   // ── Moonshot (Kimi) ─────────────────────────────────────────────────────────
+   { id: "kimi-k2.5", label: "Kimi K2.5", provider: "moonshot" },
+   { id: "kimi-k1.5", label: "Kimi K1.5", provider: "moonshot" },
+   { id: "kimi-k1.5-long-context", label: "Kimi K1.5 (2M ctx)", provider: "moonshot" }
+ ];
+ var PROVIDER_LABELS = {
+   "anthropic": "Anthropic",
+   "openai-subscription": "OpenAI (subscription)",
+   "gemini-subscription": "Gemini (subscription)",
+   "ollama": "Ollama (local)",
+   "local-transformers": "Local Models (transformers.js)",
+   "moonshot": "Moonshot (Kimi)"
+ };
+
+ export {
+   DEFAULT_MODELS,
+   LOCAL_TRANSFORMERS_MODELS,
+   MODEL_CONTEXT_WINDOW,
+   MODEL_PRICING,
+   OPENAI_SUBSCRIPTION_MODELS,
+   GEMINI_SUBSCRIPTION_MODELS,
+   OLLAMA_MODELS_DEFAULT,
+   MOONSHOT_MODELS,
+   MODEL_REGISTRY,
+   PROVIDER_LABELS
+ };
+ //# sourceMappingURL=chunk-VGMBOH7B.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/constants/models.ts"],"sourcesContent":["// Default model IDs and pricing constants.\n// Override model IDs in ~/.zencefyl/config.json — no code changes needed.\n//\n// Model IDs sourced from @mariozechner/pi-ai models.generated.js (subscription)\n// and official pricing pages (API key providers).\n\nimport type { ModelConfig } from '../types/config'\n\n// Fallback models used when config.json has no models section.\nexport const DEFAULT_MODELS: ModelConfig = {\n fast: 'claude-haiku-4-5-20251001', // Cheap + fast — background tasks\n default: 'claude-sonnet-4-6', // Main model for most turns\n deep: 'claude-opus-4-6', // Reserved for deep reasoning\n}\n\n// Default models for the local-transformers provider.\nexport const LOCAL_TRANSFORMERS_MODELS: ModelConfig = {\n fast: 'tinyllama', // 1.1B params, fast on CPU\n default: 'phi3-mini', // 3.8B params, good quality\n deep: 'stablelm2', // 1.6B params, better reasoning\n}\n\n// Context window sizes per model (input tokens).\n// Used to show a \"X / Y tokens\" budget indicator in the status bar.\nexport const MODEL_CONTEXT_WINDOW: Record<string, number> = {\n // Anthropic\n 'claude-haiku-4-5-20251001': 200_000,\n 'claude-sonnet-4-6': 200_000,\n 'claude-opus-4-6': 200_000,\n // OpenAI subscription (via ChatGPT plan — @mariozechner/pi-ai)\n // Context windows are approximate — Codex endpoint docs not fully public.\n 'gpt-5.4': 128_000,\n 'gpt-5.4-mini': 128_000,\n 'gpt-5.3-codex': 128_000,\n 'gpt-5.3-codex-spark': 128_000,\n 'gpt-5.2': 128_000,\n 'gpt-5.2-codex': 128_000,\n 'gpt-5.1': 128_000,\n 'gpt-5.1-codex-max': 128_000,\n 'gpt-5.1-codex-mini': 128_000,\n // Gemini subscription (via Google Cloud Code Assist — @mariozechner/pi-ai)\n 'gemini-3.1-pro-preview': 1_000_000,\n 'gemini-3-pro-preview': 1_000_000,\n 'gemini-3-flash-preview': 1_000_000,\n 'gemini-2.5-pro': 1_000_000,\n 'gemini-2.5-flash': 1_000_000,\n 'gemini-2.0-flash': 1_000_000,\n // Ollama local models (context varies — using typical defaults)\n 'gemma3': 128_000,\n 'gemma3:1b': 32_000,\n 'gemma3:12b': 128_000,\n 'llama3.2': 128_000,\n 'llama3.2:1b': 128_000,\n 'llama3.3': 128_000,\n 'mistral': 32_000,\n 'mistral-nemo': 128_000,\n 'phi4': 128_000,\n 'phi4-mini': 128_000,\n 'tinyllama': 4_096,\n 'qwen2.5': 128_000,\n 'qwen2.5:14b': 128_000,\n 'qwen2.5:72b': 128_000,\n 'deepseek-r1': 64_000,\n 'deepseek-r1:14b': 64_000,\n 'deepseek-r1:32b': 64_000,\n 'qwq': 128_000,\n 'codellama': 16_000,\n 'codellama:13b': 16_000,\n 'codegemma': 8_000,\n 'llava': 4_096,\n 'llava:13b': 4_096,\n // Local transformers models (approximate — varies by model)\n 'phi3-mini': 4_096,\n 'stablelm2': 4_096,\n 'gemma-2b': 8_192,\n // Moonshot (Kimi models)\n 'kimi-k2.5': 256_000,\n 'kimi-k1.5': 256_000,\n 'kimi-k1.5-long-context': 2_000_000,\n}\n\n// Cost per million tokens in USD.\n// Subscription providers (openai-subscription, gemini-subscription) are billed\n// through the plan itself — we track $0 here so StatusBar shows \"free\" for them.\nexport const MODEL_PRICING: Record<string, { inputPerM: number; outputPerM: number }> = {\n // Anthropic (source: anthropic.com/pricing)\n 'claude-haiku-4-5-20251001': { inputPerM: 0.80, outputPerM: 4.00 },\n 'claude-sonnet-4-6': { inputPerM: 3.00, outputPerM: 15.00 },\n 'claude-opus-4-6': { inputPerM: 15.00, outputPerM: 75.00 },\n // OpenAI subscription models — billed via ChatGPT plan, not per token\n 'gpt-5.4': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.4-mini': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.3-codex': { inputPerM: 0, 
outputPerM: 0 },\n 'gpt-5.3-codex-spark': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.2': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.2-codex': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.1': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.1-codex-max': { inputPerM: 0, outputPerM: 0 },\n 'gpt-5.1-codex-mini': { inputPerM: 0, outputPerM: 0 },\n // Gemini subscription models — billed via Google Cloud Code Assist plan, not per token\n 'gemini-3.1-pro-preview': { inputPerM: 0, outputPerM: 0 },\n 'gemini-3-pro-preview': { inputPerM: 0, outputPerM: 0 },\n 'gemini-3-flash-preview': { inputPerM: 0, outputPerM: 0 },\n 'gemini-2.5-pro': { inputPerM: 0, outputPerM: 0 },\n 'gemini-2.5-flash': { inputPerM: 0, outputPerM: 0 },\n 'gemini-2.0-flash': { inputPerM: 0, outputPerM: 0 },\n // Moonshot (Kimi) — approximate pricing (check platform.moonshot.cn for current rates)\n 'kimi-k2.5': { inputPerM: 2.00, outputPerM: 10.00 },\n 'kimi-k1.5': { inputPerM: 8.00, outputPerM: 32.00 },\n 'kimi-k1.5-long-context': { inputPerM: 4.00, outputPerM: 16.00 },\n // Local transformers models — $0 (runs locally, no API cost)\n 'tinyllama': { inputPerM: 0, outputPerM: 0 },\n 'phi3-mini': { inputPerM: 0, outputPerM: 0 },\n 'stablelm2': { inputPerM: 0, outputPerM: 0 },\n 'gemma-2b': { inputPerM: 0, outputPerM: 0 },\n}\n\n// ── Per-provider default model tiers ──────────────────────────────────────────\n\n// Default models for the ChatGPT OAuth subscription provider.\n// IDs must match what @mariozechner/pi-ai openai-codex provider recognises.\n// Valid IDs: gpt-5.1, gpt-5.1-codex-max, gpt-5.1-codex-mini, gpt-5.2,\n// gpt-5.2-codex, gpt-5.3-codex, gpt-5.3-codex-spark, gpt-5.4, gpt-5.4-mini\nexport const OPENAI_SUBSCRIPTION_MODELS: ModelConfig = {\n fast: 'gpt-5.4-mini', // smaller, faster tier\n default: 'gpt-5.4', // latest flagship\n deep: 'gpt-5.3-codex', // strong coding + reasoning\n}\n\n// Default models for the Gemini OAuth subscription provider.\nexport const GEMINI_SUBSCRIPTION_MODELS: ModelConfig = {\n fast: 'gemini-3-flash-preview', // Gemini 3 fast tier\n default: 'gemini-2.5-pro', // stable flagship\n deep: 'gemini-3.1-pro-preview', // latest strongest\n}\n\n// Default models for the Ollama provider.\nexport const OLLAMA_MODELS_DEFAULT: ModelConfig = {\n fast: 'gemma3:1b', // small + fast\n default: 'gemma3', // 4B — recommended general-purpose\n deep: 'qwen2.5', // 7B — stronger for complex tasks\n}\n\n// Default models for the Moonshot (Kimi) provider.\nexport const MOONSHOT_MODELS: ModelConfig = {\n fast: 'kimi-k2.5', // fastest, most cost-effective\n default: 'kimi-k2.5', // balanced — the model you're talking to now\n deep: 'kimi-k1.5', // strongest reasoning with thinking\n}\n\n// ── Model registry for the /model picker ──────────────────────────────────────\n\n// A model entry shown in the interactive /model picker.\nexport interface ModelEntry {\n id: string // model ID passed to provider.chat()\n label: string // human-readable display name\n provider: string // which provider it belongs to (for grouping)\n tier?: string // 'fast' | 'default' | 'deep' — optional badge\n size?: string // disk size hint shown in install wizard (e.g. 
'3.3 GB')\n}\n\n// All models surfaced in the /model picker, grouped by provider.\n// Subscription models marked with their subscription; Ollama entries are generic.\nexport const MODEL_REGISTRY: ModelEntry[] = [\n // ── Anthropic (claude-code or anthropic provider) ─────────────────────────\n { id: 'claude-sonnet-4-6', label: 'Claude Sonnet 4.6', provider: 'anthropic' },\n { id: 'claude-opus-4-6', label: 'Claude Opus 4.6', provider: 'anthropic' },\n { id: 'claude-haiku-4-5-20251001', label: 'Claude Haiku 4.5', provider: 'anthropic' },\n\n // ── OpenAI subscription (via ChatGPT plan — @mariozechner/pi-ai) ─────────\n // Model IDs must match what pi-ai's openai-codex provider recognises.\n { id: 'gpt-5.4', label: 'GPT-5.4', provider: 'openai-subscription', tier: 'default' },\n { id: 'gpt-5.4-mini', label: 'GPT-5.4 Mini', provider: 'openai-subscription', tier: 'fast' },\n { id: 'gpt-5.3-codex', label: 'GPT-5.3 Codex', provider: 'openai-subscription', tier: 'deep' },\n { id: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark', provider: 'openai-subscription' },\n { id: 'gpt-5.2-codex', label: 'GPT-5.2 Codex', provider: 'openai-subscription' },\n { id: 'gpt-5.2', label: 'GPT-5.2', provider: 'openai-subscription' },\n { id: 'gpt-5.1', label: 'GPT-5.1', provider: 'openai-subscription' },\n { id: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max', provider: 'openai-subscription' },\n { id: 'gpt-5.1-codex-mini', label: 'GPT-5.1 Codex Mini', provider: 'openai-subscription' },\n\n // ── Gemini subscription (via Google Cloud Code Assist — @mariozechner/pi-ai) ─\n // Model IDs from pi-ai's google-gemini-cli provider.\n { id: 'gemini-3.1-pro-preview', label: 'Gemini 3.1 Pro', provider: 'gemini-subscription', tier: 'deep' },\n { id: 'gemini-3-pro-preview', label: 'Gemini 3 Pro', provider: 'gemini-subscription' },\n { id: 'gemini-3-flash-preview', label: 'Gemini 3 Flash', provider: 'gemini-subscription', tier: 'fast' },\n { id: 'gemini-2.5-pro', label: 'Gemini 2.5 Pro', provider: 'gemini-subscription', tier: 'default' },\n { id: 'gemini-2.5-flash', label: 'Gemini 2.5 Flash', provider: 'gemini-subscription' },\n { id: 'gemini-2.0-flash', label: 'Gemini 2.0 Flash', provider: 'gemini-subscription' },\n\n // ── Ollama (local) — requires `ollama serve` ──────────────────────────────\n // Fast / small models (< 4 GB)\n { id: 'gemma3:1b', label: 'Gemma 3 1B', provider: 'ollama', tier: 'fast', size: '0.8 GB' },\n { id: 'llama3.2:1b', label: 'Llama 3.2 1B', provider: 'ollama', tier: 'fast', size: '1.3 GB' },\n { id: 'phi4-mini', label: 'Phi 4 Mini', provider: 'ollama', tier: 'fast', size: '2.5 GB' },\n { id: 'tinyllama', label: 'TinyLlama 1.1B', provider: 'ollama', tier: 'fast', size: '0.6 GB' },\n // Balanced models (2–8 GB) — recommended for daily use\n { id: 'gemma3', label: 'Gemma 3 4B', provider: 'ollama', tier: 'default', size: '3.3 GB' },\n { id: 'gemma3:12b', label: 'Gemma 3 12B', provider: 'ollama', size: '8.1 GB' },\n { id: 'llama3.2', label: 'Llama 3.2 3B', provider: 'ollama', tier: 'default', size: '2.0 GB' },\n { id: 'llama3.3', label: 'Llama 3.3 70B', provider: 'ollama', tier: 'deep', size: '43 GB' },\n { id: 'mistral', label: 'Mistral 7B', provider: 'ollama', size: '4.1 GB' },\n { id: 'mistral-nemo', label: 'Mistral Nemo 12B', provider: 'ollama', size: '7.1 GB' },\n { id: 'phi4', label: 'Phi 4 14B', provider: 'ollama', size: '9.1 GB' },\n { id: 'qwen2.5', label: 'Qwen 2.5 7B', provider: 'ollama', tier: 'default', size: '4.7 GB' },\n { id: 'qwen2.5:14b', label: 'Qwen 2.5 14B', provider: 'ollama', size: 
'9.0 GB' },\n { id: 'qwen2.5:72b', label: 'Qwen 2.5 72B', provider: 'ollama', tier: 'deep', size: '47 GB' },\n // Reasoning models\n { id: 'deepseek-r1', label: 'DeepSeek R1 7B', provider: 'ollama', size: '4.7 GB' },\n { id: 'deepseek-r1:14b', label: 'DeepSeek R1 14B', provider: 'ollama', size: '9.0 GB' },\n { id: 'deepseek-r1:32b', label: 'DeepSeek R1 32B', provider: 'ollama', tier: 'deep', size: '20 GB' },\n { id: 'qwq', label: 'QwQ 32B (reasoning)', provider: 'ollama', tier: 'deep', size: '20 GB' },\n // Code models\n { id: 'codellama', label: 'CodeLlama 7B', provider: 'ollama', size: '3.8 GB' },\n { id: 'codellama:13b', label: 'CodeLlama 13B', provider: 'ollama', size: '7.4 GB' },\n { id: 'codegemma', label: 'CodeGemma 7B', provider: 'ollama', size: '5.0 GB' },\n // Multimodal\n { id: 'llava', label: 'LLaVA 7B (vision)', provider: 'ollama', size: '4.5 GB' },\n { id: 'llava:13b', label: 'LLaVA 13B (vision)', provider: 'ollama', size: '8.0 GB' },\n\n // ── Local Transformers (fully local, no external deps) ───────────────────\n { id: 'tinyllama', label: 'TinyLlama 1.1B', provider: 'local-transformers', tier: 'fast' },\n { id: 'phi3-mini', label: 'Phi-3 Mini 3.8B', provider: 'local-transformers', tier: 'default' },\n { id: 'stablelm2', label: 'StableLM 2 Zephyr 1.6B', provider: 'local-transformers', tier: 'default' },\n { id: 'gemma-2b', label: 'Gemma 2B Instruct', provider: 'local-transformers', tier: 'fast' },\n\n // ── Moonshot (Kimi) ─────────────────────────────────────────────────────────\n { id: 'kimi-k2.5', label: 'Kimi K2.5', provider: 'moonshot' },\n { id: 'kimi-k1.5', label: 'Kimi K1.5', provider: 'moonshot' },\n { id: 'kimi-k1.5-long-context', label: 'Kimi K1.5 (2M ctx)', provider: 'moonshot' },\n]\n\n// Provider display names for the picker's group headers.\nexport const PROVIDER_LABELS: Record<string, string> = {\n 'anthropic': 'Anthropic',\n 'openai-subscription': 'OpenAI (subscription)',\n 'gemini-subscription': 'Gemini (subscription)',\n 'ollama': 'Ollama (local)',\n 'local-transformers': 'Local Models (transformers.js)',\n 'moonshot': 'Moonshot 
(Kimi)',\n}\n"],"mappings":";;;AASO,IAAM,iBAA8B;AAAA,EACzC,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAGO,IAAM,4BAAyC;AAAA,EACpD,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAIO,IAAM,uBAA+C;AAAA;AAAA,EAE1D,6BAA6B;AAAA,EAC7B,qBAA6B;AAAA,EAC7B,mBAA6B;AAAA;AAAA;AAAA,EAG7B,WAAwB;AAAA,EACxB,gBAAwB;AAAA,EACxB,iBAAwB;AAAA,EACxB,uBAAwB;AAAA,EACxB,WAAwB;AAAA,EACxB,iBAAwB;AAAA,EACxB,WAAwB;AAAA,EACxB,qBAAwB;AAAA,EACxB,sBAAwB;AAAA;AAAA,EAExB,0BAA2B;AAAA,EAC3B,wBAA2B;AAAA,EAC3B,0BAA2B;AAAA,EAC3B,kBAA2B;AAAA,EAC3B,oBAA2B;AAAA,EAC3B,oBAA2B;AAAA;AAAA,EAE3B,UAAmB;AAAA,EACnB,aAAoB;AAAA,EACpB,cAAmB;AAAA,EACnB,YAAmB;AAAA,EACnB,eAAmB;AAAA,EACnB,YAAmB;AAAA,EACnB,WAAoB;AAAA,EACpB,gBAAmB;AAAA,EACnB,QAAmB;AAAA,EACnB,aAAmB;AAAA,EACnB,aAAqB;AAAA,EACrB,WAAmB;AAAA,EACnB,eAAmB;AAAA,EACnB,eAAmB;AAAA,EACnB,eAAoB;AAAA,EACpB,mBAAoB;AAAA,EACpB,mBAAoB;AAAA,EACpB,OAAmB;AAAA,EACnB,aAAoB;AAAA,EACpB,iBAAoB;AAAA,EACpB,aAAqB;AAAA,EACrB,SAAqB;AAAA,EACrB,aAAqB;AAAA;AAAA,EAErB,aAAa;AAAA,EACb,aAAa;AAAA,EACb,YAAa;AAAA;AAAA,EAEb,aAAc;AAAA,EACd,aAAc;AAAA,EACd,0BAA0B;AAC5B;AAKO,IAAM,gBAA2E;AAAA;AAAA,EAEtF,6BAA6B,EAAE,WAAW,KAAO,YAAY,EAAM;AAAA,EACnE,qBAA6B,EAAE,WAAW,GAAO,YAAY,GAAM;AAAA,EACnE,mBAA6B,EAAE,WAAW,IAAO,YAAY,GAAM;AAAA;AAAA,EAEnE,WAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,gBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,iBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,uBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,WAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,iBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,WAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,qBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACtD,sBAAwB,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA;AAAA,EAEtD,0BAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,wBAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,0BAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,kBAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,oBAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EACzD,oBAA2B,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA;AAAA,EAEzD,aAAc,EAAE,WAAW,GAAM,YAAY,GAAM;AAAA,EACnD,aAAc,EAAE,WAAW,GAAM,YAAY,GAAM;AAAA,EACnD,0BAA0B,EAAE,WAAW,GAAM,YAAY,GAAM;AAAA;AAAA,EAE/D,aAAa,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EAC3C,aAAa,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EAC3C,aAAa,EAAE,WAAW,GAAG,YAAY,EAAE;AAAA,EAC3C,YAAa,EAAE,WAAW,GAAG,YAAY,EAAE;AAC7C;AAQO,IAAM,6BAA0C;AAAA,EACrD,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAGO,IAAM,6BAA0C;AAAA,EACrD,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAGO,IAAM,wBAAqC;AAAA,EAChD,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAGO,IAAM,kBAA+B;AAAA,EAC1C,MAAS;AAAA;AAAA,EACT,SAAS;AAAA;AAAA,EACT,MAAS;AAAA;AACX;AAeO,IAAM,iBAA+B;AAAA;AAAA,EAE1C,EAAE,IAAI,qBAA6B,OAAO,qBAAwB,UAAU,YAAY;AAAA,EACxF,EAAE,IAAI,mBAA6B,OAAO,mBAAwB,UAAU,YAAY;AAAA,EACxF,EAAE,IAAI,6BAA6B,OAAO,oBAAwB,UAAU,YAAY;AAAA;AAAA;AAAA,EAIxF,EAAE,IAAI,WAAuB,OAAO,WAAqB,UAAU,uBAAuB,MAAM,UAAU;AAAA,EAC1G,EAAE,IAAI,gBAAuB,OAAO,gBAAqB,UAAU,uBAAuB,MAAM,OAAU;AAAA,EAC1G,EAAE,IAAI,iBAAuB,OAAO,iBAAqB,UAAU,uBAAuB,MAAM,OAAU;AAAA,EAC1G,EAAE,IAAI,uBAAuB,OAAO,uBAAuB,UAAU,sBAAsB;AAAA,EAC3F,EAAE,IAAI,iBAAuB,OAAO,iBAAqB,UAAU,sBAAsB;AAAA,EACzF,EAAE,IAAI,WAAuB,OAAO,WAAsB,UAAU,sBAAsB;AAAA,EAC1F,EAAE,IAAI,WAAuB,OAAO,WAAsB,UAAU,sBAAsB;AAAA,EAC1F,EAAE,IAAI,qBAAuB,OAAO,qBAAqB,UAAU,sBAAsB;AAAA,EACzF,EAAE,IAAI,sBAAuB,OAAO,sBAAsB,UAAU,sBAAsB;AAAA;AAAA;AAAA,EAI1F,EAAE,IAAI,0BAA2B,OAAO,kBAAqB,UAAU,uBAAuB,MAAM,OAAU;AAAA,EAC9G,EAAE,IAAI,wBAA2B,OAAO,gBAAsB,UAAU,sBAAuC;AAAA,EAC/G,EAAE,IAAI,0BAA2B,OAAO,kBAAsB,UAAU,uBAAuB,MAAM,OAAU;AAAA,EAC/G,EAAE,IAAI,kBAA2B,OAAO,kBAAsB,UAAU,uBAAuB,MAAM,UAAU;AAAA,EAC/G,EAAE,IAAI,oBAA2B,OAAO,oBAAsB,UAAU,sBAAuC;AAAA,EAC/G,EAAE,IAAI,oBAA2B,OAAO,oBAAsB,UAAU,sBAAuC;AAAA;AAAA;AAAA,EAI/G,EAAE
,IAAI,aAAiB,OAAO,cAAoB,UAAU,UAAU,MAAM,QAAW,MAAM,SAAS;AAAA,EACtG,EAAE,IAAI,eAAiB,OAAO,gBAAoB,UAAU,UAAU,MAAM,QAAW,MAAM,SAAS;AAAA,EACtG,EAAE,IAAI,aAAiB,OAAO,cAAqB,UAAU,UAAU,MAAM,QAAW,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,aAAiB,OAAO,kBAAqB,UAAU,UAAU,MAAM,QAAW,MAAM,SAAS;AAAA;AAAA,EAEvG,EAAE,IAAI,UAAiB,OAAO,cAAqB,UAAU,UAAU,MAAM,WAAW,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,cAAiB,OAAO,eAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,YAAiB,OAAO,gBAAqB,UAAU,UAAU,MAAM,WAAW,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,YAAiB,OAAO,iBAAqB,UAAU,UAAU,MAAM,QAAW,MAAM,QAAS;AAAA,EACvG,EAAE,IAAI,WAAiB,OAAO,cAAsB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACxG,EAAE,IAAI,gBAAiB,OAAO,oBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,QAAiB,OAAO,aAAsB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACxG,EAAE,IAAI,WAAiB,OAAO,eAAsB,UAAU,UAAU,MAAM,WAAW,MAAM,SAAS;AAAA,EACxG,EAAE,IAAI,eAAiB,OAAO,gBAAsB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACxG,EAAE,IAAI,eAAiB,OAAO,gBAAsB,UAAU,UAAU,MAAM,QAAW,MAAM,QAAS;AAAA;AAAA,EAExG,EAAE,IAAI,eAAiB,OAAO,kBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,mBAAmB,OAAO,mBAAmB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,mBAAmB,OAAO,mBAAmB,UAAU,UAAU,MAAM,QAAW,MAAM,QAAS;AAAA,EACvG,EAAE,IAAI,OAAiB,OAAO,uBAAuB,UAAU,UAAU,MAAM,QAAS,MAAM,QAAS;AAAA;AAAA,EAEvG,EAAE,IAAI,aAAiB,OAAO,gBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,iBAAiB,OAAO,iBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,aAAiB,OAAO,gBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA;AAAA,EAEvG,EAAE,IAAI,SAAiB,OAAO,qBAAqB,UAAU,UAA2B,MAAM,SAAS;AAAA,EACvG,EAAE,IAAI,aAAiB,OAAO,sBAAsB,UAAU,UAA0B,MAAM,SAAS;AAAA;AAAA,EAGvG,EAAE,IAAI,aAAa,OAAO,kBAAkB,UAAU,sBAAsB,MAAM,OAAO;AAAA,EACzF,EAAE,IAAI,aAAa,OAAO,mBAAmB,UAAU,sBAAsB,MAAM,UAAU;AAAA,EAC7F,EAAE,IAAI,aAAa,OAAO,0BAA0B,UAAU,sBAAsB,MAAM,UAAU;AAAA,EACpG,EAAE,IAAI,YAAa,OAAO,qBAAqB,UAAU,sBAAsB,MAAM,OAAO;AAAA;AAAA,EAG5F,EAAE,IAAI,aAAc,OAAO,aAAuB,UAAU,WAAW;AAAA,EACvE,EAAE,IAAI,aAAc,OAAO,aAAuB,UAAU,WAAW;AAAA,EACvE,EAAE,IAAI,0BAA0B,OAAO,sBAAsB,UAAU,WAAW;AACpF;AAGO,IAAM,kBAA0C;AAAA,EACrD,aAAuB;AAAA,EACvB,uBAAuB;AAAA,EACvB,uBAAuB;AAAA,EACvB,UAAuB;AAAA,EACvB,sBAAsB;AAAA,EACtB,YAAuB;AACzB;","names":[]}
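MODEL_CONTEXT_WINDOW and MODEL_PRICING combine directly into the status-bar cost and budget figures described in the embedded comments. A sketch of that arithmetic (estimateCostUSD is a hypothetical helper, not an export of this chunk):

    import { MODEL_PRICING, MODEL_CONTEXT_WINDOW } from "./chunk-VGMBOH7B.js";

    // Hypothetical helper: USD cost of one turn, given token counts from the provider.
    function estimateCostUSD(modelId, inputTokens, outputTokens) {
      const p = MODEL_PRICING[modelId];
      if (!p) return 0; // unknown model: no pricing data, so report nothing
      return (inputTokens * p.inputPerM + outputTokens * p.outputPerM) / 1e6;
    }

    // claude-sonnet-4-6 at $3/M input and $15/M output:
    // 20,000 in + 2,000 out = (60,000 + 30,000) / 1e6 = $0.09
    console.log(estimateCostUSD("claude-sonnet-4-6", 20000, 2000));

    // Budget indicator: fraction of the context window consumed.
    const used = 150000 / MODEL_CONTEXT_WINDOW["claude-sonnet-4-6"]; // 0.75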
@@ -0,0 +1,16 @@
+ #!/usr/bin/env node
+ import {
+   credentialsPath,
+   getAccessToken,
+   loadCredentials,
+   saveCredentials,
+   saveProviderCredentials
+ } from "./chunk-OBDSCGMH.js";
+ export {
+   credentialsPath,
+   getAccessToken,
+   loadCredentials,
+   saveCredentials,
+   saveProviderCredentials
+ };
+ //# sourceMappingURL=credentials-7Z74C2OF.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
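An empty sources list confirms credentials-7Z74C2OF.js is a pure re-export barrel: it pins a stable filename in front of the content-hashed chunk. A hedged sketch of writing credentials through it (the placeholder token values are illustrative; real ones come from the OAuth flow in zencefyl setup):

    import os from "node:os";
    import path from "node:path";
    import { saveProviderCredentials } from "./credentials-7Z74C2OF.js";

    const dataDir = path.join(os.homedir(), ".zencefyl");
    saveProviderCredentials(dataDir, "gemini-subscription", {
      access: "<access-token>",
      refresh: "<refresh-token>",
      expires: Date.now() + 3600000, // epoch ms; refresh kicks in 60s before this
      projectId: "<gcp-project-id>"  // Gemini-only field, preserved across refreshes
    });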
@@ -0,0 +1,29 @@
+ #!/usr/bin/env node
+
+ // src/cli/headless.ts
+ async function readStdin() {
+   if (process.stdin.isTTY) return "";
+   const chunks = [];
+   for await (const chunk of process.stdin) chunks.push(chunk);
+   return Buffer.concat(chunks).toString("utf8").trim();
+ }
+ async function runHeadless(engine, prompt) {
+   for await (const delta of engine.sendMessage(prompt)) {
+     if (delta.type === "text") process.stdout.write(delta.text);
+     if (delta.type === "done") break;
+   }
+   process.stdout.write("\n");
+ }
+ async function resolveHeadlessPrompt() {
+   const args = process.argv.slice(2).filter((a) => a !== "-p" && a !== "--print");
+   const promptArg = args.find((a) => !a.startsWith("-"));
+   if (promptArg) return promptArg;
+   const stdin = await readStdin();
+   if (stdin) return stdin;
+   return null;
+ }
+ export {
+   resolveHeadlessPrompt,
+   runHeadless
+ };
+ //# sourceMappingURL=headless-B5GW5RGP.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/cli/headless.ts"],"sourcesContent":["// Non-interactive output mode — used when zencefyl is invoked with -p / --print.\n// Streams the response directly to stdout and exits. No Ink, no animations.\n// Usage: zencefyl -p \"question\" or echo \"question\" | zencefyl -p\n\nimport type { Engine } from '../core/engine.js'\nimport type { Container } from '../bootstrap/container.js'\n\n// Read all of stdin into a string (for piped input).\nasync function readStdin(): Promise<string> {\n if (process.stdin.isTTY) return '' // interactive — no piped content\n const chunks: Buffer[] = []\n for await (const chunk of process.stdin) chunks.push(chunk as Buffer)\n return Buffer.concat(chunks).toString('utf8').trim()\n}\n\n// Run a single headless prompt: stream response to stdout, then exit.\nexport async function runHeadless(\n engine: Engine,\n prompt: string,\n): Promise<void> {\n for await (const delta of engine.sendMessage(prompt)) {\n if (delta.type === 'text') process.stdout.write(delta.text)\n if (delta.type === 'done') break\n }\n process.stdout.write('\\n')\n}\n\n// Entry point for headless mode — resolves the prompt from args or stdin.\nexport async function resolveHeadlessPrompt(): Promise<string | null> {\n // Find first non-flag argument after the binary name\n const args = process.argv.slice(2).filter(a => a !== '-p' && a !== '--print')\n const promptArg = args.find(a => !a.startsWith('-'))\n if (promptArg) return promptArg\n\n // No arg — try reading from stdin (piped input)\n const stdin = await readStdin()\n if (stdin) return stdin\n\n return null\n}\n"],"mappings":";;;AAQA,eAAe,YAA6B;AAC1C,MAAI,QAAQ,MAAM,MAAO,QAAO;AAChC,QAAM,SAAmB,CAAC;AAC1B,mBAAiB,SAAS,QAAQ,MAAO,QAAO,KAAK,KAAe;AACpE,SAAO,OAAO,OAAO,MAAM,EAAE,SAAS,MAAM,EAAE,KAAK;AACrD;AAGA,eAAsB,YACpB,QACA,QACe;AACf,mBAAiB,SAAS,OAAO,YAAY,MAAM,GAAG;AACpD,QAAI,MAAM,SAAS,OAAQ,SAAQ,OAAO,MAAM,MAAM,IAAI;AAC1D,QAAI,MAAM,SAAS,OAAQ;AAAA,EAC7B;AACA,UAAQ,OAAO,MAAM,IAAI;AAC3B;AAGA,eAAsB,wBAAgD;AAEpE,QAAM,OAAO,QAAQ,KAAK,MAAM,CAAC,EAAE,OAAO,OAAK,MAAM,QAAQ,MAAM,SAAS;AAC5E,QAAM,YAAY,KAAK,KAAK,OAAK,CAAC,EAAE,WAAW,GAAG,CAAC;AACnD,MAAI,UAAW,QAAO;AAGtB,QAAM,QAAQ,MAAM,UAAU;AAC9B,MAAI,MAAO,QAAO;AAElB,SAAO;AACT;","names":[]}
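Per the embedded source, headless mode is the -p / --print path: no Ink UI, just a streamed answer. Wiring it up looks roughly like this (createEngine stands in for the package's real bootstrap, which is not part of this diff):

    import { resolveHeadlessPrompt, runHeadless } from "./headless-B5GW5RGP.js";
    import { createEngine } from "./bootstrap.js"; // hypothetical factory

    const prompt = await resolveHeadlessPrompt();
    if (prompt === null) {
      console.error('usage: zencefyl -p "question"  (or pipe a prompt on stdin)');
      process.exit(1);
    }
    const engine = await createEngine();
    await runHeadless(engine, prompt); // streams text deltas to stdout, then a newline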