tg-agent 0.1.0 → 1.1.1

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -1,407 +1,7 @@
- import { createAgentSession, discoverAuthStorage, discoverModels, SessionManager, SettingsManager, } from "@mariozechner/pi-coding-agent";
- import { config } from "./config.js";
- import { readCodexOAuth } from "./codexAuth.js";
- import { createCustomTools } from "./customTools.js";
- import { ensureDir } from "./utils.js";
- import { sessionFilePath } from "./sessionStore.js";
- const CODEX_MODEL_ALIASES = {
-     "codex-mini-latest": "gpt-5.1-codex-mini",
-     "codex-max-latest": "gpt-5.1-codex-max",
-     "codex-latest": "gpt-5.2-codex",
- };
- function normalizeProviderId(provider) {
-     const trimmed = provider.trim().toLowerCase();
-     if (trimmed === "codex")
-         return "openai-codex";
-     return trimmed;
- }
- function splitModelRef(ref, fallbackProvider) {
-     const trimmed = ref.trim();
-     if (!trimmed) {
-         return { provider: fallbackProvider, modelId: "" };
-     }
-     if (trimmed.includes("/")) {
-         const [provider, modelId] = trimmed.split("/", 2);
-         return { provider: provider.trim() || fallbackProvider, modelId: modelId.trim() };
-     }
-     return { provider: fallbackProvider, modelId: trimmed };
- }
- function normalizeModelId(provider, modelId) {
-     if (provider !== "openai-codex") {
-         return modelId;
-     }
-     const mapped = CODEX_MODEL_ALIASES[modelId];
-     return mapped ?? modelId;
- }
- function pickDefaultModel(modelRegistry, provider) {
-     const available = modelRegistry.getAvailable().filter((model) => model.provider === provider);
-     if (available.length > 0) {
-         return available[0];
-     }
-     const allModels = modelRegistry.getAll().filter((model) => model.provider === provider);
-     return allModels[0];
- }
- function resolveModel(modelRegistry, overrides) {
-     const hasOverrideProvider = Boolean(overrides?.provider?.trim());
-     const hasOverrideModel = Boolean(overrides?.modelId?.trim());
-     const providerRaw = normalizeProviderId(overrides?.provider || config.modelProvider || "openai-codex");
-     const modelRef = hasOverrideProvider || hasOverrideModel ? "" : config.modelRef || "";
-     const { provider: refProvider, modelId: refModelId } = splitModelRef(modelRef, providerRaw);
-     const provider = overrides?.provider ? providerRaw : normalizeProviderId(refProvider);
-     let modelId = hasOverrideModel ? overrides?.modelId?.trim() || "" : refModelId;
-     if (!modelId) {
-         const pick = pickDefaultModel(modelRegistry, provider);
-         if (pick) {
-             return { model: pick, provider, modelId: pick.id };
-         }
-         return { model: undefined, provider, modelId: "" };
-     }
-     const normalizedModelId = normalizeModelId(provider, modelId);
-     const model = modelRegistry.find(provider, normalizedModelId);
-     return { model, provider, modelId: normalizedModelId };
- }
- function applyCodexOAuth(authStorage) {
-     const codex = readCodexOAuth();
-     if (!codex) {
-         return null;
-     }
-     authStorage.setRuntimeApiKey("openai-codex", codex.accessToken);
-     return { source: codex.source, expiresAt: codex.expiresAt };
- }
- function extractTextFromMessage(message) {
-     const role = message?.role;
-     if (role !== "assistant")
-         return "";
-     const content = message.content;
-     if (typeof content === "string") {
-         return content.trim();
-     }
-     if (!Array.isArray(content)) {
-         return "";
-     }
-     const textBlocks = content
-         .filter((block) => typeof block === "object" && block && block.type === "text")
-         .map((block) => block.text ?? "")
-         .map((text) => text.trim())
-         .filter(Boolean);
-     if (textBlocks.length > 0) {
-         return textBlocks.join("\n\n");
-     }
-     const thinkingBlocks = content
-         .filter((block) => typeof block === "object" && block && block.type === "thinking")
-         .map((block) => block.thinking ?? "")
-         .map((text) => text.trim())
-         .filter(Boolean);
-     if (thinkingBlocks.length > 0) {
-         return thinkingBlocks.join("\n\n");
-     }
-     return "";
- }
- function summarizeMessages(messages) {
-     if (messages.length === 0) {
-         return "messages=0";
-     }
-     const tail = messages.slice(-6).map((message) => {
-         const role = message?.role ?? "unknown";
-         const content = message.content;
-         const stopReason = message?.stopReason;
-         const errorMessage = message?.errorMessage;
-         if (typeof content === "string") {
-             return `${role}(text:${content.length}${stopReason ? `,stop=${stopReason}` : ""})`;
-         }
-         if (Array.isArray(content)) {
-             const types = content
-                 .map((block) => {
-                     if (typeof block === "object" && block && "type" in block) {
-                         return String(block.type || "unknown");
-                     }
-                     return typeof block;
-                 })
-                 .join(",");
-             const meta = [
-                 `blocks:${types || "none"}`,
-                 stopReason ? `stop=${stopReason}` : null,
-                 errorMessage ? "error=1" : null,
-             ]
-                 .filter(Boolean)
-                 .join(",");
-             return `${role}(${meta})`;
-         }
-         if (content == null) {
-             const meta = [
-                 "empty",
-                 stopReason ? `stop=${stopReason}` : null,
-                 errorMessage ? "error=1" : null,
-             ]
-                 .filter(Boolean)
-                 .join(",");
-             return `${role}(${meta})`;
-         }
-         return `${role}(${typeof content}${stopReason ? `,stop=${stopReason}` : ""})`;
-     });
-     return `messages=${messages.length} last=[${tail.join(" ")}]`;
- }
- function extractAssistantText(messages, session) {
-     const lastText = session?.getLastAssistantText?.();
-     if (lastText?.trim()) {
-         return lastText.trim();
-     }
-     for (let i = messages.length - 1; i >= 0; i -= 1) {
-         const text = extractTextFromMessage(messages[i]);
-         if (text) {
-             return text;
-         }
-     }
-     return "";
- }
- function findLastAssistantMessage(messages) {
-     for (let i = messages.length - 1; i >= 0; i -= 1) {
-         const role = messages[i]?.role;
-         if (role === "assistant") {
-             return messages[i];
-         }
-     }
-     return undefined;
- }
- export async function runPiAgentPrompt(params) {
-     await ensureDir(config.sessionDir);
-     await ensureDir(config.agentDir);
-     await ensureDir(config.workspaceDir);
-     const sessionFile = sessionFilePath(params.userId, params.sessionId);
-     const authStorage = discoverAuthStorage(config.agentDir);
-     const codexInfo = applyCodexOAuth(authStorage);
-     if (codexInfo) {
-         console.log(`[tg-agent] codex oauth source=${codexInfo.source} expiresAt=${new Date(codexInfo.expiresAt).toISOString()}`);
-     }
-     const modelRegistry = discoverModels(authStorage, config.agentDir);
-     const { model, provider, modelId } = resolveModel(modelRegistry, {
-         provider: params.modelProvider,
-         modelId: params.modelId,
-     });
-     if (modelId && !model) {
-         console.warn(`[tg-agent] model not found: ${provider}/${modelId}`);
-     }
-     const sessionManager = SessionManager.open(sessionFile);
-     const settingsManager = SettingsManager.create(config.workspaceDir, config.agentDir);
-     const { session, modelFallbackMessage } = await createAgentSession({
-         cwd: config.workspaceDir,
-         agentDir: config.agentDir,
-         authStorage,
-         modelRegistry,
-         model: model ?? undefined,
-         sessionManager,
-         settingsManager,
-         customTools: createCustomTools(),
-         systemPrompt: (defaultPrompt) => {
-             const extra = params.systemPrompt?.trim();
-             if (!extra)
-                 return defaultPrompt;
-             return `${defaultPrompt}\n\n${extra}`;
-         },
-     });
-     if (modelFallbackMessage) {
-         console.warn(`[tg-agent] modelFallback=${modelFallbackMessage}`);
-     }
-     let userAbortRequested = false;
-     const abortFn = () => {
-         userAbortRequested = true;
-         void session.abort();
-     };
-     params.onAbortReady?.(abortFn);
-     const logEvents = process.env.LOG_AGENT_EVENTS !== "0";
-     const logStream = process.env.LOG_AGENT_STREAM === "1";
-     const toolStartTimes = new Map();
-     const unsubscribe = session.subscribe((event) => {
-         switch (event.type) {
-             case "agent_start":
-                 if (logEvents) {
-                     console.log(`[tg-agent] agent start session=${params.sessionId}`);
-                 }
-                 lastProgressAt = Date.now();
-                 params.onStatus?.({ type: "agent_start" });
-                 break;
-             case "agent_end":
-                 if (logEvents) {
-                     console.log(`[tg-agent] agent end session=${params.sessionId}`);
-                 }
-                 params.onStatus?.({ type: "agent_end" });
-                 break;
-             case "turn_start":
-                 if (logEvents) {
-                     console.log(`[tg-agent] turn start session=${params.sessionId}`);
-                 }
-                 lastProgressAt = Date.now();
-                 params.onStatus?.({ type: "turn_start" });
-                 break;
-             case "turn_end":
-                 if (logEvents) {
-                     console.log(`[tg-agent] turn end session=${params.sessionId} toolResults=${event.toolResults.length}`);
-                 }
-                 lastProgressAt = Date.now();
-                 params.onStatus?.({ type: "turn_end", toolResults: event.toolResults.length });
-                 break;
-             case "message_start": {
-                 if (logEvents) {
-                     const role = event.message.role ?? "unknown";
-                     console.log(`[tg-agent] message start session=${params.sessionId} role=${role}`);
-                 }
-                 lastProgressAt = Date.now();
-                 {
-                     const role = event.message.role ?? "unknown";
-                     params.onStatus?.({ type: "message_start", role });
-                 }
-                 break;
-             }
-             case "message_end": {
-                 if (logEvents) {
-                     const role = event.message.role ?? "unknown";
-                     console.log(`[tg-agent] message end session=${params.sessionId} role=${role}`);
-                 }
-                 lastProgressAt = Date.now();
-                 {
-                     const role = event.message.role ?? "unknown";
-                     params.onStatus?.({ type: "message_end", role });
-                 }
-                 break;
-             }
-             case "message_update": {
-                 if (!logStream)
-                     break;
-                 if (event.assistantMessageEvent.type === "text_delta") {
-                     const delta = event.assistantMessageEvent.delta ?? "";
-                     const len = delta.length;
-                     if (len > 0) {
-                         console.log(`[tg-agent] stream delta session=${params.sessionId} chars=${len}`);
-                     }
-                     lastProgressAt = Date.now();
-                 }
-                 if (event.assistantMessageEvent.type === "thinking_delta") {
-                     lastProgressAt = Date.now();
-                 }
-                 break;
-             }
-             case "tool_execution_start": {
-                 toolStartTimes.set(event.toolCallId, Date.now());
-                 console.log(`[tg-agent] tool start name=${event.toolName} id=${event.toolCallId}`);
-                 activeToolCount += 1;
-                 lastProgressAt = Date.now();
-                 params.onStatus?.({
-                     type: "tool_start",
-                     name: event.toolName,
-                     id: event.toolCallId,
-                     args: event.args,
-                 });
-                 break;
-             }
-             case "tool_execution_end": {
-                 const startedAt = toolStartTimes.get(event.toolCallId);
-                 const duration = startedAt ? Date.now() - startedAt : 0;
-                 console.log(`[tg-agent] tool end name=${event.toolName} id=${event.toolCallId} ok=${!event.isError} durationMs=${duration}`);
-                 toolStartTimes.delete(event.toolCallId);
-                 activeToolCount = Math.max(0, activeToolCount - 1);
-                 lastProgressAt = Date.now();
-                 params.onStatus?.({
-                     type: "tool_end",
-                     name: event.toolName,
-                     id: event.toolCallId,
-                     ok: !event.isError,
-                     durationMs: duration,
-                 });
-                 break;
-             }
-             default:
-                 break;
-         }
-     });
-     let heartbeat = null;
-     let timeoutInterval = null;
-     let timedOut = false;
-     let timedOutAfterMs = 0;
-     let timedOutStreaming = false;
-     let lastProgressAt = Date.now();
-     let activeToolCount = 0;
-     try {
-         const promptStartedAt = Date.now();
-         heartbeat = setInterval(() => {
-             const elapsed = Date.now() - promptStartedAt;
-             console.log(`[tg-agent] prompt running session=${params.sessionId} elapsedMs=${elapsed} streaming=${session.isStreaming}`);
-             params.onStatus?.({
-                 type: "heartbeat",
-                 elapsedMs: elapsed,
-                 streaming: session.isStreaming,
-             });
-         }, 15000);
-         timeoutInterval = setInterval(() => {
-             if (timedOut)
-                 return;
-             if (activeToolCount > 0)
-                 return;
-             const elapsed = Date.now() - lastProgressAt;
-             const baseTimeout = session.isStreaming
-                 ? config.modelTimeoutStreamingMs
-                 : config.modelTimeoutMs;
-             const timeoutMs = Math.max(1_000, baseTimeout);
-             if (elapsed >= timeoutMs) {
-                 timedOut = true;
-                 timedOutAfterMs = timeoutMs;
-                 timedOutStreaming = session.isStreaming;
-                 console.warn(`[tg-agent] model timeout session=${params.sessionId} elapsedMs=${elapsed}`);
-                 void session.abort();
-             }
-         }, 2000);
-         try {
-             await session.prompt(params.prompt);
-             if (timedOut) {
-                 throw new Error(`Model request timed out after ${config.modelTimeoutMs}ms`);
-             }
-         }
-         catch (error) {
-             if (userAbortRequested) {
-                 throw new Error("Cancelled by user.");
-             }
-             if (timedOut) {
-                 const suffix = timedOutStreaming ? " (streaming)" : "";
-                 const ms = timedOutAfterMs || config.modelTimeoutMs;
-                 throw new Error(`Model request timed out after ${ms}ms${suffix}`);
-             }
-             throw error;
-         }
-         if (heartbeat)
-             clearInterval(heartbeat);
-         const lastAssistant = findLastAssistantMessage(session.messages);
-         if (lastAssistant) {
-             const stopReason = lastAssistant
-                 ?.stopReason;
-             const errorMessage = lastAssistant
-                 ?.errorMessage;
-             if (stopReason === "error") {
-                 console.warn(`[tg-agent] model error session=${params.sessionId} error=${errorMessage ?? "unknown"}`);
-                 throw new Error(errorMessage ?? "Model error without details.");
-             }
-         }
-         const text = extractAssistantText(session.messages, session);
-         if (!text) {
-             const summary = summarizeMessages(session.messages);
-             console.warn(`[tg-agent] empty response session=${params.sessionId} ${summary}`);
-             throw new Error("No assistant response.");
-         }
-         return {
-             text,
-             sessionFile,
-             sessionId: session.sessionId,
-             modelProvider: session.model?.provider ?? provider,
-             modelId: session.model?.id ?? modelId,
-             modelFallbackMessage,
-         };
-     }
-     finally {
-         if (heartbeat)
-             clearInterval(heartbeat);
-         if (timeoutInterval)
-             clearInterval(timeoutInterval);
-         unsubscribe();
-         const manager = sessionManager;
-         manager.flushPendingToolResults?.();
-         session.dispose();
-     }
- }
+ import{createAgentSession as P,discoverAuthStorage as j,discoverModels as B,SessionManager as N,SettingsManager as L}from"@mariozechner/pi-coding-agent";import{config as d}from"./config.js";import{readCodexOAuth as F}from"./codexAuth.js";import{createCustomTools as z}from"./customTools.js";import{getMcpCatalog as G}from"./mcp.js";import{ensureDir as k}from"./utils.js";import{sessionFilePath as q}from"./sessionStore.js";const K={"codex-mini-latest":"gpt-5.1-codex-mini","codex-max-latest":"gpt-5.1-codex-max","codex-latest":"gpt-5.2-codex"};function C(e){const t=e.trim().toLowerCase();return t==="codex"?"openai-codex":t}function V(e,t){const n=e.trim();if(!n)return{provider:t,modelId:""};if(n.includes("/")){const[i,r]=n.split("/",2);return{provider:i.trim()||t,modelId:r.trim()}}return{provider:t,modelId:n}}function X(e,t){return e!=="openai-codex"?t:K[t]??t}function H(e,t){const n=e.getAvailable().filter(r=>r.provider===t);return n.length>0?n[0]:e.getAll().filter(r=>r.provider===t)[0]}function J(e,t){const n=!!t?.provider?.trim(),i=!!t?.modelId?.trim(),r=C(t?.provider||d.modelProvider||"openai-codex"),s=n||i?"":d.modelRef||"",{provider:w,modelId:p}=V(s,r),u=t?.provider?r:C(w);let m=i?t?.modelId?.trim()||"":p;if(!m){const $=H(e,u);return $?{model:$,provider:u,modelId:$.id}:{model:void 0,provider:u,modelId:""}}const I=X(u,m);return{model:e.find(u,I),provider:u,modelId:I}}function Q(e){const t=F();return t?(e.setRuntimeApiKey("openai-codex",t.accessToken),{source:t.source,expiresAt:t.expiresAt}):null}function U(e){if(e?.role!=="assistant")return"";const n=e.content;if(typeof n=="string")return n.trim();if(!Array.isArray(n))return"";const i=n.filter(s=>typeof s=="object"&&s&&s.type==="text").map(s=>s.text??"").map(s=>s.trim()).filter(Boolean);if(i.length>0)return i.join(`
+
+ `);const r=n.filter(s=>typeof s=="object"&&s&&s.type==="thinking").map(s=>s.thinking??"").map(s=>s.trim()).filter(Boolean);return r.length>0?r.join(`
+
+ `):""}function W(e){if(e.length===0)return"messages=0";const t=e.slice(-6).map(n=>{const i=n?.role??"unknown",r=n.content,s=n?.stopReason,w=n?.errorMessage;if(typeof r=="string")return`${i}(text:${r.length}${s?`,stop=${s}`:""})`;if(Array.isArray(r)){const u=[`blocks:${r.map(m=>typeof m=="object"&&m&&"type"in m?String(m.type||"unknown"):typeof m).join(",")||"none"}`,s?`stop=${s}`:null,w?"error=1":null].filter(Boolean).join(",");return`${i}(${u})`}if(r==null){const p=["empty",s?`stop=${s}`:null,w?"error=1":null].filter(Boolean).join(",");return`${i}(${p})`}return`${i}(${typeof r}${s?`,stop=${s}`:""})`});return`messages=${e.length} last=[${t.join(" ")}]`}function Y(e,t){const n=t?.getLastAssistantText?.();if(n?.trim())return n.trim();for(let i=e.length-1;i>=0;i-=1){const r=U(e[i]);if(r)return r}return""}function Z(e){for(let t=e.length-1;t>=0;t-=1)if(e[t]?.role==="assistant")return e[t]}async function ae(e){await k(d.sessionDir),await k(d.agentDir),await k(d.workspaceDir);const t=q(e.userId,e.sessionId),n=j(d.agentDir),i=Q(n);i&&console.log(`[tg-agent] codex oauth source=${i.source} expiresAt=${new Date(i.expiresAt).toISOString()}`);const r=B(n,d.agentDir),{model:s,provider:w,modelId:p}=J(r,{provider:e.modelProvider,modelId:e.modelId});p&&!s&&console.warn(`[tg-agent] model not found: ${w}/${p}`);const u=N.open(t),m=L.create(d.workspaceDir,d.agentDir);let I="";try{I=await G(d.agentDir,{timeoutMs:4e3,maxBytes:d.fetchMaxBytes,maxChars:3e3})}catch(o){const a=o instanceof Error?o.message:String(o);console.warn(`[tg-agent] mcp catalog error: ${a}`)}const{session:l,modelFallbackMessage:$}=await P({cwd:d.workspaceDir,agentDir:d.agentDir,authStorage:n,modelRegistry:r,model:s??void 0,sessionManager:u,settingsManager:m,customTools:z({telegram:e.telegram}),systemPrompt:o=>{const a=e.systemPrompt?.trim(),c=[o];return a&&c.push(a),I&&c.push(I),c.join(`
+
+ `)}});$&&console.warn(`[tg-agent] modelFallback=${$}`);let b=!1;const R=()=>{b=!0,l.abort()};e.onAbortReady?.(R);const M=process.env.LOG_AGENT_EVENTS!=="0",v=process.env.LOG_AGENT_STREAM==="1",S=new Map,O=l.subscribe(o=>{switch(o.type){case"agent_start":M&&console.log(`[tg-agent] agent start session=${e.sessionId}`),f=Date.now(),e.onStatus?.({type:"agent_start"});break;case"agent_end":M&&console.log(`[tg-agent] agent end session=${e.sessionId}`),e.onStatus?.({type:"agent_end"});break;case"turn_start":M&&console.log(`[tg-agent] turn start session=${e.sessionId}`),f=Date.now(),e.onStatus?.({type:"turn_start"});break;case"turn_end":M&&console.log(`[tg-agent] turn end session=${e.sessionId} toolResults=${o.toolResults.length}`),f=Date.now(),e.onStatus?.({type:"turn_end",toolResults:o.toolResults.length});break;case"message_start":{if(M){const a=o.message.role??"unknown";console.log(`[tg-agent] message start session=${e.sessionId} role=${a}`)}f=Date.now();{const a=o.message.role??"unknown";e.onStatus?.({type:"message_start",role:a})}break}case"message_end":{if(M){const a=o.message.role??"unknown";console.log(`[tg-agent] message end session=${e.sessionId} role=${a}`)}f=Date.now();{const a=o.message.role??"unknown";e.onStatus?.({type:"message_end",role:a})}break}case"message_update":{if(!v)break;if(o.assistantMessageEvent.type==="text_delta"){const c=(o.assistantMessageEvent.delta??"").length;c>0&&console.log(`[tg-agent] stream delta session=${e.sessionId} chars=${c}`),f=Date.now()}o.assistantMessageEvent.type==="thinking_delta"&&(f=Date.now());break}case"tool_execution_start":{S.set(o.toolCallId,Date.now()),console.log(`[tg-agent] tool start name=${o.toolName} id=${o.toolCallId}`),A+=1,f=Date.now(),e.onStatus?.({type:"tool_start",name:o.toolName,id:o.toolCallId,args:o.args});break}case"tool_execution_end":{const a=S.get(o.toolCallId),c=a?Date.now()-a:0;console.log(`[tg-agent] tool end name=${o.toolName} id=${o.toolCallId} ok=${!o.isError} durationMs=${c}`),S.delete(o.toolCallId),A=Math.max(0,A-1),f=Date.now(),e.onStatus?.({type:"tool_end",name:o.toolName,id:o.toolCallId,ok:!o.isError,durationMs:c});break}default:break}});let h=null,_=null,y=!1,E=0,T=!1,f=Date.now(),A=0;try{const o=Date.now();h=setInterval(()=>{const g=Date.now()-o;console.log(`[tg-agent] prompt running session=${e.sessionId} elapsedMs=${g} streaming=${l.isStreaming}`),e.onStatus?.({type:"heartbeat",elapsedMs:g,streaming:l.isStreaming})},15e3),_=setInterval(()=>{if(y||A>0)return;const g=Date.now()-f,x=l.isStreaming?d.modelTimeoutStreamingMs:d.modelTimeoutMs,D=Math.max(1e3,x);g>=D&&(y=!0,E=D,T=l.isStreaming,console.warn(`[tg-agent] model timeout session=${e.sessionId} elapsedMs=${g}`),l.abort())},2e3);try{if(await l.prompt(e.prompt,e.images&&e.images.length>0?{images:e.images}:void 0),y)throw new Error(`Model request timed out after ${d.modelTimeoutMs}ms`)}catch(g){if(b)throw new Error("Cancelled by user.");if(y){const x=T?" (streaming)":"",D=E||d.modelTimeoutMs;throw new Error(`Model request timed out after ${D}ms${x}`)}throw g}h&&clearInterval(h);const a=Z(l.messages);if(a){const g=a?.stopReason,x=a?.errorMessage;if(g==="error")throw console.warn(`[tg-agent] model error session=${e.sessionId} error=${x??"unknown"}`),new Error(x??"Model error without details.")}const c=Y(l.messages,l);if(!c){const g=W(l.messages);throw console.warn(`[tg-agent] empty response session=${e.sessionId} ${g}`),new Error("No assistant response.")}return{text:c,sessionFile:t,sessionId:l.sessionId,modelProvider:l.model?.provider??w,modelId:l.model?.id??p,modelFallbackMessage:$}}finally{h&&clearInterval(h),_&&clearInterval(_),O(),u.flushPendingToolResults?.(),l.dispose()}}export{ae as runPiAgentPrompt};
@@ -1,99 +1,5 @@
- import { completeSimple, getModel, } from "@mariozechner/pi-ai";
- import { config } from "./config.js";
- import { resolveApiKeyForProvider } from "./auth.js";
- const CODEX_MODEL_ALIASES = {
-     "codex-mini-latest": "gpt-5.1-codex-mini",
-     "codex-max-latest": "gpt-5.1-codex-max",
-     "codex-latest": "gpt-5.2-codex",
- };
- function splitModelRef(ref, fallbackProvider) {
-     const trimmed = ref.trim();
-     if (!trimmed) {
-         return { provider: fallbackProvider, modelId: "" };
-     }
-     if (trimmed.includes("/")) {
-         const [provider, modelId] = trimmed.split("/", 2);
-         return { provider: provider.trim() || fallbackProvider, modelId: modelId.trim() };
-     }
-     return { provider: fallbackProvider, modelId: trimmed };
- }
- function normalizeModelId(provider, modelId) {
-     if (provider !== "openai-codex") {
-         return modelId;
-     }
-     const mapped = CODEX_MODEL_ALIASES[modelId];
-     return mapped ?? modelId;
- }
- function toAssistantMessage(text, model, timestamp) {
-     return {
-         role: "assistant",
-         content: [{ type: "text", text }],
-         api: model.api,
-         provider: model.provider,
-         model: model.id,
-         usage: {
-             input: 0,
-             output: 0,
-             cacheRead: 0,
-             cacheWrite: 0,
-             totalTokens: 0,
-             cost: {
-                 input: 0,
-                 output: 0,
-                 cacheRead: 0,
-                 cacheWrite: 0,
-                 total: 0,
-             },
-         },
-         stopReason: "stop",
-         timestamp,
-     };
- }
- function buildContextMessages(localMessages, model) {
-     return localMessages.map((message) => {
-         if (message.role === "user") {
-             return {
-                 role: "user",
-                 content: message.content,
-                 timestamp: message.ts,
-             };
-         }
-         return toAssistantMessage(message.content, model, message.ts);
-     });
- }
- function extractText(assistant) {
-     const textBlocks = assistant.content.filter((block) => block.type === "text");
-     const text = textBlocks.map((block) => block.text.trim()).filter(Boolean).join("\n\n");
-     if (text) {
-         return text;
-     }
-     const thinkingBlocks = assistant.content.filter((block) => block.type === "thinking");
-     const thinking = thinkingBlocks
-         .map((block) => (block.type === "thinking" ? block.thinking.trim() : ""))
-         .filter(Boolean)
-         .join("\n\n");
-     return thinking || "Empty response.";
- }
- export async function runPiCompletion(localMessages, systemPrompt, sessionId) {
-     const providerRaw = config.modelProvider || "openai-codex";
-     const modelRef = config.modelRef || "gpt-5.2";
-     const { provider, modelId: rawModelId } = splitModelRef(modelRef, providerRaw);
-     const modelId = normalizeModelId(provider, rawModelId || "gpt-5.2");
-     const model = getModel(provider, modelId);
-     const { apiKey } = resolveApiKeyForProvider(provider);
-     const context = {
-         systemPrompt,
-         messages: buildContextMessages(localMessages, model),
-     };
-     const assistant = await completeSimple(model, context, {
-         apiKey,
-         sessionId,
-         maxTokens: config.maxOutputTokens || undefined,
-     });
-     return {
-         text: extractText(assistant),
-         assistant,
-         provider,
-         modelId: model.id,
-     };
- }
+ import{completeSimple as u,getModel as x}from"@mariozechner/pi-ai";import{config as p}from"./config.js";import{resolveApiKeyForProvider as f}from"./auth.js";const g={"codex-mini-latest":"gpt-5.1-codex-mini","codex-max-latest":"gpt-5.1-codex-max","codex-latest":"gpt-5.2-codex"};function h(n,e){const t=n.trim();if(!t)return{provider:e,modelId:""};if(t.includes("/")){const[i,r]=t.split("/",2);return{provider:i.trim()||e,modelId:r.trim()}}return{provider:e,modelId:t}}function k(n,e){return n!=="openai-codex"?e:g[e]??e}function v(n,e,t){return{role:"assistant",content:[{type:"text",text:n}],api:e.api,provider:e.provider,model:e.id,usage:{input:0,output:0,cacheRead:0,cacheWrite:0,totalTokens:0,cost:{input:0,output:0,cacheRead:0,cacheWrite:0,total:0}},stopReason:"stop",timestamp:t}}function y(n,e){return n.map(t=>t.role==="user"?{role:"user",content:t.content,timestamp:t.ts}:v(t.content,e,t.ts))}function I(n){const t=n.content.filter(o=>o.type==="text").map(o=>o.text.trim()).filter(Boolean).join(`
+
+ `);return t||n.content.filter(o=>o.type==="thinking").map(o=>o.type==="thinking"?o.thinking.trim():"").filter(Boolean).join(`
+
+ `)||"Empty response."}async function A(n,e,t){const i=p.modelProvider||"openai-codex",r=p.modelRef||"gpt-5.2",{provider:o,modelId:d}=h(r,i),m=k(o,d||"gpt-5.2"),c=x(o,m),{apiKey:a}=f(o),l={systemPrompt:e,messages:y(n,c)},s=await u(c,l,{apiKey:a,sessionId:t,maxTokens:p.maxOutputTokens||void 0});return{text:I(s),assistant:s,provider:o,modelId:c.id}}export{A as runPiCompletion};
package/dist/proxy.js CHANGED
@@ -1,19 +1 @@
- import { ProxyAgent, setGlobalDispatcher } from "undici";
- import { resolveFetchProxyInfo } from "./auth.js";
- let applied = false;
- let appliedInfo = null;
- export function applyFetchProxy() {
-     if (applied) {
-         return appliedInfo;
-     }
-     applied = true;
-     const proxy = resolveFetchProxyInfo();
-     if (!proxy) {
-         appliedInfo = null;
-         return null;
-     }
-     const dispatcher = new ProxyAgent(proxy.url);
-     setGlobalDispatcher(dispatcher);
-     appliedInfo = proxy;
-     return proxy;
- }
+ import{ProxyAgent as o,setGlobalDispatcher as n}from"undici";import{resolveFetchProxyInfo as p}from"./auth.js";let t=!1,e=null;function f(){if(t)return e;t=!0;const r=p();if(!r)return e=null,null;const l=new o(r.url);return n(l),e=r,r}export{f as applyFetchProxy};
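applyFetchProxy is idempotent: the first call resolves the proxy settings and, when a proxy URL is configured, installs an undici ProxyAgent as the global fetch dispatcher; subsequent calls return the cached result. A usage sketch (not part of the diff):

// The path follows the "package/dist/proxy.js" header above; calling twice is safe.
import { applyFetchProxy } from "./dist/proxy.js";

const proxy = applyFetchProxy();
console.log(proxy ? `fetch proxied via ${proxy.url}` : "no proxy configured");
applyFetchProxy(); // no-op: returns the same cached info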