teleton 0.6.0 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -31
- package/dist/{chunk-6L6KGATM.js → chunk-3YM57ZAV.js} +1638 -1749
- package/dist/{chunk-D5I7GBV7.js → chunk-FNV5FF35.js} +22 -13
- package/dist/{chunk-4IPJ25HE.js → chunk-HZNZT4TG.js} +1106 -711
- package/dist/chunk-LRCPA7SC.js +149 -0
- package/dist/chunk-ND2X5FWB.js +368 -0
- package/dist/chunk-NERLQY2H.js +421 -0
- package/dist/{chunk-YBA6IBGT.js → chunk-OCLG5GKI.js} +24 -24
- package/dist/{chunk-ADCMUNYU.js → chunk-RBU6JXD3.js} +60 -55
- package/dist/chunk-RCMD3U65.js +141 -0
- package/dist/{chunk-ECSCVEQQ.js → chunk-UCN6TI25.js} +7 -3
- package/dist/{chunk-WL2Q3VRD.js → chunk-UDD7FYOU.js} +12 -4
- package/dist/chunk-VAUJSSD3.js +20 -0
- package/dist/chunk-XBE4JB7C.js +8 -0
- package/dist/{chunk-GDCODBNO.js → chunk-XBKSS6DM.js} +2 -16
- package/dist/cli/index.js +878 -433
- package/dist/client-3VWE7NC4.js +29 -0
- package/dist/{get-my-gifts-KVULMBJ3.js → get-my-gifts-RI7FAXAL.js} +3 -1
- package/dist/index.js +17 -11
- package/dist/{memory-TVDOGQXS.js → memory-5SS3Q5EA.js} +7 -5
- package/dist/{migrate-QIEMPOMT.js → migrate-M7SJMDOL.js} +14 -9
- package/dist/{server-RSWVCVY3.js → server-DS5OARW6.js} +174 -85
- package/dist/setup-server-C7ZTPHD5.js +934 -0
- package/dist/{task-dependency-resolver-72DLY2HV.js → task-dependency-resolver-WKZWJLLM.js} +19 -15
- package/dist/{task-executor-VXB6DAV2.js → task-executor-PD3H4MLO.js} +4 -1
- package/dist/tool-adapter-Y3TCEQOC.js +145 -0
- package/dist/{tool-index-DKI2ZNOU.js → tool-index-MIVK3D7H.js} +14 -9
- package/dist/{transcript-7V4UNID4.js → transcript-UDJZP6NK.js} +2 -1
- package/dist/web/assets/complete-fZLnb5Ot.js +1 -0
- package/dist/web/assets/index-BqwoDycr.js +72 -0
- package/dist/web/assets/index-CRDIf07k.css +1 -0
- package/dist/web/assets/index.es-D81xLR29.js +11 -0
- package/dist/web/assets/login-telegram-BP7CJDmx.js +1 -0
- package/dist/web/assets/run-DOrDowjK.js +1 -0
- package/dist/web/index.html +2 -2
- package/package.json +7 -3
- package/dist/chunk-2QUJLHCZ.js +0 -362
- package/dist/web/assets/index-BNhrx9S1.js +0 -67
- package/dist/web/assets/index-CqrrRLOh.css +0 -1
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
// src/config/providers.ts
// Registry of every LLM provider this build can be configured with.
// Field notes (grounded in this chunk's own consumers):
//   envVar       - environment variable holding the provider's API key ("" = keyless)
//   keyPrefix    - expected key prefix; null disables the prefix check
//                  (enforced by validateApiKeyFormat below)
//   keyHint      - example key shape shown when prompting the user
//   consoleUrl   - where to create/manage keys ("" = not applicable)
//   defaultModel - primary chat model; utilityModel is the cheaper companion
//   toolLimit    - 128 or null; presumably a cap on tools advertised per
//                  request — confirm against callers
//   piAiProvider - provider id handed to the pi-ai SDK
var PROVIDER_REGISTRY = {
  anthropic: {
    id: "anthropic",
    displayName: "Anthropic (Claude)",
    envVar: "ANTHROPIC_API_KEY",
    keyPrefix: "sk-ant-",
    keyHint: "sk-ant-api03-...",
    consoleUrl: "https://console.anthropic.com/",
    defaultModel: "claude-opus-4-5-20251101",
    utilityModel: "claude-3-5-haiku-20241022",
    toolLimit: null,
    piAiProvider: "anthropic"
  },
  openai: {
    id: "openai",
    displayName: "OpenAI (GPT-4o)",
    envVar: "OPENAI_API_KEY",
    keyPrefix: "sk-",
    keyHint: "sk-proj-...",
    consoleUrl: "https://platform.openai.com/api-keys",
    defaultModel: "gpt-4o",
    utilityModel: "gpt-4o-mini",
    toolLimit: 128,
    piAiProvider: "openai"
  },
  google: {
    id: "google",
    displayName: "Google (Gemini)",
    envVar: "GOOGLE_API_KEY",
    keyPrefix: null,
    keyHint: "AIza...",
    consoleUrl: "https://aistudio.google.com/apikey",
    defaultModel: "gemini-2.5-flash",
    utilityModel: "gemini-2.0-flash-lite",
    toolLimit: 128,
    piAiProvider: "google"
  },
  xai: {
    id: "xai",
    displayName: "xAI (Grok)",
    envVar: "XAI_API_KEY",
    keyPrefix: "xai-",
    keyHint: "xai-...",
    consoleUrl: "https://console.x.ai/",
    defaultModel: "grok-3",
    utilityModel: "grok-3-mini-fast",
    toolLimit: 128,
    piAiProvider: "xai"
  },
  groq: {
    id: "groq",
    displayName: "Groq",
    envVar: "GROQ_API_KEY",
    keyPrefix: "gsk_",
    keyHint: "gsk_...",
    consoleUrl: "https://console.groq.com/keys",
    defaultModel: "llama-3.3-70b-versatile",
    utilityModel: "llama-3.1-8b-instant",
    toolLimit: 128,
    piAiProvider: "groq"
  },
  openrouter: {
    id: "openrouter",
    displayName: "OpenRouter",
    envVar: "OPENROUTER_API_KEY",
    keyPrefix: "sk-or-",
    keyHint: "sk-or-v1-...",
    consoleUrl: "https://openrouter.ai/keys",
    defaultModel: "anthropic/claude-opus-4.5",
    utilityModel: "google/gemini-2.5-flash-lite",
    toolLimit: 128,
    piAiProvider: "openrouter"
  },
  moonshot: {
    id: "moonshot",
    displayName: "Moonshot (Kimi K2.5)",
    envVar: "MOONSHOT_API_KEY",
    keyPrefix: "sk-",
    keyHint: "sk-...",
    consoleUrl: "https://platform.moonshot.ai/",
    defaultModel: "kimi-k2.5",
    utilityModel: "kimi-k2.5",
    toolLimit: 128,
    piAiProvider: "moonshot"
  },
  mistral: {
    id: "mistral",
    displayName: "Mistral AI",
    envVar: "MISTRAL_API_KEY",
    keyPrefix: null,
    keyHint: "...",
    consoleUrl: "https://console.mistral.ai/api-keys",
    defaultModel: "devstral-small-2507",
    utilityModel: "ministral-8b-latest",
    toolLimit: 128,
    piAiProvider: "mistral"
  },
  // Keyless provider: validateApiKeyFormat skips it entirely.
  cocoon: {
    id: "cocoon",
    displayName: "Cocoon Network (Decentralized)",
    envVar: "",
    keyPrefix: null,
    keyHint: "No API key needed \u2014 pays in TON",
    consoleUrl: "https://cocoon.network",
    defaultModel: "Qwen/Qwen3-32B",
    utilityModel: "Qwen/Qwen3-32B",
    toolLimit: 128,
    piAiProvider: "cocoon"
  },
  // Keyless provider; "auto" model ids are resolved against whatever the
  // local server reports (see the agent client chunk's model discovery).
  local: {
    id: "local",
    displayName: "Local (Ollama, vLLM, LM Studio...)",
    envVar: "",
    keyPrefix: null,
    keyHint: "No API key needed",
    consoleUrl: "",
    defaultModel: "auto",
    utilityModel: "auto",
    toolLimit: 128,
    piAiProvider: "local"
  }
};
|
|
124
|
+
/**
 * Look up the metadata record for a provider id.
 * @param {string} provider - key into PROVIDER_REGISTRY
 * @returns the registry entry for that provider
 * @throws {Error} when the id is not registered
 */
function getProviderMetadata(provider) {
  const entry = PROVIDER_REGISTRY[provider];
  if (entry) return entry;
  throw new Error(`Unknown provider: ${provider}`);
}
|
|
131
|
+
/** List every registered provider's metadata, in registry declaration order. */
function getSupportedProviders() {
  return Object.keys(PROVIDER_REGISTRY).map((id) => PROVIDER_REGISTRY[id]);
}
|
|
134
|
+
/**
 * Validate the shape of an API key for a provider.
 * @param {string} provider - provider id
 * @param {string} key - raw key as entered by the user
 * @returns {string|undefined} an error message, or undefined when acceptable
 */
function validateApiKeyFormat(provider, key) {
  const entry = PROVIDER_REGISTRY[provider];
  if (!entry) return `Unknown provider: ${provider}`;
  // Keyless providers accept anything, including no key at all.
  const keyless = provider === "cocoon" || provider === "local";
  if (keyless) return undefined;
  const emptyKey = !key || key.trim().length === 0;
  if (emptyKey) return "API key is required";
  // Prefix check only applies when the registry declares one.
  if (entry.keyPrefix && !key.startsWith(entry.keyPrefix)) {
    return `Invalid format (should start with ${entry.keyPrefix})`;
  }
  return undefined;
}
|
|
144
|
+
|
|
145
|
+
// Public surface of the provider-registry chunk.
export {
  getProviderMetadata,
  getSupportedProviders,
  validateApiKeyFormat
};
|
|
@@ -0,0 +1,368 @@
|
|
|
1
|
+
import {
|
|
2
|
+
getProviderMetadata
|
|
3
|
+
} from "./chunk-LRCPA7SC.js";
|
|
4
|
+
import {
|
|
5
|
+
appendToTranscript,
|
|
6
|
+
readTranscript
|
|
7
|
+
} from "./chunk-OCLG5GKI.js";
|
|
8
|
+
import {
|
|
9
|
+
fetchWithTimeout
|
|
10
|
+
} from "./chunk-VAUJSSD3.js";
|
|
11
|
+
import {
|
|
12
|
+
createLogger
|
|
13
|
+
} from "./chunk-RCMD3U65.js";
|
|
14
|
+
|
|
15
|
+
// src/agent/client.ts
|
|
16
|
+
import {
|
|
17
|
+
complete,
|
|
18
|
+
getModel
|
|
19
|
+
} from "@mariozechner/pi-ai";
|
|
20
|
+
|
|
21
|
+
// src/agent/schema-sanitizer.ts
// JSON-Schema keywords stripped because Gemini's function-declaration
// schema subset rejects them.
var UNSUPPORTED_KEYS = /* @__PURE__ */ new Set([
  "$schema",
  "$id",
  "$ref",
  "$defs",
  "$anchor",
  "title",
  "default",
  "examples"
]);
/**
 * Recursively rewrite a JSON schema into the subset Gemini accepts:
 * strips unsupported keywords, flattens `anyOf` unions, and converts
 * `const` into a single-value `enum`. Returns a new object; the input
 * is not mutated (nested non-object values are shared as-is).
 */
function sanitizeSchema(schema) {
  // Primitives and null pass through untouched.
  if (!schema || typeof schema !== "object") return schema;
  const out = { ...schema };
  UNSUPPORTED_KEYS.forEach((k) => delete out[k]);
  const variants = Array.isArray(out.anyOf) ? out.anyOf : null;
  if (variants) {
    const concrete = variants.filter((v) => v.type !== "null");
    const everyConst = concrete.length > 0 && concrete.every((v) => v.const !== undefined);
    if (everyConst) {
      // A union of const values collapses into a plain enum.
      delete out.anyOf;
      out.type = concrete[0]?.type || "string";
      out.enum = concrete.map((v) => v.const);
    } else if (concrete.length > 0) {
      // Otherwise keep only the first non-null variant's shape.
      delete out.anyOf;
      const head = concrete[0];
      if (head.type) out.type = head.type;
      if (head.enum) out.enum = head.enum;
      if (head.description && !out.description) {
        out.description = head.description;
      }
    }
  }
  // anyOf survives only when every variant was "null"; force a string type.
  if (Array.isArray(out.anyOf)) {
    delete out.anyOf;
    if (!out.type) out.type = "string";
  }
  if (out.const !== undefined) {
    // Gemini has no `const`; express it as a one-element enum and infer
    // a type from the JS value when none is present.
    out.enum = [out.const];
    if (!out.type) {
      const t = typeof out.const;
      out.type = t === "string" ? "string" : t === "boolean" ? "boolean" : "number";
    }
    delete out.const;
  }
  if (out.properties && typeof out.properties === "object") {
    const cleaned = {};
    for (const [name, sub] of Object.entries(out.properties)) {
      const isPlainObject = sub && typeof sub === "object" && !Array.isArray(sub);
      cleaned[name] = isPlainObject ? sanitizeSchema(sub) : sub;
    }
    out.properties = cleaned;
  }
  if (out.items && typeof out.items === "object" && !Array.isArray(out.items)) {
    out.items = sanitizeSchema(out.items);
  }
  return out;
}
|
|
83
|
+
/**
 * Produce Gemini-safe copies of tool declarations: each tool's parameter
 * schema is shallow-copied and run through sanitizeSchema. The input
 * array and its tools are left untouched.
 */
function sanitizeToolsForGemini(tools) {
  const adapted = [];
  for (const tool of tools) {
    adapted.push({
      ...tool,
      parameters: sanitizeSchema({ ...tool.parameters })
    });
  }
  return adapted;
}
|
|
91
|
+
|
|
92
|
+
// src/agent/client.ts
// Shared logger for all LLM-client activity in this chunk.
var log = createLogger("LLM");
|
|
94
|
+
/**
 * Detect an Anthropic OAuth access token (prefix "sk-ant-oat01-"),
 * as opposed to a regular API key.
 * @param {string} apiKey - credential to inspect
 * @param {string} [provider] - when given and not "anthropic", result is always false
 * @returns {boolean}
 */
function isOAuthToken(apiKey, provider) {
  const anthropicScoped = !provider || provider === "anthropic";
  return anthropicScoped && apiKey.startsWith("sk-ant-oat01-");
}
|
|
98
|
+
/**
 * Map a configured key to what the transport layer actually needs:
 * local servers get a fixed placeholder, cocoon sends no key at all,
 * every other provider uses the raw configured key unchanged.
 */
function getEffectiveApiKey(provider, rawKey) {
  switch (provider) {
    case "local":
      return "local";
    case "cocoon":
      return "";
    default:
      return rawKey;
  }
}
|
|
103
|
+
// Cache of resolved pi-ai model descriptors, keyed by "provider:modelId".
var modelCache = /* @__PURE__ */ new Map();
// Descriptors discovered from a running Cocoon client, keyed by model id;
// populated by registerCocoonModels below.
var COCOON_MODELS = {};
|
|
105
|
+
/**
 * Discover the models exposed by a local Cocoon client at
 * `http://localhost:{httpPort}/v1/models` and register a pi-ai descriptor
 * for each in COCOON_MODELS. Best-effort: any failure returns [].
 *
 * Fix: the sibling registerLocalModels bounds its probe with
 * fetchWithTimeout and caps the discovered list at 500 entries, but this
 * function used a bare fetch (which can hang indefinitely if the cocoon
 * client is wedged) and had no cap. Both are now applied here for
 * consistency.
 *
 * @param {number} httpPort - port of the local cocoon client's HTTP API
 * @returns {Promise<string[]>} ids of the models that were registered
 */
async function registerCocoonModels(httpPort) {
  try {
    const res = await fetchWithTimeout(`http://localhost:${httpPort}/v1/models`, { timeoutMs: 1e4 });
    if (!res.ok) return [];
    const body = await res.json();
    // Accept either OpenAI-style { data: [...] } or { models: [...] }.
    const models = body.data || body.models || [];
    if (!Array.isArray(models)) return [];
    const ids = [];
    // Cap at 500 to guard against a misbehaving server flooding the registry.
    for (const m of models.slice(0, 500)) {
      const id = m.id || m.name || String(m);
      COCOON_MODELS[id] = {
        id,
        name: id,
        api: "openai-completions",
        provider: "cocoon",
        baseUrl: `http://localhost:${httpPort}/v1`,
        reasoning: false,
        input: ["text"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        contextWindow: 128e3,
        maxTokens: 4096,
        compat: {
          supportsStore: false,
          supportsDeveloperRole: false,
          supportsReasoningEffort: false
        }
      };
      ids.push(id);
    }
    return ids;
  } catch {
    // Discovery is deliberately best-effort: an unreachable cocoon client
    // simply registers nothing.
    return [];
  }
}
|
|
139
|
+
// Descriptors discovered from a user-supplied OpenAI-compatible server,
// keyed by model id; populated by registerLocalModels below.
var LOCAL_MODELS = {};
/**
 * Probe `${baseUrl}/models` on an OpenAI-compatible local server (Ollama,
 * vLLM, LM Studio, ...) and register a pi-ai descriptor for everything it
 * reports. Best-effort: any failure (bad URL, timeout, non-JSON) yields [].
 *
 * @param {string} baseUrl - server base URL; must be http(s)
 * @returns {Promise<string[]>} ids of the models that were registered
 */
async function registerLocalModels(baseUrl) {
  try {
    // Reject non-HTTP schemes (file:, ftp:, ...) before touching the network.
    const parsed = new URL(baseUrl);
    if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
      log.warn(`Local LLM base_url must use http or https (got ${parsed.protocol})`);
      return [];
    }
    const root = baseUrl.replace(/\/+$/, "");
    const res = await fetchWithTimeout(`${root}/models`, { timeoutMs: 1e4 });
    if (!res.ok) return [];
    const payload = await res.json();
    // Accept either OpenAI-style { data: [...] } or { models: [...] }.
    const listed = payload.data || payload.models || [];
    if (!Array.isArray(listed)) return [];
    const registered = [];
    // Cap at 500 to guard against a misbehaving server flooding the registry.
    for (const entry of listed.slice(0, 500)) {
      const id = entry.id || entry.name || String(entry);
      LOCAL_MODELS[id] = {
        id,
        name: id,
        api: "openai-completions",
        provider: "local",
        baseUrl: root,
        reasoning: false,
        input: ["text"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        contextWindow: 128e3,
        maxTokens: 4096,
        compat: {
          supportsStore: false,
          supportsDeveloperRole: false,
          supportsReasoningEffort: false,
          supportsStrictMode: false,
          maxTokensField: "max_tokens"
        }
      };
      registered.push(id);
    }
    return registered;
  } catch {
    // Best-effort discovery; an unreachable server simply registers nothing.
    return [];
  }
}
|
|
183
|
+
// Hand-maintained pi-ai descriptors for Moonshot models; getProviderModel
// consults this table before falling through to pi-ai's getModel. Cost
// entries are zeroed — presumably spend is not tracked for this provider;
// confirm before relying on cost accounting.
var MOONSHOT_MODELS = {
  "kimi-k2.5": {
    id: "kimi-k2.5",
    name: "Kimi K2.5",
    api: "openai-completions",
    provider: "moonshot",
    baseUrl: "https://api.moonshot.ai/v1",
    reasoning: false,
    input: ["text", "image"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 256e3,
    maxTokens: 8192
  },
  "kimi-k2-thinking": {
    id: "kimi-k2-thinking",
    name: "Kimi K2 Thinking",
    api: "openai-completions",
    provider: "moonshot",
    baseUrl: "https://api.moonshot.ai/v1",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 256e3,
    maxTokens: 8192
  }
};
|
|
209
|
+
// Resolve a pi-ai model descriptor for (provider, modelId), with caching.
// Resolution order: modelCache -> dynamic registries (COCOON_MODELS /
// LOCAL_MODELS) -> static MOONSHOT_MODELS -> pi-ai getModel(modelId) ->
// pi-ai getModel(defaultModel). Throws when nothing resolves.
function getProviderModel(provider, modelId) {
  const cacheKey = `${provider}:${modelId}`;
  const cached = modelCache.get(cacheKey);
  if (cached) return cached;
  const meta = getProviderMetadata(provider);
  if (meta.piAiProvider === "cocoon") {
    let model = COCOON_MODELS[modelId];
    if (!model) {
      // Unknown id: fall back to the first discovered cocoon model.
      model = Object.values(COCOON_MODELS)[0];
      if (model) log.warn(`Cocoon model "${modelId}" not found, using "${model.id}"`);
    }
    if (model) {
      // NOTE: the fallback is cached under the *requested* id, so later
      // calls with the same unknown id resolve silently from the cache.
      modelCache.set(cacheKey, model);
      return model;
    }
    throw new Error("No Cocoon models available. Is the cocoon client running?");
  }
  if (meta.piAiProvider === "local") {
    let model = LOCAL_MODELS[modelId];
    if (!model) {
      // Same first-available fallback as the cocoon branch above.
      model = Object.values(LOCAL_MODELS)[0];
      if (model) log.warn(`Local model "${modelId}" not found, using "${model.id}"`);
    }
    if (model) {
      modelCache.set(cacheKey, model);
      return model;
    }
    throw new Error("No local models available. Is the LLM server running?");
  }
  if (meta.piAiProvider === "moonshot") {
    // Static table lookup; unknown ids fall back to the provider default.
    const model = MOONSHOT_MODELS[modelId] ?? MOONSHOT_MODELS[meta.defaultModel];
    if (model) {
      modelCache.set(cacheKey, model);
      return model;
    }
    // No match at all: fall through to the generic getModel path below.
  }
  try {
    const model = getModel(meta.piAiProvider, modelId);
    if (!model) {
      throw new Error(`getModel returned undefined for ${provider}/${modelId}`);
    }
    modelCache.set(cacheKey, model);
    return model;
  } catch (e) {
    log.warn(`Model ${modelId} not found for ${provider}, falling back to ${meta.defaultModel}`);
    const fallbackKey = `${provider}:${meta.defaultModel}`;
    const fallbackCached = modelCache.get(fallbackKey);
    if (fallbackCached) return fallbackCached;
    try {
      const model = getModel(meta.piAiProvider, meta.defaultModel);
      if (!model) {
        throw new Error(
          `Fallback model ${meta.defaultModel} also returned undefined for ${provider}`
        );
      }
      // Cached under the default model's key (not cacheKey), so a bad id
      // re-runs getModel (and warns) on every call until it hits this cache.
      modelCache.set(fallbackKey, model);
      return model;
    } catch {
      throw new Error(
        `Could not find model ${modelId} or fallback ${meta.defaultModel} for ${provider}`
      );
    }
  }
}
|
|
273
|
+
/**
 * Resolve the cheap "utility" model for a provider, preferring an explicit
 * override when one is supplied (any falsy override uses the registry default).
 */
function getUtilityModel(provider, overrideModel) {
  const { utilityModel } = getProviderMetadata(provider);
  return getProviderModel(provider, overrideModel || utilityModel);
}
|
|
278
|
+
// Run one completion against the configured provider, applying
// provider-specific adaptations (Gemini schema sanitizing, Cocoon
// prompt-injected tools, <think> stripping), optionally persisting the
// response to the session transcript. Returns { message, text, context }
// where context has the assistant response appended to messages.
async function chatWithContext(config, options) {
  const provider = config.provider || "anthropic";
  const model = getProviderModel(provider, config.model);
  const isCocoon = provider === "cocoon";
  // Gemini rejects several JSON-Schema keywords; sanitize tool schemas first.
  let tools = provider === "google" && options.tools ? sanitizeToolsForGemini(options.tools) : options.tools;
  let systemPrompt = options.systemPrompt || options.context.systemPrompt || "";
  let cocoonAllowedTools;
  if (isCocoon) {
    // "/no_think" prefix suppresses the model's thinking mode on cocoon.
    systemPrompt = "/no_think\n" + systemPrompt;
    if (tools && tools.length > 0) {
      // Cocoon has no native tool calling: describe tools in the system
      // prompt instead, and remember which names are legal so synthetic
      // calls can be validated after the response comes back.
      cocoonAllowedTools = new Set(tools.map((t) => t.name));
      const { injectToolsIntoSystemPrompt } = await import("./tool-adapter-Y3TCEQOC.js");
      systemPrompt = injectToolsIntoSystemPrompt(systemPrompt, tools);
      tools = void 0;
    }
  }
  const context = {
    ...options.context,
    systemPrompt,
    tools
  };
  // Moonshot is pinned to temperature 1 — presumably an API requirement;
  // confirm against Moonshot's docs before changing.
  const temperature = provider === "moonshot" ? 1 : options.temperature ?? config.temperature;
  const completeOptions = {
    apiKey: getEffectiveApiKey(provider, config.api_key),
    maxTokens: options.maxTokens ?? config.max_tokens,
    temperature,
    sessionId: options.sessionId
  };
  if (isCocoon) {
    const { stripCocoonPayload } = await import("./tool-adapter-Y3TCEQOC.js");
    completeOptions.onPayload = stripCocoonPayload;
  }
  const response = await complete(model, context, completeOptions);
  if (isCocoon) {
    // Reconstruct tool calls that the model emitted as <tool_call> text.
    const textBlock = response.content.find((b) => b.type === "text");
    if (textBlock?.type === "text" && textBlock.text.includes("<tool_call>")) {
      const { parseToolCallsFromText, extractPlainText } = await import("./tool-adapter-Y3TCEQOC.js");
      const syntheticCalls = parseToolCallsFromText(textBlock.text, cocoonAllowedTools);
      if (syntheticCalls.length > 0) {
        const plainText = extractPlainText(textBlock.text);
        // Replace the raw text block with cleaned text + synthetic calls.
        response.content = [
          ...plainText ? [{ type: "text", text: plainText }] : [],
          ...syntheticCalls
        ];
        response.stopReason = "toolUse";
      }
    }
  }
  // Strip <think>...</think> spans some models leak into visible text.
  const thinkRe = /<think>[\s\S]*?<\/think>/g;
  for (const block of response.content) {
    if (block.type === "text" && block.text.includes("<think>")) {
      block.text = block.text.replace(thinkRe, "").trim();
    }
  }
  if (options.persistTranscript && options.sessionId) {
    appendToTranscript(options.sessionId, response);
  }
  // Convenience extraction of the first text block (after the cocoon/think
  // rewrites above), "" when the response has none.
  const textContent = response.content.find((block) => block.type === "text");
  const text = textContent?.type === "text" ? textContent.text : "";
  const updatedContext = {
    ...context,
    messages: [...context.messages, response]
  };
  return {
    message: response,
    text,
    context: updatedContext
  };
}
|
|
347
|
+
/**
 * Build a fresh conversation context from a session's persisted transcript.
 * @param {string} sessionId - transcript to load
 * @param {string} systemPrompt - system prompt for the rebuilt context
 */
function loadContextFromTranscript(sessionId, systemPrompt) {
  return {
    systemPrompt,
    messages: readTranscript(sessionId)
  };
}
|
|
354
|
+
// Stub that always returns null; kept exported for backward compatibility
// with callers that still import it. The config argument is ignored.
function createClient(_config) {
  return null;
}
|
|
357
|
+
|
|
358
|
+
// Public surface of the agent-client chunk.
export {
  isOAuthToken,
  getEffectiveApiKey,
  registerCocoonModels,
  registerLocalModels,
  getProviderModel,
  getUtilityModel,
  chatWithContext,
  loadContextFromTranscript,
  createClient
};
|