@yeaft/webchat-agent 0.1.398 → 0.1.408
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/crew/role-query.js +10 -6
- package/package.json +3 -1
- package/sdk/query.js +3 -1
- package/unify/cli.js +537 -0
- package/unify/config.js +256 -0
- package/unify/debug-trace.js +398 -0
- package/unify/engine.js +319 -0
- package/unify/index.js +21 -0
- package/unify/init.js +147 -0
- package/unify/llm/adapter.js +186 -0
- package/unify/llm/anthropic.js +322 -0
- package/unify/llm/chat-completions.js +315 -0
- package/unify/models.js +167 -0
- package/unify/prompts.js +61 -0
package/unify/models.js
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* models.js — Model ID registry for Yeaft Unify
|
|
3
|
+
*
|
|
4
|
+
* Maps model IDs (e.g. "gpt-5", "claude-sonnet-4-20250514") to their
|
|
5
|
+
* adapter type, API base URL, and capabilities.
|
|
6
|
+
*
|
|
7
|
+
* Yeaft does not provide its own models. The "model" field is always a
|
|
8
|
+
* model ID from an external provider. This registry lets Yeaft auto-detect
|
|
9
|
+
* the correct adapter and endpoint from just the model ID, so users only
|
|
10
|
+
* need to set YEAFT_MODEL=gpt-5 without configuring adapter/baseUrl separately.
|
|
11
|
+
*
|
|
12
|
+
* Unknown model IDs return null — caller falls back to env-based detection.
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
/**
|
|
16
|
+
* @typedef {Object} ModelInfo
|
|
17
|
+
* @property {'anthropic' | 'chat-completions'} adapter — Which adapter to use
|
|
18
|
+
* @property {string} baseUrl — API endpoint base URL
|
|
19
|
+
* @property {number} contextWindow — Max context tokens
|
|
20
|
+
* @property {number} maxOutputTokens — Max output tokens
|
|
21
|
+
* @property {string} displayName — Human-readable model name
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
// Shared endpoint URLs — each provider appears several times below.
const ANTHROPIC_BASE_URL = 'https://api.anthropic.com';
const OPENAI_BASE_URL = 'https://api.openai.com/v1';
const DEEPSEEK_BASE_URL = 'https://api.deepseek.com';
const GEMINI_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/openai';

/**
 * Build one ModelInfo record.
 *
 * @param {'anthropic' | 'chat-completions'} adapter — Which adapter to use
 * @param {string} baseUrl — API endpoint base URL
 * @param {number} contextWindow — Max context tokens
 * @param {number} maxOutputTokens — Max output tokens
 * @param {string} displayName — Human-readable model name
 * @returns {ModelInfo}
 */
const modelInfo = (adapter, baseUrl, contextWindow, maxOutputTokens, displayName) =>
  ({ adapter, baseUrl, contextWindow, maxOutputTokens, displayName });

/** @type {Map<string, ModelInfo>} */
export const MODEL_REGISTRY = new Map(Object.entries({
  // ── Anthropic ──────────────────────────────────────────────────
  'claude-sonnet-4-20250514': modelInfo('anthropic', ANTHROPIC_BASE_URL, 200000, 16384, 'Claude Sonnet 4'),
  'claude-opus-4-20250514': modelInfo('anthropic', ANTHROPIC_BASE_URL, 200000, 16384, 'Claude Opus 4'),
  'claude-haiku-3-20250414': modelInfo('anthropic', ANTHROPIC_BASE_URL, 200000, 8192, 'Claude Haiku 3'),

  // ── OpenAI ─────────────────────────────────────────────────────
  'gpt-5': modelInfo('chat-completions', OPENAI_BASE_URL, 256000, 16384, 'GPT-5'),
  'gpt-5.4': modelInfo('chat-completions', OPENAI_BASE_URL, 272000, 16384, 'GPT-5.4'),
  'gpt-4.1': modelInfo('chat-completions', OPENAI_BASE_URL, 1047576, 32768, 'GPT-4.1'),
  'gpt-4.1-mini': modelInfo('chat-completions', OPENAI_BASE_URL, 1047576, 16384, 'GPT-4.1 Mini'),
  'gpt-4.1-nano': modelInfo('chat-completions', OPENAI_BASE_URL, 1047576, 16384, 'GPT-4.1 Nano'),
  'o3': modelInfo('chat-completions', OPENAI_BASE_URL, 200000, 100000, 'o3'),
  'o4-mini': modelInfo('chat-completions', OPENAI_BASE_URL, 200000, 100000, 'o4-mini'),

  // ── DeepSeek ───────────────────────────────────────────────────
  'deepseek-chat': modelInfo('chat-completions', DEEPSEEK_BASE_URL, 131072, 8192, 'DeepSeek Chat'),
  'deepseek-reasoner': modelInfo('chat-completions', DEEPSEEK_BASE_URL, 131072, 8192, 'DeepSeek Reasoner'),

  // ── Google (via OpenAI-compatible API) ─────────────────────────
  'gemini-2.5-pro': modelInfo('chat-completions', GEMINI_BASE_URL, 1048576, 65536, 'Gemini 2.5 Pro'),
  'gemini-2.5-flash': modelInfo('chat-completions', GEMINI_BASE_URL, 1048576, 65536, 'Gemini 2.5 Flash'),
}));
|
|
132
|
+
|
|
133
|
+
/**
 * Look up a model ID in the registry.
 *
 * @param {string} modelName — Model ID such as 'gpt-5' or 'claude-sonnet-4-20250514'
 * @returns {ModelInfo | null} — A defensive copy of the registry entry, or
 *   null when the name is empty or not registered
 */
export function resolveModel(modelName) {
  const entry = modelName ? MODEL_REGISTRY.get(modelName) : undefined;
  // Hand back a shallow copy so callers can't mutate the registry entry.
  return entry ? { ...entry } : null;
}
|
|
145
|
+
|
|
146
|
+
/**
 * Enumerate every registered model as a flat record (registry key plus
 * its ModelInfo fields).
 *
 * @returns {{ name: string, adapter: string, baseUrl: string, contextWindow: number, maxOutputTokens: number, displayName: string }[]}
 */
export function listModels() {
  // Array.from's mapping callback turns each [name, info] pair into one record.
  return Array.from(MODEL_REGISTRY, ([name, info]) => ({ name, ...info }));
}
|
|
158
|
+
|
|
159
|
+
/**
 * Report whether a model ID has an entry in the registry.
 *
 * @param {string} modelName — Candidate model ID
 * @returns {boolean} — True when the ID is registered
 */
export function isKnownModel(modelName) {
  return MODEL_REGISTRY.has(modelName);
}
|
package/unify/prompts.js
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* prompts.js — Bilingual system prompt templates
|
|
3
|
+
*
|
|
4
|
+
* Single source of truth for system prompts. Both engine.js and cli.js
|
|
5
|
+
* import buildSystemPrompt() from here. Supports 'en' and 'zh'.
|
|
6
|
+
*
|
|
7
|
+
* To add a new language: add a new key to PROMPTS with all required fields.
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
// ─── Prompt Templates ─────────────────────────────────────────
|
|
11
|
+
|
|
12
|
+
const PROMPTS = {
  en: {
    identity: 'You are Yeaft, a helpful AI assistant.',
    mode: (mode) => `Current mode: ${mode}`,
    date: (d) => `Date: ${d}`,
    work: 'You are in work mode. Break tasks into steps, execute them using tools, and report progress.',
    dream: 'You are in dream mode. Reflect on past conversations and consolidate memories.',
    tools: (names) => `Available tools: ${names}`,
  },
  zh: {
    identity: '你是 Yeaft,一个有用的 AI 助手。',
    mode: (mode) => `当前模式:${mode}`,
    date: (d) => `日期:${d}`,
    work: '你处于工作模式。将任务分解为步骤,使用工具执行,并报告进度。',
    dream: '你处于梦境模式。回顾过去的对话,整理和巩固记忆。',
    tools: (names) => `可用工具:${names}`,
  },
};

/** Supported language codes. */
export const SUPPORTED_LANGUAGES = Object.keys(PROMPTS);

/**
 * Build the system prompt for a given language and mode.
 *
 * Sections are joined with blank lines: identity, mode, date, then an
 * optional mode-specific instruction ('work'/'dream') and an optional
 * tool list.
 *
 * Robustness notes:
 * - Unknown languages fall back to English. The lookup uses Object.hasOwn
 *   so inherited Object.prototype keys (e.g. language = 'constructor')
 *   also fall back instead of throwing.
 * - toolNames may be omitted, empty, or explicitly null/undefined; the
 *   destructuring default only covers undefined, so null is guarded too
 *   (previously an explicit null threw a TypeError).
 *
 * @param {{ language?: string, mode?: string, toolNames?: string[] }} [params]
 * @returns {string}
 */
export function buildSystemPrompt({ language = 'en', mode = 'chat', toolNames = [] } = {}) {
  // Fallback to English for unknown (or prototype-inherited) languages.
  const lang = Object.hasOwn(PROMPTS, language) ? PROMPTS[language] : PROMPTS.en;

  // Destructuring defaults only replace undefined; tolerate an explicit null.
  const tools = toolNames ?? [];

  const parts = [
    lang.identity,
    lang.mode(mode),
    lang.date(new Date().toISOString().split('T')[0]),
  ];

  if (mode === 'work') {
    parts.push(lang.work);
  } else if (mode === 'dream') {
    parts.push(lang.dream);
  }

  if (tools.length > 0) {
    parts.push(lang.tools(tools.join(', ')));
  }

  return parts.join('\n\n');
}
|