tjs-lang 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTEXT.md +594 -0
- package/LICENSE +190 -0
- package/README.md +220 -0
- package/bin/benchmarks.ts +351 -0
- package/bin/dev.ts +205 -0
- package/bin/docs.js +170 -0
- package/bin/install-cursor.sh +71 -0
- package/bin/install-vscode.sh +71 -0
- package/bin/select-local-models.d.ts +1 -0
- package/bin/select-local-models.js +28 -0
- package/bin/select-local-models.ts +31 -0
- package/demo/autocomplete.test.ts +232 -0
- package/demo/docs.json +186 -0
- package/demo/examples.test.ts +598 -0
- package/demo/index.html +91 -0
- package/demo/src/autocomplete.ts +482 -0
- package/demo/src/capabilities.ts +859 -0
- package/demo/src/demo-nav.ts +2097 -0
- package/demo/src/examples.test.ts +161 -0
- package/demo/src/examples.ts +476 -0
- package/demo/src/imports.test.ts +196 -0
- package/demo/src/imports.ts +421 -0
- package/demo/src/index.ts +639 -0
- package/demo/src/module-store.ts +635 -0
- package/demo/src/module-sw.ts +132 -0
- package/demo/src/playground.ts +949 -0
- package/demo/src/service-host.ts +389 -0
- package/demo/src/settings.ts +440 -0
- package/demo/src/style.ts +280 -0
- package/demo/src/tjs-playground.ts +1605 -0
- package/demo/src/ts-examples.ts +478 -0
- package/demo/src/ts-playground.ts +1092 -0
- package/demo/static/favicon.svg +30 -0
- package/demo/static/photo-1.jpg +0 -0
- package/demo/static/photo-2.jpg +0 -0
- package/demo/static/texts/ai-history.txt +9 -0
- package/demo/static/texts/coffee-origins.txt +9 -0
- package/demo/static/texts/renewable-energy.txt +9 -0
- package/dist/index.js +256 -0
- package/dist/index.js.map +37 -0
- package/dist/tjs-batteries.js +4 -0
- package/dist/tjs-batteries.js.map +15 -0
- package/dist/tjs-full.js +256 -0
- package/dist/tjs-full.js.map +37 -0
- package/dist/tjs-transpiler.js +220 -0
- package/dist/tjs-transpiler.js.map +21 -0
- package/dist/tjs-vm.js +4 -0
- package/dist/tjs-vm.js.map +14 -0
- package/docs/CNAME +1 -0
- package/docs/favicon.svg +30 -0
- package/docs/index.html +91 -0
- package/docs/index.js +10468 -0
- package/docs/index.js.map +92 -0
- package/docs/photo-1.jpg +0 -0
- package/docs/photo-1.webp +0 -0
- package/docs/photo-2.jpg +0 -0
- package/docs/photo-2.webp +0 -0
- package/docs/texts/ai-history.txt +9 -0
- package/docs/texts/coffee-origins.txt +9 -0
- package/docs/texts/renewable-energy.txt +9 -0
- package/docs/tjs-lang.svg +31 -0
- package/docs/tosijs-agent.svg +31 -0
- package/editors/README.md +325 -0
- package/editors/ace/ajs-mode.js +328 -0
- package/editors/ace/ajs-mode.ts +269 -0
- package/editors/ajs-syntax.ts +212 -0
- package/editors/build-grammars.ts +510 -0
- package/editors/codemirror/ajs-language.js +287 -0
- package/editors/codemirror/ajs-language.ts +1447 -0
- package/editors/codemirror/autocomplete.test.ts +531 -0
- package/editors/codemirror/component.ts +404 -0
- package/editors/monaco/ajs-monarch.js +243 -0
- package/editors/monaco/ajs-monarch.ts +225 -0
- package/editors/tjs-syntax.ts +115 -0
- package/editors/vscode/language-configuration.json +37 -0
- package/editors/vscode/package.json +65 -0
- package/editors/vscode/syntaxes/ajs-injection.tmLanguage.json +107 -0
- package/editors/vscode/syntaxes/ajs.tmLanguage.json +252 -0
- package/editors/vscode/syntaxes/tjs.tmLanguage.json +333 -0
- package/package.json +83 -0
- package/src/cli/commands/check.ts +41 -0
- package/src/cli/commands/convert.ts +133 -0
- package/src/cli/commands/emit.ts +260 -0
- package/src/cli/commands/run.ts +68 -0
- package/src/cli/commands/test.ts +194 -0
- package/src/cli/commands/types.ts +20 -0
- package/src/cli/create-app.ts +236 -0
- package/src/cli/playground.ts +250 -0
- package/src/cli/tjs.ts +166 -0
- package/src/cli/tjsx.ts +160 -0
- package/tjs-lang.svg +31 -0
|
@@ -0,0 +1,859 @@
|
|
|
1
|
+
/*
 * capabilities.ts - Shared LLM capability builders for demo
 *
 * Used by both playground.ts and LiveExample contexts
 */

// Module-level cache for LM Studio models, keyed by endpoint URL.
// Values are the model-id lists returned by the endpoint's /models route;
// an entry of [] means "scanned, nothing found" (see rescanLocalModels).
let cachedLocalModels: Map<string, string[]> = new Map()

// LM Studio load detection
// Per-endpoint snapshot of how busy the local server appears to be.
interface LoadStatus {
  isLoaded: boolean // true when the last ping suggested the server is busy
  lastCheck: number // epoch ms (Date.now()) of the last ping
  pendingRequests: number // in-flight request count, maintained by trackRequest()
}
// Keyed by endpoint URL, same key space as cachedLocalModels.
const loadStatus: Map<string, LoadStatus> = new Map()
const LOAD_CHECK_INTERVAL = 5000 // Recheck load every 5 seconds
const LOAD_CHECK_TIMEOUT = 2000 // If ping takes > 2s, server is loaded
|
|
19
|
+
|
|
20
|
+
// Check if LM Studio is responsive (fast ping)
|
|
21
|
+
export async function checkServerLoad(url: string): Promise<boolean> {
|
|
22
|
+
const now = Date.now()
|
|
23
|
+
const status = loadStatus.get(url)
|
|
24
|
+
|
|
25
|
+
// Use cached status if recent
|
|
26
|
+
if (status && now - status.lastCheck < LOAD_CHECK_INTERVAL) {
|
|
27
|
+
return !status.isLoaded
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
try {
|
|
31
|
+
const controller = new AbortController()
|
|
32
|
+
const timeout = setTimeout(() => controller.abort(), LOAD_CHECK_TIMEOUT)
|
|
33
|
+
|
|
34
|
+
const start = Date.now()
|
|
35
|
+
await fetch(`${url}/models`, { signal: controller.signal })
|
|
36
|
+
clearTimeout(timeout)
|
|
37
|
+
|
|
38
|
+
const elapsed = Date.now() - start
|
|
39
|
+
const isLoaded = elapsed > LOAD_CHECK_TIMEOUT * 0.8 // 80% of timeout = loaded
|
|
40
|
+
|
|
41
|
+
loadStatus.set(url, {
|
|
42
|
+
isLoaded,
|
|
43
|
+
lastCheck: now,
|
|
44
|
+
pendingRequests: status?.pendingRequests || 0,
|
|
45
|
+
})
|
|
46
|
+
|
|
47
|
+
if (isLoaded) {
|
|
48
|
+
console.log(
|
|
49
|
+
`⏳ LM Studio at ${url} is under load (${elapsed}ms response)`
|
|
50
|
+
)
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
return !isLoaded
|
|
54
|
+
} catch (e: any) {
|
|
55
|
+
if (e.name === 'AbortError') {
|
|
56
|
+
console.log(`⏳ LM Studio at ${url} is under heavy load (timeout)`)
|
|
57
|
+
loadStatus.set(url, {
|
|
58
|
+
isLoaded: true,
|
|
59
|
+
lastCheck: now,
|
|
60
|
+
pendingRequests: status?.pendingRequests || 0,
|
|
61
|
+
})
|
|
62
|
+
return false
|
|
63
|
+
}
|
|
64
|
+
// Connection error - server might be down
|
|
65
|
+
return false
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// Track pending requests
|
|
70
|
+
function trackRequest(url: string, delta: number): number {
|
|
71
|
+
const status = loadStatus.get(url) || {
|
|
72
|
+
isLoaded: false,
|
|
73
|
+
lastCheck: 0,
|
|
74
|
+
pendingRequests: 0,
|
|
75
|
+
}
|
|
76
|
+
status.pendingRequests = Math.max(0, status.pendingRequests + delta)
|
|
77
|
+
loadStatus.set(url, status)
|
|
78
|
+
return status.pendingRequests
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// Get current pending request count
|
|
82
|
+
export function getPendingRequests(url: string): number {
|
|
83
|
+
return loadStatus.get(url)?.pendingRequests || 0
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// Cache for verified vision models (keyed by URL)
// A value of `null` records "this endpoint was scanned and no working
// vision model was found", so the (slow) probe is not repeated.
let verifiedVisionModels: Map<string, string | null> = new Map()
|
|
88
|
+
|
|
89
|
+
// Clear the model cache (call this to force rescan)
|
|
90
|
+
export function clearModelCache(): void {
|
|
91
|
+
cachedLocalModels.clear()
|
|
92
|
+
verifiedVisionModels.clear()
|
|
93
|
+
console.log('🔄 Model cache cleared (including vision verification)')
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
// Rescan models from LM Studio and return the list
|
|
97
|
+
export async function rescanLocalModels(
|
|
98
|
+
customLlmUrl?: string
|
|
99
|
+
): Promise<string[]> {
|
|
100
|
+
const url = customLlmUrl || localStorage.getItem('customLlmUrl') || ''
|
|
101
|
+
if (!url) {
|
|
102
|
+
console.log('⚠️ No custom LLM URL configured')
|
|
103
|
+
return []
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
try {
|
|
107
|
+
const response = await fetch(`${url}/models`)
|
|
108
|
+
if (response.ok) {
|
|
109
|
+
const data = await response.json()
|
|
110
|
+
const models = data.data?.map((m: any) => m.id) || []
|
|
111
|
+
cachedLocalModels.set(url, models)
|
|
112
|
+
console.log(`✅ Found ${models.length} models at ${url}:`, models)
|
|
113
|
+
return models
|
|
114
|
+
}
|
|
115
|
+
} catch (e) {
|
|
116
|
+
console.error('❌ Failed to fetch models:', e)
|
|
117
|
+
}
|
|
118
|
+
cachedLocalModels.set(url, [])
|
|
119
|
+
return []
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// Get cached models (or fetch if not cached)
|
|
123
|
+
export async function getLocalModels(customLlmUrl?: string): Promise<string[]> {
|
|
124
|
+
const url = customLlmUrl || localStorage.getItem('customLlmUrl') || ''
|
|
125
|
+
if (!url) return []
|
|
126
|
+
|
|
127
|
+
const cached = cachedLocalModels.get(url)
|
|
128
|
+
if (cached !== undefined) return cached
|
|
129
|
+
return rescanLocalModels(url)
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
// LLM provider type
// 'auto' picks the first configured provider in priority order
// (custom endpoint → OpenAI → Anthropic → Deepseek); the others force
// that specific provider.
export type LLMProvider =
  | 'auto'
  | 'custom'
  | 'openai'
  | 'anthropic'
  | 'deepseek'

// User-configured LLM settings, persisted in localStorage (see getSettings).
// Empty strings mean "not configured".
export interface LLMSettings {
  preferredProvider: LLMProvider
  openaiKey: string // OpenAI API key
  anthropicKey: string // Anthropic API key
  deepseekKey: string // Deepseek API key
  customLlmUrl: string // Base URL of an OpenAI-compatible endpoint (e.g. LM Studio)
}
|
|
147
|
+
|
|
148
|
+
// Get settings from localStorage
|
|
149
|
+
export function getSettings(): LLMSettings {
|
|
150
|
+
return {
|
|
151
|
+
preferredProvider: (localStorage.getItem('preferredProvider') ||
|
|
152
|
+
'auto') as LLMProvider,
|
|
153
|
+
openaiKey: localStorage.getItem('openaiKey') || '',
|
|
154
|
+
anthropicKey: localStorage.getItem('anthropicKey') || '',
|
|
155
|
+
deepseekKey: localStorage.getItem('deepseekKey') || '',
|
|
156
|
+
customLlmUrl: localStorage.getItem('customLlmUrl') || '',
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
// Build LLM capability from settings (simple predict interface)
/**
 * Builds a minimal LLM capability exposing `predict(prompt, options)`.
 *
 * Provider selection: an explicit `preferredProvider` wins when its
 * key/URL is configured; otherwise a helpful error is thrown. In 'auto'
 * mode the first configured provider in priority order is used
 * (custom endpoint → OpenAI → Anthropic → Deepseek).
 *
 * Returns `null` when no provider is configured at all, so callers can
 * detect "no LLM available" without a try/catch.
 *
 * `options` is passed through loosely: `model`, `temperature`, and
 * `responseFormat` are honored where the provider supports them.
 */
export function buildLLMCapability(settings: LLMSettings) {
  const {
    preferredProvider,
    openaiKey,
    anthropicKey,
    deepseekKey,
    customLlmUrl,
  } = settings

  // Determine which providers are available
  // (truthy when the key/URL is a non-blank string)
  const hasCustomUrl = customLlmUrl && customLlmUrl.trim() !== ''
  const hasOpenAI = openaiKey && openaiKey.trim() !== ''
  const hasAnthropic = anthropicKey && anthropicKey.trim() !== ''
  const hasDeepseek = deepseekKey && deepseekKey.trim() !== ''

  if (!hasCustomUrl && !hasOpenAI && !hasAnthropic && !hasDeepseek) {
    return null
  }

  // Provider implementations

  // Custom (LM Studio-style) endpoint: OpenAI-compatible /chat/completions.
  // Tracks in-flight requests via trackRequest so load can be reported.
  const callCustom = async (prompt: string, options?: any): Promise<string> => {
    const body: any = {
      model: options?.model || 'local-model',
      messages: [{ role: 'user', content: prompt }],
      temperature: options?.temperature ?? 0.7,
    }
    if (options?.responseFormat) body.response_format = options.responseFormat

    const pending = trackRequest(customLlmUrl, 1)
    if (pending > 1) {
      console.log(`⏳ LM Studio: ${pending} requests pending`)
    }

    try {
      const startTime = Date.now()
      const response = await fetch(`${customLlmUrl}/chat/completions`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(body),
      })
      const elapsed = Date.now() - startTime

      if (!response.ok) {
        throw new Error(
          `LLM Error: ${response.status} - Check that LM Studio is running at ${customLlmUrl}`
        )
      }
      console.log(`✅ LM Studio response in ${elapsed}ms`)
      const data = await response.json()
      return data.choices?.[0]?.message?.content ?? ''
    } catch (e: any) {
      // Translate a bare network failure into an actionable message.
      if (e.message?.includes('Failed to fetch') || e.name === 'TypeError') {
        throw new Error(
          `Cannot connect to LM Studio at ${customLlmUrl}. Make sure LM Studio is running and CORS is enabled (Server settings → Enable CORS).`
        )
      }
      throw e
    } finally {
      // Decrement in-flight count on every exit path.
      trackRequest(customLlmUrl, -1)
    }
  }

  // OpenAI chat completions.
  const callOpenAI = async (prompt: string, options?: any): Promise<string> => {
    const body: any = {
      model: options?.model || 'gpt-4o-mini',
      messages: [{ role: 'user', content: prompt }],
      temperature: options?.temperature ?? 0.7,
    }
    if (options?.responseFormat) body.response_format = options.responseFormat

    const response = await fetch('https://api.openai.com/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${openaiKey}`,
      },
      body: JSON.stringify(body),
    })
    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(
        `OpenAI Error: ${response.status} - ${
          error.error?.message || 'Check your API key'
        }`
      )
    }
    const data = await response.json()
    return data.choices?.[0]?.message?.content ?? ''
  }

  // Anthropic messages API (different wire format from the others).
  const callAnthropic = async (
    prompt: string,
    options?: any
  ): Promise<string> => {
    // Note: Anthropic doesn't support response_format the same way
    // It uses tool_use for structured output instead
    const response = await fetch('https://api.anthropic.com/v1/messages', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': anthropicKey,
        'anthropic-version': '2023-06-01',
        'anthropic-dangerous-direct-browser-access': 'true',
      },
      body: JSON.stringify({
        model: options?.model || 'claude-3-haiku-20240307',
        max_tokens: options?.maxTokens || 1024,
        messages: [{ role: 'user', content: prompt }],
      }),
    })
    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(
        `Anthropic Error: ${response.status} - ${
          error.error?.message || 'Check your API key'
        }`
      )
    }
    const data = await response.json()
    return data.content?.[0]?.text ?? ''
  }

  // Deepseek: OpenAI-compatible wire format at a different base URL.
  const callDeepseek = async (
    prompt: string,
    options?: any
  ): Promise<string> => {
    const body: any = {
      model: options?.model || 'deepseek-chat',
      messages: [{ role: 'user', content: prompt }],
      temperature: options?.temperature ?? 0.7,
    }
    if (options?.responseFormat) body.response_format = options.responseFormat

    const response = await fetch('https://api.deepseek.com/chat/completions', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${deepseekKey}`,
      },
      body: JSON.stringify(body),
    })
    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(
        `Deepseek Error: ${response.status} - ${
          error.error?.message || 'Check your API key'
        }`
      )
    }
    const data = await response.json()
    return data.choices?.[0]?.message?.content ?? ''
  }

  return {
    /**
     * Sends `prompt` as a single user message to the selected provider
     * and resolves with the model's text reply ('' when none).
     * @throws Error when the chosen provider is unconfigured, the request
     *         fails, or no provider is available.
     */
    async predict(prompt: string, options?: any): Promise<string> {
      // If a specific provider is selected, use it
      if (preferredProvider === 'custom' && hasCustomUrl)
        return callCustom(prompt, options)
      if (preferredProvider === 'openai' && hasOpenAI)
        return callOpenAI(prompt, options)
      if (preferredProvider === 'anthropic' && hasAnthropic)
        return callAnthropic(prompt, options)
      if (preferredProvider === 'deepseek' && hasDeepseek)
        return callDeepseek(prompt, options)

      // If preferred provider not available, show helpful error
      if (preferredProvider !== 'auto') {
        const providerNames: Record<string, string> = {
          custom: 'Custom Endpoint',
          openai: 'OpenAI',
          anthropic: 'Anthropic',
          deepseek: 'Deepseek',
        }
        throw new Error(
          `${providerNames[preferredProvider]} is selected but not configured. Add your API key in Settings.`
        )
      }

      // Auto mode: use first available in priority order
      if (hasCustomUrl) return callCustom(prompt, options)
      if (hasOpenAI) return callOpenAI(prompt, options)
      if (hasAnthropic) return callAnthropic(prompt, options)
      if (hasDeepseek) return callDeepseek(prompt, options)

      throw new Error('No LLM provider configured')
    },
  }
}
|
|
349
|
+
|
|
350
|
+
// Build LLM Battery capability (supports system/user, tools, responseFormat)
|
|
351
|
+
// UserContent can be a simple string or multimodal with images
|
|
352
|
+
type UserContent = string | { text: string; images?: string[] }
|
|
353
|
+
|
|
354
|
+
// Build user message content - supports text-only or multimodal (text + images)
|
|
355
|
+
function buildUserContent(user: UserContent): any {
|
|
356
|
+
if (typeof user === 'string') {
|
|
357
|
+
return user
|
|
358
|
+
}
|
|
359
|
+
|
|
360
|
+
// Multimodal: array of content blocks (OpenAI vision format)
|
|
361
|
+
const content: any[] = [{ type: 'text', text: user.text }]
|
|
362
|
+
|
|
363
|
+
for (const img of user.images || []) {
|
|
364
|
+
content.push({
|
|
365
|
+
type: 'image_url',
|
|
366
|
+
image_url: {
|
|
367
|
+
url: img, // Can be URL or data:image/...;base64,...
|
|
368
|
+
},
|
|
369
|
+
})
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
return content
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
export function buildLLMBattery(settings: LLMSettings) {
|
|
376
|
+
const {
|
|
377
|
+
preferredProvider,
|
|
378
|
+
openaiKey,
|
|
379
|
+
anthropicKey,
|
|
380
|
+
deepseekKey,
|
|
381
|
+
customLlmUrl,
|
|
382
|
+
} = settings
|
|
383
|
+
|
|
384
|
+
const hasCustomUrl = customLlmUrl && customLlmUrl.trim() !== ''
|
|
385
|
+
const hasOpenAI = openaiKey && openaiKey.trim() !== ''
|
|
386
|
+
const hasAnthropic = anthropicKey && anthropicKey.trim() !== ''
|
|
387
|
+
const hasDeepseek = deepseekKey && deepseekKey.trim() !== ''
|
|
388
|
+
|
|
389
|
+
if (!hasCustomUrl && !hasOpenAI && !hasAnthropic && !hasDeepseek) {
|
|
390
|
+
return null
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
type BatteryResult = { content?: string; tool_calls?: any[] }
|
|
394
|
+
|
|
395
|
+
// Get a test image for vision capability testing
|
|
396
|
+
const getTestImage = async (): Promise<string | null> => {
|
|
397
|
+
// Browser: synthesize with canvas (circle and square like test-shapes.jpg)
|
|
398
|
+
if (
|
|
399
|
+
typeof document !== 'undefined' &&
|
|
400
|
+
typeof document.createElement === 'function'
|
|
401
|
+
) {
|
|
402
|
+
try {
|
|
403
|
+
const canvas = document.createElement('canvas')
|
|
404
|
+
canvas.width = 200
|
|
405
|
+
canvas.height = 200
|
|
406
|
+
const ctx = canvas.getContext('2d')
|
|
407
|
+
if (ctx) {
|
|
408
|
+
// White background
|
|
409
|
+
ctx.fillStyle = 'white'
|
|
410
|
+
ctx.fillRect(0, 0, 200, 200)
|
|
411
|
+
// Blue circle on left
|
|
412
|
+
ctx.fillStyle = '#3366cc'
|
|
413
|
+
ctx.beginPath()
|
|
414
|
+
ctx.arc(60, 100, 40, 0, Math.PI * 2)
|
|
415
|
+
ctx.fill()
|
|
416
|
+
// Red square on right
|
|
417
|
+
ctx.fillStyle = '#cc3333'
|
|
418
|
+
ctx.fillRect(100, 60, 80, 80)
|
|
419
|
+
return canvas.toDataURL('image/jpeg', 0.9)
|
|
420
|
+
}
|
|
421
|
+
} catch {}
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
// Node/Bun: read test-shapes.jpg from disk
|
|
425
|
+
try {
|
|
426
|
+
const fs = await import('fs')
|
|
427
|
+
const path = await import('path')
|
|
428
|
+
const imagePath = path.join(process.cwd(), 'test-data/test-shapes.jpg')
|
|
429
|
+
const buffer = fs.readFileSync(imagePath)
|
|
430
|
+
const base64 = buffer.toString('base64')
|
|
431
|
+
return `data:image/jpeg;base64,${base64}`
|
|
432
|
+
} catch {}
|
|
433
|
+
|
|
434
|
+
return null
|
|
435
|
+
}
|
|
436
|
+
|
|
437
|
+
// Test if a model can actually do vision
|
|
438
|
+
const testVisionCapability = async (model: string): Promise<boolean> => {
|
|
439
|
+
try {
|
|
440
|
+
const testImage = await getTestImage()
|
|
441
|
+
if (!testImage) {
|
|
442
|
+
console.log(`🧪 Vision test for ${model}: test image not available`)
|
|
443
|
+
return false
|
|
444
|
+
}
|
|
445
|
+
|
|
446
|
+
const response = await fetch(`${customLlmUrl}/chat/completions`, {
|
|
447
|
+
method: 'POST',
|
|
448
|
+
headers: { 'Content-Type': 'application/json' },
|
|
449
|
+
body: JSON.stringify({
|
|
450
|
+
model,
|
|
451
|
+
messages: [
|
|
452
|
+
{
|
|
453
|
+
role: 'user',
|
|
454
|
+
content: [
|
|
455
|
+
{
|
|
456
|
+
type: 'text',
|
|
457
|
+
text: 'What shapes do you see? Reply briefly.',
|
|
458
|
+
},
|
|
459
|
+
{ type: 'image_url', image_url: { url: testImage } },
|
|
460
|
+
],
|
|
461
|
+
},
|
|
462
|
+
],
|
|
463
|
+
max_tokens: 30,
|
|
464
|
+
temperature: 0,
|
|
465
|
+
}),
|
|
466
|
+
})
|
|
467
|
+
|
|
468
|
+
if (!response.ok) {
|
|
469
|
+
const errorText = await response.text().catch(() => '')
|
|
470
|
+
console.log(
|
|
471
|
+
`🧪 Vision test for ${model}: HTTP ${
|
|
472
|
+
response.status
|
|
473
|
+
} - ${errorText.slice(0, 100)}`
|
|
474
|
+
)
|
|
475
|
+
return false
|
|
476
|
+
}
|
|
477
|
+
|
|
478
|
+
const data = await response.json()
|
|
479
|
+
const answer = (data.choices?.[0]?.message?.content || '').toLowerCase()
|
|
480
|
+
// Accept circle, square, or red (canvas generates red circle, test-shapes.jpg has circle+square)
|
|
481
|
+
const isCorrect =
|
|
482
|
+
answer.includes('circle') ||
|
|
483
|
+
answer.includes('square') ||
|
|
484
|
+
answer.includes('red')
|
|
485
|
+
console.log(
|
|
486
|
+
`🧪 Vision test for ${model}: "${answer}" - ${isCorrect ? '✓' : '✗'}`
|
|
487
|
+
)
|
|
488
|
+
return isCorrect
|
|
489
|
+
} catch (e) {
|
|
490
|
+
console.log(`🧪 Vision test for ${model}: failed - ${e}`)
|
|
491
|
+
return false
|
|
492
|
+
}
|
|
493
|
+
}
|
|
494
|
+
|
|
495
|
+
// Find a working vision model by testing candidates
|
|
496
|
+
const findVisionModel = async (): Promise<string | null> => {
|
|
497
|
+
// Check cache first
|
|
498
|
+
const cacheKey = customLlmUrl
|
|
499
|
+
if (verifiedVisionModels.has(cacheKey)) {
|
|
500
|
+
return verifiedVisionModels.get(cacheKey) || null
|
|
501
|
+
}
|
|
502
|
+
|
|
503
|
+
const models = await getLocalModels(customLlmUrl)
|
|
504
|
+
|
|
505
|
+
// Candidates in priority order (most likely to support vision first)
|
|
506
|
+
const candidates = [
|
|
507
|
+
...models.filter(
|
|
508
|
+
(id) => id.includes('-vl') || id.includes('vl-') || id.includes('llava')
|
|
509
|
+
),
|
|
510
|
+
...models.filter((id) => id.includes('vision')),
|
|
511
|
+
...models.filter((id) => id.includes('gemma-3') || id.includes('gemma3')),
|
|
512
|
+
]
|
|
513
|
+
|
|
514
|
+
// Remove duplicates
|
|
515
|
+
const uniqueCandidates = [...new Set(candidates)]
|
|
516
|
+
|
|
517
|
+
// Test each candidate
|
|
518
|
+
for (const model of uniqueCandidates) {
|
|
519
|
+
console.log(`🔍 Testing vision capability: ${model}`)
|
|
520
|
+
if (await testVisionCapability(model)) {
|
|
521
|
+
verifiedVisionModels.set(cacheKey, model)
|
|
522
|
+
return model
|
|
523
|
+
}
|
|
524
|
+
}
|
|
525
|
+
|
|
526
|
+
verifiedVisionModels.set(cacheKey, null)
|
|
527
|
+
return null
|
|
528
|
+
}
|
|
529
|
+
|
|
530
|
+
// Provider implementations
|
|
531
|
+
const callCustom = async (
|
|
532
|
+
system: string,
|
|
533
|
+
user: UserContent,
|
|
534
|
+
tools?: any[],
|
|
535
|
+
responseFormat?: any
|
|
536
|
+
): Promise<BatteryResult> => {
|
|
537
|
+
const messages = [
|
|
538
|
+
{ role: 'system', content: system },
|
|
539
|
+
{ role: 'user', content: buildUserContent(user) },
|
|
540
|
+
]
|
|
541
|
+
const isMultimodal = typeof user !== 'string' && user.images?.length
|
|
542
|
+
|
|
543
|
+
// Select appropriate model
|
|
544
|
+
let model = 'local-model'
|
|
545
|
+
if (isMultimodal) {
|
|
546
|
+
const visionModel = await findVisionModel()
|
|
547
|
+
if (visionModel) {
|
|
548
|
+
model = visionModel
|
|
549
|
+
console.log(`🔍 Using vision model: ${visionModel}`)
|
|
550
|
+
} else {
|
|
551
|
+
console.warn('⚠️ No vision model found, using default')
|
|
552
|
+
}
|
|
553
|
+
// Debug: log image info
|
|
554
|
+
const images = (user as { text: string; images?: string[] }).images || []
|
|
555
|
+
console.log(
|
|
556
|
+
`📷 Sending ${images.length} image(s), first image length: ${
|
|
557
|
+
images[0]?.length || 0
|
|
558
|
+
}`
|
|
559
|
+
)
|
|
560
|
+
}
|
|
561
|
+
|
|
562
|
+
// Check server load before making request
|
|
563
|
+
const pending = trackRequest(customLlmUrl, 1)
|
|
564
|
+
if (pending > 1) {
|
|
565
|
+
console.log(
|
|
566
|
+
`⏳ LM Studio: ${pending} requests pending (including this one)`
|
|
567
|
+
)
|
|
568
|
+
}
|
|
569
|
+
|
|
570
|
+
try {
|
|
571
|
+
const requestBody = {
|
|
572
|
+
model,
|
|
573
|
+
messages,
|
|
574
|
+
temperature: 0.7,
|
|
575
|
+
tools,
|
|
576
|
+
response_format: responseFormat,
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
// Debug: log the request structure (not the full base64)
|
|
580
|
+
if (isMultimodal) {
|
|
581
|
+
const debugMessages = messages.map((m: any) => {
|
|
582
|
+
if (Array.isArray(m.content)) {
|
|
583
|
+
return {
|
|
584
|
+
role: m.role,
|
|
585
|
+
content: m.content.map((c: any) => {
|
|
586
|
+
if (c.type === 'image_url') {
|
|
587
|
+
return {
|
|
588
|
+
type: 'image_url',
|
|
589
|
+
url_length: c.image_url?.url?.length,
|
|
590
|
+
}
|
|
591
|
+
}
|
|
592
|
+
return c
|
|
593
|
+
}),
|
|
594
|
+
}
|
|
595
|
+
}
|
|
596
|
+
return m
|
|
597
|
+
})
|
|
598
|
+
console.log(
|
|
599
|
+
'📤 Request structure:',
|
|
600
|
+
JSON.stringify({ model, messages: debugMessages }, null, 2)
|
|
601
|
+
)
|
|
602
|
+
}
|
|
603
|
+
|
|
604
|
+
const startTime = Date.now()
|
|
605
|
+
const response = await fetch(`${customLlmUrl}/chat/completions`, {
|
|
606
|
+
method: 'POST',
|
|
607
|
+
headers: { 'Content-Type': 'application/json' },
|
|
608
|
+
body: JSON.stringify(requestBody),
|
|
609
|
+
})
|
|
610
|
+
const elapsed = Date.now() - startTime
|
|
611
|
+
|
|
612
|
+
if (!response.ok) {
|
|
613
|
+
const errorData = await response.json().catch(() => ({}))
|
|
614
|
+
const errorMsg = errorData.error?.message || ''
|
|
615
|
+
// Check if this might be a vision request without a vision model
|
|
616
|
+
if (response.status === 400 && isMultimodal) {
|
|
617
|
+
const hasVisionModel = model !== 'local-model'
|
|
618
|
+
if (!hasVisionModel) {
|
|
619
|
+
throw new Error(
|
|
620
|
+
`LLM Error: ${response.status} - No vision model found in LM Studio. ` +
|
|
621
|
+
`Load a vision model (e.g., llava, qwen-vl) or use OpenAI/Anthropic.`
|
|
622
|
+
)
|
|
623
|
+
}
|
|
624
|
+
throw new Error(
|
|
625
|
+
`LLM Error: ${response.status} - Vision request failed with model '${model}'. ${errorMsg}`
|
|
626
|
+
)
|
|
627
|
+
}
|
|
628
|
+
throw new Error(
|
|
629
|
+
`LLM Error: ${response.status} - ${
|
|
630
|
+
errorMsg || 'Check that LM Studio is running'
|
|
631
|
+
}`
|
|
632
|
+
)
|
|
633
|
+
}
|
|
634
|
+
|
|
635
|
+
console.log(`✅ LM Studio response in ${elapsed}ms`)
|
|
636
|
+
const data = await response.json()
|
|
637
|
+
return data.choices?.[0]?.message ?? { content: '' }
|
|
638
|
+
} catch (e: any) {
|
|
639
|
+
if (e.message?.includes('Failed to fetch') || e.name === 'TypeError') {
|
|
640
|
+
throw new Error(
|
|
641
|
+
`Cannot connect to LM Studio at ${customLlmUrl}. Make sure LM Studio is running and CORS is enabled.`
|
|
642
|
+
)
|
|
643
|
+
}
|
|
644
|
+
throw e
|
|
645
|
+
} finally {
|
|
646
|
+
trackRequest(customLlmUrl, -1)
|
|
647
|
+
}
|
|
648
|
+
}
|
|
649
|
+
|
|
650
|
+
const callOpenAI = async (
|
|
651
|
+
system: string,
|
|
652
|
+
user: UserContent,
|
|
653
|
+
tools?: any[],
|
|
654
|
+
responseFormat?: any
|
|
655
|
+
): Promise<BatteryResult> => {
|
|
656
|
+
const messages = [
|
|
657
|
+
{ role: 'system', content: system },
|
|
658
|
+
{ role: 'user', content: buildUserContent(user) },
|
|
659
|
+
]
|
|
660
|
+
const body: any = {
|
|
661
|
+
model: 'gpt-4o-mini',
|
|
662
|
+
messages,
|
|
663
|
+
temperature: 0.7,
|
|
664
|
+
}
|
|
665
|
+
if (tools?.length) body.tools = tools
|
|
666
|
+
if (responseFormat) body.response_format = responseFormat
|
|
667
|
+
|
|
668
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
669
|
+
method: 'POST',
|
|
670
|
+
headers: {
|
|
671
|
+
'Content-Type': 'application/json',
|
|
672
|
+
Authorization: `Bearer ${openaiKey}`,
|
|
673
|
+
},
|
|
674
|
+
body: JSON.stringify(body),
|
|
675
|
+
})
|
|
676
|
+
if (!response.ok) {
|
|
677
|
+
const error = await response.json().catch(() => ({}))
|
|
678
|
+
throw new Error(
|
|
679
|
+
`OpenAI Error: ${response.status} - ${
|
|
680
|
+
error.error?.message || 'Check your API key'
|
|
681
|
+
}`
|
|
682
|
+
)
|
|
683
|
+
}
|
|
684
|
+
const data = await response.json()
|
|
685
|
+
return data.choices?.[0]?.message ?? { content: '' }
|
|
686
|
+
}
|
|
687
|
+
|
|
688
|
+
const callAnthropic = async (
|
|
689
|
+
system: string,
|
|
690
|
+
user: UserContent,
|
|
691
|
+
_tools?: any[],
|
|
692
|
+
_responseFormat?: any
|
|
693
|
+
): Promise<BatteryResult> => {
|
|
694
|
+
// Anthropic has different format for multimodal - build content array
|
|
695
|
+
let userContent: any
|
|
696
|
+
if (typeof user === 'string') {
|
|
697
|
+
userContent = user
|
|
698
|
+
} else {
|
|
699
|
+
// Anthropic multimodal format
|
|
700
|
+
userContent = [{ type: 'text', text: user.text }]
|
|
701
|
+
for (const img of user.images || []) {
|
|
702
|
+
// Anthropic expects base64 data, extract from data URL
|
|
703
|
+
const match = img.match(/^data:([^;]+);base64,(.+)$/)
|
|
704
|
+
if (match) {
|
|
705
|
+
userContent.push({
|
|
706
|
+
type: 'image',
|
|
707
|
+
source: {
|
|
708
|
+
type: 'base64',
|
|
709
|
+
media_type: match[1],
|
|
710
|
+
data: match[2],
|
|
711
|
+
},
|
|
712
|
+
})
|
|
713
|
+
}
|
|
714
|
+
}
|
|
715
|
+
}
|
|
716
|
+
|
|
717
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
718
|
+
method: 'POST',
|
|
719
|
+
headers: {
|
|
720
|
+
'Content-Type': 'application/json',
|
|
721
|
+
'x-api-key': anthropicKey,
|
|
722
|
+
'anthropic-version': '2023-06-01',
|
|
723
|
+
'anthropic-dangerous-direct-browser-access': 'true',
|
|
724
|
+
},
|
|
725
|
+
body: JSON.stringify({
|
|
726
|
+
model: 'claude-3-haiku-20240307',
|
|
727
|
+
max_tokens: 1024,
|
|
728
|
+
system,
|
|
729
|
+
messages: [{ role: 'user', content: userContent }],
|
|
730
|
+
}),
|
|
731
|
+
})
|
|
732
|
+
if (!response.ok) {
|
|
733
|
+
const error = await response.json().catch(() => ({}))
|
|
734
|
+
throw new Error(
|
|
735
|
+
`Anthropic Error: ${response.status} - ${
|
|
736
|
+
error.error?.message || 'Check your API key'
|
|
737
|
+
}`
|
|
738
|
+
)
|
|
739
|
+
}
|
|
740
|
+
const data = await response.json()
|
|
741
|
+
return { content: data.content?.[0]?.text ?? '' }
|
|
742
|
+
}
|
|
743
|
+
|
|
744
|
+
const callDeepseek = async (
|
|
745
|
+
system: string,
|
|
746
|
+
user: UserContent,
|
|
747
|
+
tools?: any[],
|
|
748
|
+
responseFormat?: any
|
|
749
|
+
): Promise<BatteryResult> => {
|
|
750
|
+
// Deepseek uses OpenAI-compatible format
|
|
751
|
+
const messages = [
|
|
752
|
+
{ role: 'system', content: system },
|
|
753
|
+
{ role: 'user', content: buildUserContent(user) },
|
|
754
|
+
]
|
|
755
|
+
const body: any = {
|
|
756
|
+
model: 'deepseek-chat',
|
|
757
|
+
messages,
|
|
758
|
+
temperature: 0.7,
|
|
759
|
+
}
|
|
760
|
+
if (tools?.length) body.tools = tools
|
|
761
|
+
if (responseFormat) body.response_format = responseFormat
|
|
762
|
+
|
|
763
|
+
const response = await fetch('https://api.deepseek.com/chat/completions', {
|
|
764
|
+
method: 'POST',
|
|
765
|
+
headers: {
|
|
766
|
+
'Content-Type': 'application/json',
|
|
767
|
+
Authorization: `Bearer ${deepseekKey}`,
|
|
768
|
+
},
|
|
769
|
+
body: JSON.stringify(body),
|
|
770
|
+
})
|
|
771
|
+
if (!response.ok) {
|
|
772
|
+
const error = await response.json().catch(() => ({}))
|
|
773
|
+
throw new Error(
|
|
774
|
+
`Deepseek Error: ${response.status} - ${
|
|
775
|
+
error.error?.message || 'Check your API key'
|
|
776
|
+
}`
|
|
777
|
+
)
|
|
778
|
+
}
|
|
779
|
+
const data = await response.json()
|
|
780
|
+
return data.choices?.[0]?.message ?? { content: '' }
|
|
781
|
+
}
|
|
782
|
+
|
|
783
|
+
return {
|
|
784
|
+
async predict(
|
|
785
|
+
system: string,
|
|
786
|
+
user: UserContent,
|
|
787
|
+
tools?: any[],
|
|
788
|
+
responseFormat?: any
|
|
789
|
+
): Promise<BatteryResult> {
|
|
790
|
+
// If a specific provider is selected, use it
|
|
791
|
+
if (preferredProvider === 'custom' && hasCustomUrl)
|
|
792
|
+
return callCustom(system, user, tools, responseFormat)
|
|
793
|
+
if (preferredProvider === 'openai' && hasOpenAI)
|
|
794
|
+
return callOpenAI(system, user, tools, responseFormat)
|
|
795
|
+
if (preferredProvider === 'anthropic' && hasAnthropic)
|
|
796
|
+
return callAnthropic(system, user, tools, responseFormat)
|
|
797
|
+
if (preferredProvider === 'deepseek' && hasDeepseek)
|
|
798
|
+
return callDeepseek(system, user, tools, responseFormat)
|
|
799
|
+
|
|
800
|
+
// If preferred provider not available, show helpful error
|
|
801
|
+
if (preferredProvider !== 'auto') {
|
|
802
|
+
const providerNames: Record<string, string> = {
|
|
803
|
+
custom: 'Custom Endpoint',
|
|
804
|
+
openai: 'OpenAI',
|
|
805
|
+
anthropic: 'Anthropic',
|
|
806
|
+
deepseek: 'Deepseek',
|
|
807
|
+
}
|
|
808
|
+
throw new Error(
|
|
809
|
+
`${providerNames[preferredProvider]} is selected but not configured. Add your API key in Settings.`
|
|
810
|
+
)
|
|
811
|
+
}
|
|
812
|
+
|
|
813
|
+
// Auto mode: use first available in priority order
|
|
814
|
+
if (hasCustomUrl) return callCustom(system, user, tools, responseFormat)
|
|
815
|
+
if (hasOpenAI) return callOpenAI(system, user, tools, responseFormat)
|
|
816
|
+
if (hasAnthropic)
|
|
817
|
+
return callAnthropic(system, user, tools, responseFormat)
|
|
818
|
+
if (hasDeepseek) return callDeepseek(system, user, tools, responseFormat)
|
|
819
|
+
|
|
820
|
+
throw new Error('No LLM provider configured')
|
|
821
|
+
},
|
|
822
|
+
|
|
823
|
+
async embed(text: string): Promise<number[]> {
|
|
824
|
+
// Embedding support for custom URL only (LM Studio)
|
|
825
|
+
if (hasCustomUrl) {
|
|
826
|
+
try {
|
|
827
|
+
const response = await fetch(`${customLlmUrl}/embeddings`, {
|
|
828
|
+
method: 'POST',
|
|
829
|
+
headers: { 'Content-Type': 'application/json' },
|
|
830
|
+
body: JSON.stringify({
|
|
831
|
+
model: 'text-embedding-model',
|
|
832
|
+
input: text,
|
|
833
|
+
}),
|
|
834
|
+
})
|
|
835
|
+
if (!response.ok) {
|
|
836
|
+
throw new Error(`Embedding Error: ${response.status}`)
|
|
837
|
+
}
|
|
838
|
+
const data = await response.json()
|
|
839
|
+
return data.data?.[0]?.embedding ?? []
|
|
840
|
+
} catch {
|
|
841
|
+
throw new Error('Embedding not available')
|
|
842
|
+
}
|
|
843
|
+
}
|
|
844
|
+
throw new Error('Embedding requires LM Studio endpoint')
|
|
845
|
+
},
|
|
846
|
+
}
|
|
847
|
+
}
|
|
848
|
+
|
|
849
|
+
// Build full capabilities object from settings
|
|
850
|
+
export function buildCapabilities(settings?: LLMSettings) {
|
|
851
|
+
const s = settings || getSettings()
|
|
852
|
+
const llmCapability = buildLLMCapability(s)
|
|
853
|
+
const llmBattery = buildLLMBattery(s)
|
|
854
|
+
|
|
855
|
+
return {
|
|
856
|
+
llm: llmCapability,
|
|
857
|
+
llmBattery,
|
|
858
|
+
}
|
|
859
|
+
}
|