@nextsparkjs/plugin-ai 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +79 -0
- package/README.md +529 -0
- package/api/README.md +65 -0
- package/api/ai-history/[id]/route.ts +112 -0
- package/api/embeddings/route.ts +129 -0
- package/api/generate/route.ts +160 -0
- package/docs/01-getting-started/01-introduction.md +237 -0
- package/docs/01-getting-started/02-installation.md +447 -0
- package/docs/01-getting-started/03-configuration.md +416 -0
- package/docs/02-features/01-text-generation.md +523 -0
- package/docs/02-features/02-embeddings.md +241 -0
- package/docs/02-features/03-ai-history.md +549 -0
- package/docs/03-advanced-usage/01-core-utilities.md +500 -0
- package/docs/04-use-cases/01-content-generation.md +453 -0
- package/entities/ai-history/ai-history.config.ts +123 -0
- package/entities/ai-history/ai-history.fields.ts +330 -0
- package/entities/ai-history/messages/en.json +56 -0
- package/entities/ai-history/messages/es.json +56 -0
- package/entities/ai-history/migrations/001_ai_history_table.sql +167 -0
- package/entities/ai-history/migrations/002_ai_history_metas.sql +103 -0
- package/lib/ai-history-meta-service.ts +379 -0
- package/lib/ai-history-service.ts +391 -0
- package/lib/ai-sdk.ts +7 -0
- package/lib/core-utils.ts +217 -0
- package/lib/plugin-env.ts +252 -0
- package/lib/sanitize.ts +122 -0
- package/lib/save-example.ts +237 -0
- package/lib/server-env.ts +104 -0
- package/package.json +23 -0
- package/plugin.config.ts +55 -0
- package/public/docs/login-404-error.png +0 -0
- package/tsconfig.json +47 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/types/ai.types.ts +51 -0
|
@@ -0,0 +1,391 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI History Service
|
|
3
|
+
*
|
|
4
|
+
* Manages audit trail for all AI operations across plugins.
|
|
5
|
+
* Provides consistent logging interface with status tracking.
|
|
6
|
+
*
|
|
7
|
+
* Lifecycle:
|
|
8
|
+
* 1. startOperation() → status='pending'
|
|
9
|
+
* 2. updateToProcessing() → status='processing' (optional)
|
|
10
|
+
* 3. completeOperation() → status='completed' + metrics
|
|
11
|
+
* 4. failOperation() → status='failed' + error
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { query, queryOne } from '@nextsparkjs/core/lib/db'
|
|
15
|
+
import { AIHistoryMetaService } from './ai-history-meta-service'
|
|
16
|
+
|
|
17
|
+
/** Kind of AI operation being tracked. */
export type AIOperation = 'generate' | 'refine' | 'analyze' | 'chat' | 'completion' | 'other'
/** Upstream AI provider that served the operation. */
export type AIProvider = 'anthropic' | 'openai' | 'google' | 'azure' | 'other'
/** Lifecycle status of a tracked operation (see lifecycle in file header). */
export type AIStatus = 'pending' | 'processing' | 'completed' | 'failed'

/** Input for AIHistoryService.startOperation(). */
export interface StartOperationParams {
  userId: string
  operation: AIOperation
  model: string
  provider?: AIProvider // defaults to 'anthropic' when omitted (see startOperation)
  relatedEntityType?: string
  relatedEntityId?: string
  // NOTE: Metadata should be stored in ai_history_metas table via completeOperation(metas)
  // NOT in startOperation() - ai_history table has no metadata column
}

/** Input for AIHistoryService.completeOperation(). */
export interface CompleteOperationParams {
  historyId: string
  tokensUsed: number
  tokensInput?: number // Input tokens (prompt) - for precise cost calculation
  tokensOutput?: number // Output tokens (completion) - for precise cost calculation
  creditsUsed: number
  estimatedCost: number
  balanceAfter: number
  userId: string // ✅ NEW: Required for metadata operations
  metas?: Record<string, unknown> // ✅ NEW: Flexible metadata (sourceOperationId, userInstruction, etc.)
}

/** Input for AIHistoryService.failOperation(). */
export interface FailOperationParams {
  historyId: string
  errorMessage: string
  tokensUsed?: number
}

/** Row shape of the "ai_history" table as returned by the query helpers. */
export interface AIHistoryRecord {
  id: string
  userId: string
  relatedEntityType: string | null
  relatedEntityId: string | null
  operation: AIOperation
  model: string
  provider: AIProvider
  status: AIStatus
  tokensUsed: number | null
  creditsUsed: number | null
  estimatedCost: number | null
  balanceAfter: number | null
  errorMessage: string | null
  createdAt: Date
  completedAt: Date | null
}
|
|
67
|
+
|
|
68
|
+
export class AIHistoryService {
|
|
69
|
+
/**
|
|
70
|
+
* Start tracking an AI operation
|
|
71
|
+
* Creates initial record with status='pending'
|
|
72
|
+
*
|
|
73
|
+
* @returns History ID for subsequent updates
|
|
74
|
+
*/
|
|
75
|
+
static async startOperation(params: StartOperationParams): Promise<string> {
|
|
76
|
+
const {
|
|
77
|
+
userId,
|
|
78
|
+
operation,
|
|
79
|
+
model,
|
|
80
|
+
provider = 'anthropic',
|
|
81
|
+
relatedEntityType,
|
|
82
|
+
relatedEntityId,
|
|
83
|
+
} = params
|
|
84
|
+
|
|
85
|
+
try {
|
|
86
|
+
const result = await queryOne<{ id: string }>(
|
|
87
|
+
`INSERT INTO "ai_history" (
|
|
88
|
+
"userId",
|
|
89
|
+
operation,
|
|
90
|
+
model,
|
|
91
|
+
provider,
|
|
92
|
+
"relatedEntityType",
|
|
93
|
+
"relatedEntityId",
|
|
94
|
+
status,
|
|
95
|
+
"createdAt"
|
|
96
|
+
) VALUES ($1, $2, $3, $4, $5, $6, $7, now())
|
|
97
|
+
RETURNING id`,
|
|
98
|
+
[userId, operation, model, provider, relatedEntityType || null, relatedEntityId || null, 'pending']
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
if (!result) {
|
|
102
|
+
throw new Error('Failed to create AI history record')
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
return result.id
|
|
106
|
+
} catch (error) {
|
|
107
|
+
console.error('Error starting AI operation tracking:', error)
|
|
108
|
+
throw error
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/**
|
|
113
|
+
* Update operation to 'processing' status (optional)
|
|
114
|
+
* Useful for long-running operations
|
|
115
|
+
*/
|
|
116
|
+
static async updateToProcessing(historyId: string): Promise<void> {
|
|
117
|
+
try {
|
|
118
|
+
await query(
|
|
119
|
+
`UPDATE "ai_history"
|
|
120
|
+
SET status = 'processing'
|
|
121
|
+
WHERE id = $1`,
|
|
122
|
+
[historyId]
|
|
123
|
+
)
|
|
124
|
+
} catch (error) {
|
|
125
|
+
console.error('Error updating AI history to processing:', error)
|
|
126
|
+
// Non-critical, don't throw
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
/**
|
|
131
|
+
* Complete operation successfully
|
|
132
|
+
* Updates status='completed', adds metrics, and saves metadata
|
|
133
|
+
*/
|
|
134
|
+
static async completeOperation(params: CompleteOperationParams): Promise<void> {
|
|
135
|
+
const { historyId, tokensUsed, tokensInput, tokensOutput, creditsUsed, estimatedCost, balanceAfter, userId, metas } = params
|
|
136
|
+
|
|
137
|
+
try {
|
|
138
|
+
// Update main ai_history record
|
|
139
|
+
await query(
|
|
140
|
+
`UPDATE "ai_history"
|
|
141
|
+
SET status = 'completed',
|
|
142
|
+
"tokensUsed" = $2,
|
|
143
|
+
"tokensInput" = $3,
|
|
144
|
+
"tokensOutput" = $4,
|
|
145
|
+
"creditsUsed" = $5,
|
|
146
|
+
"estimatedCost" = $6,
|
|
147
|
+
"balanceAfter" = $7,
|
|
148
|
+
"completedAt" = now()
|
|
149
|
+
WHERE id = $1`,
|
|
150
|
+
[historyId, tokensUsed, tokensInput || null, tokensOutput || null, creditsUsed, estimatedCost, balanceAfter]
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
// Save metadata to ai_history_metas table
|
|
154
|
+
if (metas && Object.keys(metas).length > 0) {
|
|
155
|
+
await AIHistoryMetaService.setBulkMetas(
|
|
156
|
+
historyId,
|
|
157
|
+
metas,
|
|
158
|
+
userId,
|
|
159
|
+
{ isPublic: false, isSearchable: true }
|
|
160
|
+
)
|
|
161
|
+
}
|
|
162
|
+
} catch (error) {
|
|
163
|
+
console.error('Error completing AI operation:', error)
|
|
164
|
+
// Log but don't throw - operation succeeded even if history update failed
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
/**
|
|
169
|
+
* Mark operation as failed
|
|
170
|
+
* Updates status='failed' and adds error message
|
|
171
|
+
*/
|
|
172
|
+
static async failOperation(params: FailOperationParams): Promise<void> {
|
|
173
|
+
const { historyId, errorMessage, tokensUsed } = params
|
|
174
|
+
|
|
175
|
+
try {
|
|
176
|
+
await query(
|
|
177
|
+
`UPDATE "ai_history"
|
|
178
|
+
SET status = 'failed',
|
|
179
|
+
"errorMessage" = $2,
|
|
180
|
+
"tokensUsed" = $3,
|
|
181
|
+
"completedAt" = now()
|
|
182
|
+
WHERE id = $1`,
|
|
183
|
+
[historyId, errorMessage, tokensUsed || null]
|
|
184
|
+
)
|
|
185
|
+
} catch (error) {
|
|
186
|
+
console.error('Error marking AI operation as failed:', error)
|
|
187
|
+
// Log but don't throw - we're already in error handling
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
/**
|
|
192
|
+
* Update related entity information for an existing operation
|
|
193
|
+
* Useful when entity is created AFTER AI operation starts (e.g., analyze-brief)
|
|
194
|
+
*
|
|
195
|
+
* Use case: analyze-brief runs before client exists, then client is created,
|
|
196
|
+
* and we need to link the AI history record to the newly created client.
|
|
197
|
+
*
|
|
198
|
+
* @param historyId - AI history record ID to update
|
|
199
|
+
* @param relatedEntityType - Entity type (e.g., 'clients', 'products')
|
|
200
|
+
* @param relatedEntityId - Entity ID (UUID)
|
|
201
|
+
*/
|
|
202
|
+
static async updateRelatedEntity(
|
|
203
|
+
historyId: string,
|
|
204
|
+
relatedEntityType: string,
|
|
205
|
+
relatedEntityId: string
|
|
206
|
+
): Promise<void> {
|
|
207
|
+
try {
|
|
208
|
+
await query(
|
|
209
|
+
`UPDATE "ai_history"
|
|
210
|
+
SET "relatedEntityType" = $2,
|
|
211
|
+
"relatedEntityId" = $3
|
|
212
|
+
WHERE id = $1`,
|
|
213
|
+
[historyId, relatedEntityType, relatedEntityId]
|
|
214
|
+
)
|
|
215
|
+
} catch (error) {
|
|
216
|
+
console.error('Error updating AI history related entity:', error)
|
|
217
|
+
// Log but don't throw - non-critical update
|
|
218
|
+
}
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
/**
|
|
222
|
+
* Get operation history for a user
|
|
223
|
+
* Useful for displaying usage analytics
|
|
224
|
+
*/
|
|
225
|
+
static async getUserHistory(
|
|
226
|
+
userId: string,
|
|
227
|
+
options?: {
|
|
228
|
+
limit?: number
|
|
229
|
+
offset?: number
|
|
230
|
+
operation?: AIOperation
|
|
231
|
+
status?: AIStatus
|
|
232
|
+
}
|
|
233
|
+
): Promise<AIHistoryRecord[]> {
|
|
234
|
+
const { limit = 50, offset = 0, operation, status } = options || {}
|
|
235
|
+
|
|
236
|
+
try {
|
|
237
|
+
let sql = `
|
|
238
|
+
SELECT * FROM "ai_history"
|
|
239
|
+
WHERE "userId" = $1
|
|
240
|
+
`
|
|
241
|
+
const params: any[] = [userId]
|
|
242
|
+
let paramIndex = 2
|
|
243
|
+
|
|
244
|
+
if (operation) {
|
|
245
|
+
sql += ` AND operation = $${paramIndex}`
|
|
246
|
+
params.push(operation)
|
|
247
|
+
paramIndex++
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
if (status) {
|
|
251
|
+
sql += ` AND status = $${paramIndex}`
|
|
252
|
+
params.push(status)
|
|
253
|
+
paramIndex++
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
sql += ` ORDER BY "createdAt" DESC LIMIT $${paramIndex} OFFSET $${paramIndex + 1}`
|
|
257
|
+
params.push(limit, offset)
|
|
258
|
+
|
|
259
|
+
const result = await query<AIHistoryRecord>(sql, params)
|
|
260
|
+
return result.rows
|
|
261
|
+
} catch (error) {
|
|
262
|
+
console.error('Error getting user AI history:', error)
|
|
263
|
+
return []
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/**
|
|
268
|
+
* Get history for a specific related entity
|
|
269
|
+
* Useful for showing AI operations on a content/product/etc
|
|
270
|
+
*/
|
|
271
|
+
static async getEntityHistory(
|
|
272
|
+
entityType: string,
|
|
273
|
+
entityId: string,
|
|
274
|
+
options?: {
|
|
275
|
+
limit?: number
|
|
276
|
+
operation?: AIOperation
|
|
277
|
+
}
|
|
278
|
+
): Promise<AIHistoryRecord[]> {
|
|
279
|
+
const { limit = 20, operation } = options || {}
|
|
280
|
+
|
|
281
|
+
try {
|
|
282
|
+
let sql = `
|
|
283
|
+
SELECT * FROM "ai_history"
|
|
284
|
+
WHERE "relatedEntityType" = $1 AND "relatedEntityId" = $2
|
|
285
|
+
`
|
|
286
|
+
const params: any[] = [entityType, entityId]
|
|
287
|
+
let paramIndex = 3
|
|
288
|
+
|
|
289
|
+
if (operation) {
|
|
290
|
+
sql += ` AND operation = $${paramIndex}`
|
|
291
|
+
params.push(operation)
|
|
292
|
+
paramIndex++
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
sql += ` ORDER BY "createdAt" DESC LIMIT $${paramIndex}`
|
|
296
|
+
params.push(limit)
|
|
297
|
+
|
|
298
|
+
const result = await query<AIHistoryRecord>(sql, params)
|
|
299
|
+
return result.rows
|
|
300
|
+
} catch (error) {
|
|
301
|
+
console.error('Error getting entity AI history:', error)
|
|
302
|
+
return []
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
/**
|
|
307
|
+
* Get operation by ID
|
|
308
|
+
*/
|
|
309
|
+
static async getOperation(historyId: string): Promise<AIHistoryRecord | null> {
|
|
310
|
+
try {
|
|
311
|
+
return await queryOne<AIHistoryRecord>(
|
|
312
|
+
'SELECT * FROM "ai_history" WHERE id = $1',
|
|
313
|
+
[historyId]
|
|
314
|
+
)
|
|
315
|
+
} catch (error) {
|
|
316
|
+
console.error('Error getting AI operation:', error)
|
|
317
|
+
return null
|
|
318
|
+
}
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
/**
|
|
322
|
+
* Get usage statistics for a user
|
|
323
|
+
* Useful for analytics dashboards
|
|
324
|
+
*/
|
|
325
|
+
static async getUserStats(userId: string, fromDate?: Date): Promise<{
|
|
326
|
+
totalOperations: number
|
|
327
|
+
totalTokens: number
|
|
328
|
+
totalCredits: number
|
|
329
|
+
totalCost: number
|
|
330
|
+
successRate: number
|
|
331
|
+
operationBreakdown: Record<AIOperation, number>
|
|
332
|
+
}> {
|
|
333
|
+
try {
|
|
334
|
+
let sql = `
|
|
335
|
+
SELECT
|
|
336
|
+
COUNT(*) as total_operations,
|
|
337
|
+
COALESCE(SUM("tokensUsed"), 0) as total_tokens,
|
|
338
|
+
COALESCE(SUM("creditsUsed"), 0) as total_credits,
|
|
339
|
+
COALESCE(SUM("estimatedCost"), 0) as total_cost,
|
|
340
|
+
COUNT(*) FILTER (WHERE status = 'completed') as completed,
|
|
341
|
+
operation
|
|
342
|
+
FROM "ai_history"
|
|
343
|
+
WHERE "userId" = $1
|
|
344
|
+
`
|
|
345
|
+
const params: any[] = [userId]
|
|
346
|
+
|
|
347
|
+
if (fromDate) {
|
|
348
|
+
sql += ` AND "createdAt" >= $2`
|
|
349
|
+
params.push(fromDate)
|
|
350
|
+
}
|
|
351
|
+
|
|
352
|
+
sql += ` GROUP BY operation`
|
|
353
|
+
|
|
354
|
+
const result = await query<{
|
|
355
|
+
total_operations: string
|
|
356
|
+
total_tokens: string
|
|
357
|
+
total_credits: string
|
|
358
|
+
total_cost: string
|
|
359
|
+
completed: string
|
|
360
|
+
operation: AIOperation
|
|
361
|
+
}>(sql, params)
|
|
362
|
+
|
|
363
|
+
const totalOps = result.rows.reduce((sum, r) => sum + parseInt(r.total_operations), 0)
|
|
364
|
+
const totalCompleted = result.rows.reduce((sum, r) => sum + parseInt(r.completed), 0)
|
|
365
|
+
|
|
366
|
+
const operationBreakdown: Record<string, number> = {}
|
|
367
|
+
result.rows.forEach(row => {
|
|
368
|
+
operationBreakdown[row.operation] = parseInt(row.total_operations)
|
|
369
|
+
})
|
|
370
|
+
|
|
371
|
+
return {
|
|
372
|
+
totalOperations: totalOps,
|
|
373
|
+
totalTokens: result.rows.reduce((sum, r) => sum + parseInt(r.total_tokens || '0'), 0),
|
|
374
|
+
totalCredits: result.rows.reduce((sum, r) => sum + parseInt(r.total_credits || '0'), 0),
|
|
375
|
+
totalCost: result.rows.reduce((sum, r) => sum + parseFloat(r.total_cost || '0'), 0),
|
|
376
|
+
successRate: totalOps > 0 ? (totalCompleted / totalOps) * 100 : 0,
|
|
377
|
+
operationBreakdown: operationBreakdown as Record<AIOperation, number>,
|
|
378
|
+
}
|
|
379
|
+
} catch (error) {
|
|
380
|
+
console.error('Error getting user AI stats:', error)
|
|
381
|
+
return {
|
|
382
|
+
totalOperations: 0,
|
|
383
|
+
totalTokens: 0,
|
|
384
|
+
totalCredits: 0,
|
|
385
|
+
totalCost: 0,
|
|
386
|
+
successRate: 0,
|
|
387
|
+
operationBreakdown: {} as Record<AIOperation, number>,
|
|
388
|
+
}
|
|
389
|
+
}
|
|
390
|
+
}
|
|
391
|
+
}
|
package/lib/ai-sdk.ts
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Re-exports from Vercel AI SDK for use in themes and other plugins
|
|
3
|
+
* This allows the 'ai' package to be isolated to the AI plugin workspace
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
export { generateObject, generateText, streamText, streamObject } from 'ai'
|
|
7
|
+
export type { CoreMessage, LanguageModel } from 'ai'
|
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* AI Plugin Core Utilities
|
|
3
|
+
*
|
|
4
|
+
* Simple, direct functions for building AI endpoints
|
|
5
|
+
* No dynamic imports, no complex abstractions
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { createOpenAI } from '@ai-sdk/openai'
|
|
9
|
+
import { createAnthropic } from '@ai-sdk/anthropic'
|
|
10
|
+
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'
|
|
11
|
+
import type { AIProvider, ModelSelection, AIResult } from '../types/ai.types'
|
|
12
|
+
import {
|
|
13
|
+
getServerPluginConfig,
|
|
14
|
+
isServerPluginEnabled,
|
|
15
|
+
validateServerPluginEnvironment
|
|
16
|
+
} from './server-env'
|
|
17
|
+
|
|
18
|
+
/**
 * Cost per 1K tokens (USD), keyed by model name.
 * Models not listed here fall back to zero cost in selectModel().
 */
export const COST_CONFIG = {
  // OpenAI models
  'gpt-4o': { input: 0.0025, output: 0.01 },
  'gpt-4o-mini': { input: 0.00015, output: 0.0006 },
  'gpt-4-turbo': { input: 0.01, output: 0.03 },
  'gpt-3.5-turbo': { input: 0.0005, output: 0.0015 },

  // Anthropic models
  'claude-sonnet-4-5-20250929': { input: 0.003, output: 0.015 }, // Current (Sept 2025)
  'claude-3-5-sonnet-20241022': { input: 0.003, output: 0.015 }, // Deprecated Oct 28, 2025
  'claude-3-5-haiku-20241022': { input: 0.00025, output: 0.00125 },
  'claude-3-opus-20240229': { input: 0.015, output: 0.075 },

  // Ollama models (local, no cost)
  'llama3.2:3b': { input: 0, output: 0 },
  'llama3.2': { input: 0, output: 0 },
  'llama3.1': { input: 0, output: 0 },
  'qwen2.5': { input: 0, output: 0 },
  'mistral': { input: 0, output: 0 },
  'gemma2': { input: 0, output: 0 },
  'phi3.5': { input: 0, output: 0 },
  'codellama': { input: 0, output: 0 }
}
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Select AI model and provider
|
|
45
|
+
*/
|
|
46
|
+
export async function selectModel(modelName: string, provider?: AIProvider): Promise<ModelSelection> {
|
|
47
|
+
// Auto-detect provider if not specified
|
|
48
|
+
if (!provider) {
|
|
49
|
+
if (modelName.startsWith('gpt-')) {
|
|
50
|
+
provider = 'openai'
|
|
51
|
+
} else if (modelName.startsWith('claude-')) {
|
|
52
|
+
provider = 'anthropic'
|
|
53
|
+
} else {
|
|
54
|
+
provider = 'ollama'
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
console.log(`🎯 [selectModel] Selected provider: ${provider}, model: ${modelName}`)
|
|
59
|
+
|
|
60
|
+
const costConfig = COST_CONFIG[modelName as keyof typeof COST_CONFIG] || { input: 0, output: 0 }
|
|
61
|
+
const config = await getServerPluginConfig()
|
|
62
|
+
|
|
63
|
+
switch (provider) {
|
|
64
|
+
case 'openai':
|
|
65
|
+
if (!config.openaiApiKey) {
|
|
66
|
+
throw new Error('OpenAI API key not configured. Set OPENAI_API_KEY in contents/plugins/ai/.env')
|
|
67
|
+
}
|
|
68
|
+
const openaiProvider = createOpenAI({
|
|
69
|
+
apiKey: config.openaiApiKey
|
|
70
|
+
})
|
|
71
|
+
return {
|
|
72
|
+
provider: 'openai',
|
|
73
|
+
model: openaiProvider(modelName),
|
|
74
|
+
modelName,
|
|
75
|
+
isLocal: false,
|
|
76
|
+
costConfig
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
case 'anthropic':
|
|
80
|
+
if (!config.anthropicApiKey) {
|
|
81
|
+
throw new Error('Anthropic API key not configured. Set ANTHROPIC_API_KEY in contents/plugins/ai/.env')
|
|
82
|
+
}
|
|
83
|
+
const anthropicProvider = createAnthropic({
|
|
84
|
+
apiKey: config.anthropicApiKey
|
|
85
|
+
})
|
|
86
|
+
return {
|
|
87
|
+
provider: 'anthropic',
|
|
88
|
+
model: anthropicProvider(modelName),
|
|
89
|
+
modelName,
|
|
90
|
+
isLocal: false,
|
|
91
|
+
costConfig
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
case 'ollama':
|
|
95
|
+
default:
|
|
96
|
+
const ollamaBaseUrl = config.ollamaBaseUrl || 'http://localhost:11434'
|
|
97
|
+
console.log(`🔥 [selectModel] Creating Ollama provider with baseURL: ${ollamaBaseUrl}, model: ${modelName}`)
|
|
98
|
+
const ollamaProvider = createOpenAICompatible({
|
|
99
|
+
baseURL: `${ollamaBaseUrl}/v1`,
|
|
100
|
+
name: 'ollama'
|
|
101
|
+
})
|
|
102
|
+
return {
|
|
103
|
+
provider: 'ollama',
|
|
104
|
+
model: ollamaProvider(modelName),
|
|
105
|
+
modelName,
|
|
106
|
+
isLocal: true,
|
|
107
|
+
costConfig
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/**
|
|
113
|
+
* Calculate AI generation cost
|
|
114
|
+
*/
|
|
115
|
+
export function calculateCost(
|
|
116
|
+
tokens: { input: number; output: number },
|
|
117
|
+
costConfig: { input: number; output: number }
|
|
118
|
+
): number {
|
|
119
|
+
const inputCost = (tokens.input / 1000) * costConfig.input
|
|
120
|
+
const outputCost = (tokens.output / 1000) * costConfig.output
|
|
121
|
+
return Math.round((inputCost + outputCost) * 100000) / 100000
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
/**
|
|
125
|
+
* Validate plugin is ready to use
|
|
126
|
+
*/
|
|
127
|
+
export async function validatePlugin(): Promise<{ valid: boolean; error?: string }> {
|
|
128
|
+
try {
|
|
129
|
+
if (!(await isServerPluginEnabled())) {
|
|
130
|
+
return {
|
|
131
|
+
valid: false,
|
|
132
|
+
error: 'AI plugin disabled. Set AI_PLUGIN_ENABLED=true in contents/plugins/ai/.env'
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
const envValidation = await validateServerPluginEnvironment()
|
|
137
|
+
if (!envValidation.valid) {
|
|
138
|
+
return {
|
|
139
|
+
valid: false,
|
|
140
|
+
error: `Plugin configuration invalid: ${envValidation.errors.join(', ')}`
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
return { valid: true }
|
|
145
|
+
} catch (error) {
|
|
146
|
+
return {
|
|
147
|
+
valid: false,
|
|
148
|
+
error: error instanceof Error ? error.message : 'Unknown validation error'
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
/**
|
|
154
|
+
* Extract token usage from AI SDK result
|
|
155
|
+
*/
|
|
156
|
+
export function extractTokens(result: AIResult): { input: number; output: number; total: number } {
|
|
157
|
+
return {
|
|
158
|
+
input: result.usage?.inputTokens || 0,
|
|
159
|
+
output: result.usage?.outputTokens || 0,
|
|
160
|
+
total: result.usage?.totalTokens || 0
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
/**
|
|
165
|
+
* Common error handler for AI endpoints
|
|
166
|
+
*/
|
|
167
|
+
export function handleAIError(error: Error): { error: string; message: string; status: number } {
|
|
168
|
+
const errorMessage = error.message.toLowerCase()
|
|
169
|
+
|
|
170
|
+
// Provider-specific errors
|
|
171
|
+
if (errorMessage.includes('openai') || errorMessage.includes('api key')) {
|
|
172
|
+
return {
|
|
173
|
+
error: 'OpenAI authentication failed',
|
|
174
|
+
message: 'Check your OPENAI_API_KEY in contents/plugins/ai/.env',
|
|
175
|
+
status: 401
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
if (errorMessage.includes('anthropic') || errorMessage.includes('claude')) {
|
|
180
|
+
return {
|
|
181
|
+
error: 'Anthropic authentication failed',
|
|
182
|
+
message: 'Check your ANTHROPIC_API_KEY in contents/plugins/ai/.env',
|
|
183
|
+
status: 401
|
|
184
|
+
}
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
if (errorMessage.includes('econnrefused') || errorMessage.includes('connect')) {
|
|
188
|
+
return {
|
|
189
|
+
error: 'Ollama connection failed',
|
|
190
|
+
message: 'Make sure Ollama is running (ollama serve)',
|
|
191
|
+
status: 503
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
if (errorMessage.includes('rate limit') || errorMessage.includes('quota')) {
|
|
196
|
+
return {
|
|
197
|
+
error: 'Rate limit exceeded',
|
|
198
|
+
message: 'API rate limit reached. Try again later.',
|
|
199
|
+
status: 429
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
if (errorMessage.includes('model') && errorMessage.includes('not found')) {
|
|
204
|
+
return {
|
|
205
|
+
error: 'Model not found',
|
|
206
|
+
message: 'The specified model is not available or not installed',
|
|
207
|
+
status: 404
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
// Generic error
|
|
212
|
+
return {
|
|
213
|
+
error: 'AI generation failed',
|
|
214
|
+
message: error.message,
|
|
215
|
+
status: 500
|
|
216
|
+
}
|
|
217
|
+
}
|