@nextsparkjs/plugin-ai 0.1.0-beta.1

Files changed (34)
  1. package/.env.example +79 -0
  2. package/README.md +529 -0
  3. package/api/README.md +65 -0
  4. package/api/ai-history/[id]/route.ts +112 -0
  5. package/api/embeddings/route.ts +129 -0
  6. package/api/generate/route.ts +160 -0
  7. package/docs/01-getting-started/01-introduction.md +237 -0
  8. package/docs/01-getting-started/02-installation.md +447 -0
  9. package/docs/01-getting-started/03-configuration.md +416 -0
  10. package/docs/02-features/01-text-generation.md +523 -0
  11. package/docs/02-features/02-embeddings.md +241 -0
  12. package/docs/02-features/03-ai-history.md +549 -0
  13. package/docs/03-advanced-usage/01-core-utilities.md +500 -0
  14. package/docs/04-use-cases/01-content-generation.md +453 -0
  15. package/entities/ai-history/ai-history.config.ts +123 -0
  16. package/entities/ai-history/ai-history.fields.ts +330 -0
  17. package/entities/ai-history/messages/en.json +56 -0
  18. package/entities/ai-history/messages/es.json +56 -0
  19. package/entities/ai-history/migrations/001_ai_history_table.sql +167 -0
  20. package/entities/ai-history/migrations/002_ai_history_metas.sql +103 -0
  21. package/lib/ai-history-meta-service.ts +379 -0
  22. package/lib/ai-history-service.ts +391 -0
  23. package/lib/ai-sdk.ts +7 -0
  24. package/lib/core-utils.ts +217 -0
  25. package/lib/plugin-env.ts +252 -0
  26. package/lib/sanitize.ts +122 -0
  27. package/lib/save-example.ts +237 -0
  28. package/lib/server-env.ts +104 -0
  29. package/package.json +23 -0
  30. package/plugin.config.ts +55 -0
  31. package/public/docs/login-404-error.png +0 -0
  32. package/tsconfig.json +47 -0
  33. package/tsconfig.tsbuildinfo +1 -0
  34. package/types/ai.types.ts +51 -0
@@ -0,0 +1,500 @@
+ # Core Utilities
+
+ ## Overview
+
+ The AI plugin provides a set of **core utility functions** that you can import and use in your own custom endpoints. These utilities handle model selection, cost calculation, validation, token extraction, and error handling.
+
+ **Philosophy:** Instead of providing rigid, pre-built solutions, the plugin gives you powerful building blocks to create exactly what you need.
+
+ ## Import Path
+
+ ```typescript
+ import {
+   selectModel,
+   calculateCost,
+   validatePlugin,
+   extractTokens,
+   handleAIError,
+   COST_CONFIG
+ } from '@/contents/plugins/ai/lib/core-utils'
+ ```
+
+ ## Core Functions
+
+ ### `selectModel()`
+
+ Automatically selects and configures the appropriate AI model and provider.
+
+ **Signature:**
+ ```typescript
+ async function selectModel(
+   modelName: string,
+   provider?: AIProvider
+ ): Promise<ModelSelection>
+ ```
+
+ **Parameters:**
+ - `modelName` - Model identifier (e.g., `'gpt-4o-mini'`, `'llama3.2:3b'`)
+ - `provider` (optional) - Force specific provider (`'openai' | 'anthropic' | 'ollama'`)
+
+ **Returns:**
+ ```typescript
+ {
+   provider: AIProvider,  // 'openai' | 'anthropic' | 'ollama'
+   model: LanguageModel,  // Vercel AI SDK model instance
+   modelName: string,     // Original model name
+   isLocal: boolean,      // true for Ollama, false for cloud
+   costConfig: {
+     input: number,       // Cost per 1K input tokens
+     output: number       // Cost per 1K output tokens
+   }
+ }
+ ```
+
+ **Usage:**
+ ```typescript
+ // Auto-detect provider from model name
+ const selection = await selectModel('gpt-4o-mini')
+ console.log(selection.provider) // 'openai'
+ console.log(selection.isLocal)  // false
+
+ // Use with Vercel AI SDK
+ import { generateText } from 'ai'
+
+ const result = await generateText({
+   model: selection.model,
+   prompt: 'Hello world'
+ })
+ ```
+
+ **Provider Auto-Detection:**
+ ```typescript
+ // OpenAI models (start with 'gpt-')
+ await selectModel('gpt-4o')      // provider: 'openai'
+ await selectModel('gpt-4o-mini') // provider: 'openai'
+
+ // Anthropic models (start with 'claude-')
+ await selectModel('claude-3-5-haiku-20241022') // provider: 'anthropic'
+
+ // Ollama models (everything else)
+ await selectModel('llama3.2:3b') // provider: 'ollama'
+ await selectModel('mistral')     // provider: 'ollama'
+ ```
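+
+ The detection rule is simple prefix matching on the model name. A minimal sketch of that logic (illustrative only, not the plugin's actual source):
+
+ ```typescript
+ // Sketch: provider auto-detection by model-name prefix, per the rules above
+ function detectProvider(modelName: string): 'openai' | 'anthropic' | 'ollama' {
+   if (modelName.startsWith('gpt-')) return 'openai'
+   if (modelName.startsWith('claude-')) return 'anthropic'
+   return 'ollama' // everything else is treated as a local Ollama model
+ }
+ ```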
+
+ **Manual Provider Override:**
+ ```typescript
+ // Force Ollama provider
+ const selection = await selectModel('custom-model', 'ollama')
+ ```
+
+ ---
+
+ ### `calculateCost()`
+
+ Calculate the cost of an AI operation based on token usage.
+
+ **Signature:**
+ ```typescript
+ function calculateCost(
+   tokens: { input: number; output: number },
+   costConfig: { input: number; output: number }
+ ): number
+ ```
+
+ **Parameters:**
+ - `tokens` - Token usage breakdown
+   - `input` - Input/prompt tokens
+   - `output` - Output/completion tokens
+ - `costConfig` - Cost per 1K tokens (from `selectModel()` or `COST_CONFIG`)
+
+ **Returns:** `number` - Cost in USD (5 decimal precision)
+
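+ The computation is plain per-1K pricing. A minimal sketch (the final rounding step is an assumption based on the documented 5-decimal precision):
+
+ ```typescript
+ // Sketch of the formula: tokens / 1000 × price-per-1K, input plus output
+ function calculateCostSketch(
+   tokens: { input: number; output: number },
+   costConfig: { input: number; output: number }
+ ): number {
+   const raw =
+     (tokens.input / 1000) * costConfig.input +
+     (tokens.output / 1000) * costConfig.output
+   return Number(raw.toFixed(5)) // assumed rounding to 5 decimal places
+ }
+ ```
+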
+ **Usage:**
+ ```typescript
+ const tokens = { input: 100, output: 200 }
+ const costConfig = { input: 0.00015, output: 0.0006 }
+
+ const cost = calculateCost(tokens, costConfig)
+ console.log(cost) // 0.00014 (raw value 0.000135, rounded to 5 decimal places)
+ ```
+
+ **With Model Selection:**
+ ```typescript
+ const selection = await selectModel('gpt-4o-mini')
+ const result = await generateText({ model: selection.model, prompt: '...' })
+
+ const tokens = extractTokens(result)
+ const cost = calculateCost(tokens, selection.costConfig)
+
+ console.log(`Cost: $${cost.toFixed(5)}`)
+ ```
+
+ ---
+
+ ### `validatePlugin()`
+
+ Validate that the plugin is properly configured and ready to use.
+
+ **Signature:**
+ ```typescript
+ async function validatePlugin(): Promise<PluginValidation>
+ ```
+
+ **Returns:**
+ ```typescript
+ {
+   valid: boolean,
+   error?: string // Present if valid is false
+ }
+ ```
+
+ **Usage:**
+ ```typescript
+ const validation = await validatePlugin()
+
+ if (!validation.valid) {
+   return Response.json(
+     { error: validation.error },
+     { status: 503 }
+   )
+ }
+
+ // Plugin is ready, proceed with AI operation
+ ```
+
+ **Validation Checks:**
+ - Plugin is enabled (`AI_PLUGIN_ENABLED=true`)
+ - At least one provider is configured
+ - Environment variables are valid
+
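+ If several custom endpoints gate on these checks, a small reusable guard keeps handlers tidy. A sketch (hypothetical helper, not exported by the plugin):
+
+ ```typescript
+ // Hypothetical guard: returns an error Response if the plugin isn't ready,
+ // or null when it is safe to proceed
+ async function requireAIPlugin(): Promise<Response | null> {
+   const validation = await validatePlugin()
+   if (!validation.valid) {
+     return Response.json({ error: validation.error }, { status: 503 })
+   }
+   return null
+ }
+ ```
+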
+ **Example Errors:**
+ ```typescript
+ { valid: false, error: 'AI plugin disabled. Set AI_PLUGIN_ENABLED=true' }
+ { valid: false, error: 'No AI provider configured' }
+ { valid: false, error: 'Plugin configuration invalid: missing OPENAI_API_KEY' }
+ ```
+
+ ---
+
+ ### `extractTokens()`
+
+ Extract token usage from a Vercel AI SDK result.
+
+ **Signature:**
+ ```typescript
+ function extractTokens(result: AIResult): TokenUsage
+ ```
+
+ **Parameters:**
+ - `result` - Result from `generateText()` or a similar AI SDK function
+
+ **Returns:**
+ ```typescript
+ {
+   input: number,  // Input/prompt tokens
+   output: number, // Output/completion tokens
+   total: number   // Total tokens used
+ }
+ ```
+
+ **Usage:**
+ ```typescript
+ import { generateText } from 'ai'
+
+ const result = await generateText({
+   model: selectedModel.model,
+   prompt: 'Hello'
+ })
+
+ const tokens = extractTokens(result)
+ console.log(tokens)
+ // { input: 5, output: 12, total: 17 }
+ ```
+
+ **Handles Missing Data:**
+ ```typescript
+ // If result.usage is undefined, returns zeros
+ extractTokens({ text: 'response' })
+ // { input: 0, output: 0, total: 0 }
+ ```
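+
+ For intuition, the fallback behaves roughly like this sketch (the `usage` field names here are assumptions about the AI SDK result shape, not the plugin's actual source):
+
+ ```typescript
+ // Sketch: read usage if present, otherwise default every count to zero
+ function extractTokensSketch(result: {
+   usage?: { inputTokens?: number; outputTokens?: number }
+ }): { input: number; output: number; total: number } {
+   const input = result.usage?.inputTokens ?? 0
+   const output = result.usage?.outputTokens ?? 0
+   return { input, output, total: input + output }
+ }
+ ```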
+
+ ---
+
+ ### `handleAIError()`
+
+ Common error handler that maps AI errors to user-friendly messages and HTTP status codes.
+
+ **Signature:**
+ ```typescript
+ function handleAIError(error: Error): AIErrorResult
+ ```
+
+ **Parameters:**
+ - `error` - Error object from an AI operation
+
+ **Returns:**
+ ```typescript
+ {
+   error: string,   // Error type
+   message: string, // User-friendly message
+   status: number   // HTTP status code
+ }
+ ```
+
+ **Usage:**
+ ```typescript
+ try {
+   const result = await generateText({ ... })
+ } catch (error) {
+   const errorInfo = handleAIError(error as Error)
+   return Response.json(
+     { error: errorInfo.error, message: errorInfo.message },
+     { status: errorInfo.status }
+   )
+ }
+ ```
+
+ **Error Detection:**
+ ```typescript
+ // OpenAI errors
+ handleAIError(new Error('openai authentication failed'))
+ // → { error: 'OpenAI authentication failed',
+ //     message: 'Check your OPENAI_API_KEY...',
+ //     status: 401 }
+
+ // Ollama connection errors
+ handleAIError(new Error('ECONNREFUSED'))
+ // → { error: 'Ollama connection failed',
+ //     message: 'Make sure Ollama is running...',
+ //     status: 503 }
+
+ // Rate limits
+ handleAIError(new Error('rate limit exceeded'))
+ // → { error: 'Rate limit exceeded',
+ //     message: 'Try again later',
+ //     status: 429 }
+
+ // Model not found
+ handleAIError(new Error('model not found'))
+ // → { error: 'Model not found',
+ //     message: 'Model not available...',
+ //     status: 404 }
+
+ // Generic errors
+ handleAIError(new Error('Unknown error'))
+ // → { error: 'AI generation failed',
+ //     message: 'Unknown error',
+ //     status: 500 }
+ ```
+
+ ---
+
+ ### `COST_CONFIG`
+
+ Pre-configured cost data for all supported models.
+
+ **Type:**
+ ```typescript
+ const COST_CONFIG: {
+   [modelName: string]: {
+     input: number  // Cost per 1K input tokens (USD)
+     output: number // Cost per 1K output tokens (USD)
+   }
+ }
+ ```
+
+ **Available Models:**
+ ```typescript
+ // OpenAI
+ COST_CONFIG['gpt-4o']        // { input: 0.0025, output: 0.01 }
+ COST_CONFIG['gpt-4o-mini']   // { input: 0.00015, output: 0.0006 }
+ COST_CONFIG['gpt-3.5-turbo'] // { input: 0.0005, output: 0.0015 }
+
+ // Anthropic
+ COST_CONFIG['claude-3-5-sonnet-20241022'] // { input: 0.003, output: 0.015 }
+ COST_CONFIG['claude-3-5-haiku-20241022']  // { input: 0.00025, output: 0.00125 }
+
+ // Ollama (all free)
+ COST_CONFIG['llama3.2:3b'] // { input: 0, output: 0 }
+ COST_CONFIG['llama3.2']    // { input: 0, output: 0 }
+ COST_CONFIG['qwen2.5']     // { input: 0, output: 0 }
+ ```
+
+ **Usage:**
+ ```typescript
+ const modelCosts = COST_CONFIG['gpt-4o-mini']
+ console.log(`Input: $${modelCosts.input}/1K tokens`)
+ console.log(`Output: $${modelCosts.output}/1K tokens`)
+ ```
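+
+ Note that looking up a model name that is not listed returns `undefined`, so guard dynamic lookups. A sketch (treating unlisted models as free mirrors the Ollama entries, but is an assumption):
+
+ ```typescript
+ // Hypothetical guard for arbitrary model names
+ function costsFor(modelName: string): { input: number; output: number } {
+   return COST_CONFIG[modelName] ?? { input: 0, output: 0 }
+ }
+ ```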
+
+ ## Complete Example: Custom Endpoint
+
+ ```typescript
+ // app/api/custom/summarize/route.ts
+ import { NextRequest, NextResponse } from 'next/server'
+ import { generateText } from 'ai'
+ import {
+   selectModel,
+   calculateCost,
+   validatePlugin,
+   extractTokens,
+   handleAIError
+ } from '@/contents/plugins/ai/lib/core-utils'
+ import { authenticateRequest } from '@/core/lib/api/auth/dual-auth'
+
+ export async function POST(request: NextRequest) {
+   try {
+     // 1. Authenticate
+     const authResult = await authenticateRequest(request)
+     if (!authResult.success) {
+       return NextResponse.json(
+         { error: 'Authentication required' },
+         { status: 401 }
+       )
+     }
+
+     // 2. Validate plugin
+     const validation = await validatePlugin()
+     if (!validation.valid) {
+       return NextResponse.json(
+         { error: validation.error },
+         { status: 503 }
+       )
+     }
+
+     // 3. Parse request
+     const { text, model = 'gpt-4o-mini' } = await request.json()
+
+     // 4. Select model
+     const selectedModel = await selectModel(model)
+
+     // 5. Generate summary
+     const result = await generateText({
+       model: selectedModel.model,
+       system: 'You are a concise summarizer. Provide key points only.',
+       prompt: `Summarize this text:\n\n${text}`,
+       maxOutputTokens: 300
+     })
+
+     // 6. Calculate metrics
+     const tokens = extractTokens(result)
+     const cost = calculateCost(tokens, selectedModel.costConfig)
+
+     // 7. Return response
+     return NextResponse.json({
+       success: true,
+       summary: result.text,
+       model: selectedModel.modelName,
+       provider: selectedModel.provider,
+       cost,
+       tokens
+     })
+
+   } catch (error) {
+     console.error('Summarize error:', error)
+     const errorInfo = handleAIError(error as Error)
+     return NextResponse.json(
+       { error: errorInfo.error, message: errorInfo.message },
+       { status: errorInfo.status }
+     )
+   }
+ }
+ ```
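+
+ To exercise the endpoint from client code (the request body shape matches what the handler above parses; how you authenticate depends on your `dual-auth` setup):
+
+ ```typescript
+ // Example client call for the route above
+ const res = await fetch('/api/custom/summarize', {
+   method: 'POST',
+   headers: { 'Content-Type': 'application/json' },
+   body: JSON.stringify({
+     text: 'Long article text to summarize...',
+     model: 'gpt-4o-mini' // optional; the handler defaults to gpt-4o-mini
+   })
+ })
+ const data = await res.json()
+ console.log(data.summary, `$${data.cost}`)
+ ```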
+
+ ## TypeScript Types
+
+ Import types for full type safety:
+
+ ```typescript
+ import type {
+   AIProvider,
+   ModelSelection,
+   AIResult,
+   TokenUsage,
+   PluginValidation,
+   AIErrorResult
+ } from '@/contents/plugins/ai/types/ai.types'
+
+ // Use in function signatures
+ async function myCustomFunction(
+   model: string
+ ): Promise<ModelSelection> {
+   return await selectModel(model)
+ }
+
+ function calculateOperationCost(
+   result: AIResult,
+   costConfig: { input: number; output: number }
+ ): number {
+   const tokens: TokenUsage = extractTokens(result)
+   return calculateCost(tokens, costConfig)
+ }
+ ```
+
+ ## Best Practices
+
+ ### 1. Always Validate Plugin
+
+ ```typescript
+ // ✅ Good: Validate before any AI operation
+ const validation = await validatePlugin()
+ if (!validation.valid) {
+   return Response.json({ error: validation.error }, { status: 503 })
+ }
+
+ // ❌ Bad: Skip validation
+ const result = await generateText({ ... }) // May fail unexpectedly
+ ```
+
+ ### 2. Use selectModel() for Consistency
+
+ ```typescript
+ // ✅ Good: Use selectModel utility
+ const selection = await selectModel(modelName)
+ const result = await generateText({ model: selection.model, ... })
+
+ // ❌ Bad: Manually configure providers
+ const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY })
+ const model = openai('gpt-4o-mini')
+ ```
+
+ ### 3. Always Calculate Cost
+
+ ```typescript
+ // ✅ Good: Track all costs
+ const tokens = extractTokens(result)
+ const cost = calculateCost(tokens, selection.costConfig)
+ await saveToHistory({ cost, tokens })
+
+ // ❌ Bad: Ignore costs
+ const result = await generateText({ ... })
+ // No cost tracking
+ ```
+
+ ### 4. Handle Errors Gracefully
+
+ ```typescript
+ // ✅ Good: Use handleAIError
+ try {
+   // AI operation
+ } catch (error) {
+   const errorInfo = handleAIError(error as Error)
+   return Response.json(
+     { error: errorInfo.error, message: errorInfo.message },
+     { status: errorInfo.status }
+   )
+ }
+
+ // ❌ Bad: Generic error handling
+ try {
+   // AI operation
+ } catch (error) {
+   return Response.json({ error: 'Failed' }, { status: 500 })
+ }
+ ```
+
+ ## Next Steps
+
+ - **[Custom Endpoints](./02-custom-endpoints.md)** - Build your own AI endpoints
+ - **[Text Generation](../02-features/01-text-generation.md)** - Use the generate endpoint
+ - **[AI History](../02-features/03-ai-history.md)** - Track operations