@nextsparkjs/plugin-ai 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +79 -0
- package/README.md +529 -0
- package/api/README.md +65 -0
- package/api/ai-history/[id]/route.ts +112 -0
- package/api/embeddings/route.ts +129 -0
- package/api/generate/route.ts +160 -0
- package/docs/01-getting-started/01-introduction.md +237 -0
- package/docs/01-getting-started/02-installation.md +447 -0
- package/docs/01-getting-started/03-configuration.md +416 -0
- package/docs/02-features/01-text-generation.md +523 -0
- package/docs/02-features/02-embeddings.md +241 -0
- package/docs/02-features/03-ai-history.md +549 -0
- package/docs/03-advanced-usage/01-core-utilities.md +500 -0
- package/docs/04-use-cases/01-content-generation.md +453 -0
- package/entities/ai-history/ai-history.config.ts +123 -0
- package/entities/ai-history/ai-history.fields.ts +330 -0
- package/entities/ai-history/messages/en.json +56 -0
- package/entities/ai-history/messages/es.json +56 -0
- package/entities/ai-history/migrations/001_ai_history_table.sql +167 -0
- package/entities/ai-history/migrations/002_ai_history_metas.sql +103 -0
- package/lib/ai-history-meta-service.ts +379 -0
- package/lib/ai-history-service.ts +391 -0
- package/lib/ai-sdk.ts +7 -0
- package/lib/core-utils.ts +217 -0
- package/lib/plugin-env.ts +252 -0
- package/lib/sanitize.ts +122 -0
- package/lib/save-example.ts +237 -0
- package/lib/server-env.ts +104 -0
- package/package.json +23 -0
- package/plugin.config.ts +55 -0
- package/public/docs/login-404-error.png +0 -0
- package/tsconfig.json +47 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/types/ai.types.ts +51 -0
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
/**
 * AI History Entity Linking Endpoint
 *
 * PATCH /api/v1/plugin/ai/ai-history/:id
 *
 * Updates the relatedEntityType and relatedEntityId for an AI history record.
 * Used to link AI operations (e.g., analyze-brief) to entities created afterward (e.g., clients).
 */

import { NextRequest, NextResponse } from 'next/server'
import { auth } from '@nextsparkjs/core/lib/auth'
import { AIHistoryService } from '@/plugins/ai/lib/ai-history-service'

// Route context for the dynamic [id] segment.
// Next.js 15 delivers `params` as a Promise that the handler must await.
interface RouteParams {
  params: Promise<{
    id: string
  }>
}
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* PATCH /api/v1/plugin/ai/ai-history/:id
|
|
22
|
+
* Update related entity information for an AI history record
|
|
23
|
+
*/
|
|
24
|
+
export async function PATCH(
|
|
25
|
+
request: NextRequest,
|
|
26
|
+
{ params }: RouteParams
|
|
27
|
+
): Promise<NextResponse> {
|
|
28
|
+
try {
|
|
29
|
+
// Authenticate user
|
|
30
|
+
const session = await auth.api.getSession({
|
|
31
|
+
headers: request.headers
|
|
32
|
+
})
|
|
33
|
+
|
|
34
|
+
if (!session?.user) {
|
|
35
|
+
return NextResponse.json(
|
|
36
|
+
{ error: 'Unauthorized' },
|
|
37
|
+
{ status: 401 }
|
|
38
|
+
)
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
// Await params (Next.js 15 pattern for dynamic routes)
|
|
42
|
+
const { id: historyId } = await params
|
|
43
|
+
|
|
44
|
+
if (!historyId) {
|
|
45
|
+
return NextResponse.json(
|
|
46
|
+
{ error: 'Missing history ID' },
|
|
47
|
+
{ status: 400 }
|
|
48
|
+
)
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
// Parse request body
|
|
52
|
+
const body = await request.json()
|
|
53
|
+
const { relatedEntityType, relatedEntityId } = body
|
|
54
|
+
|
|
55
|
+
if (!relatedEntityType || !relatedEntityId) {
|
|
56
|
+
return NextResponse.json(
|
|
57
|
+
{
|
|
58
|
+
error: 'Missing required fields',
|
|
59
|
+
details: 'Both relatedEntityType and relatedEntityId are required'
|
|
60
|
+
},
|
|
61
|
+
{ status: 400 }
|
|
62
|
+
)
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
// Verify the AI history record exists and belongs to this user
|
|
66
|
+
const existingRecord = await AIHistoryService.getOperation(historyId)
|
|
67
|
+
|
|
68
|
+
if (!existingRecord) {
|
|
69
|
+
return NextResponse.json(
|
|
70
|
+
{ error: 'AI history record not found' },
|
|
71
|
+
{ status: 404 }
|
|
72
|
+
)
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
if (existingRecord.userId !== session.user.id) {
|
|
76
|
+
return NextResponse.json(
|
|
77
|
+
{ error: 'Forbidden - this record belongs to another user' },
|
|
78
|
+
{ status: 403 }
|
|
79
|
+
)
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
// Update the related entity information
|
|
83
|
+
await AIHistoryService.updateRelatedEntity(
|
|
84
|
+
historyId,
|
|
85
|
+
relatedEntityType,
|
|
86
|
+
relatedEntityId
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
return NextResponse.json({
|
|
90
|
+
success: true,
|
|
91
|
+
message: 'AI history record linked to entity successfully',
|
|
92
|
+
data: {
|
|
93
|
+
historyId,
|
|
94
|
+
relatedEntityType,
|
|
95
|
+
relatedEntityId
|
|
96
|
+
}
|
|
97
|
+
})
|
|
98
|
+
|
|
99
|
+
} catch (error) {
|
|
100
|
+
console.error('Error updating AI history related entity:', error)
|
|
101
|
+
|
|
102
|
+
const errorMessage = error instanceof Error ? error.message : 'Failed to update AI history record'
|
|
103
|
+
|
|
104
|
+
return NextResponse.json(
|
|
105
|
+
{
|
|
106
|
+
error: 'Internal server error',
|
|
107
|
+
details: errorMessage
|
|
108
|
+
},
|
|
109
|
+
{ status: 500 }
|
|
110
|
+
)
|
|
111
|
+
}
|
|
112
|
+
}
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
/**
 * AI Embeddings Endpoint
 *
 * Generate text embeddings using OpenAI's text-embedding-3-small model
 * Accessible via: /api/v1/plugin/ai/embeddings
 */

import { NextRequest, NextResponse } from 'next/server'
import { validatePlugin, handleAIError } from '../../lib/core-utils'
import { getServerPluginConfig } from '../../lib/server-env'
import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
import { embed } from 'ai'
import { openai } from '@ai-sdk/openai'
import { z } from 'zod'

// Request validation schema: a single non-empty text string.
// The 50,000-character cap bounds request size; the model itself is
// limited to 8191 tokens (see the GET endpoint info below).
const EmbeddingRequestSchema = z.object({
  text: z.string().min(1, 'Text cannot be empty').max(50000, 'Text too long')
})
|
|
20
|
+
|
|
21
|
+
export async function POST(request: NextRequest) {
|
|
22
|
+
try {
|
|
23
|
+
// 1. Authentication
|
|
24
|
+
const authResult = await authenticateRequest(request)
|
|
25
|
+
if (!authResult.success) {
|
|
26
|
+
return NextResponse.json({ error: 'Authentication required' }, { status: 401 })
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
// 2. Validate plugin
|
|
30
|
+
const validation = await validatePlugin()
|
|
31
|
+
if (!validation.valid) {
|
|
32
|
+
return NextResponse.json({ error: validation.error }, { status: 503 })
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
// 3. Parse and validate request body
|
|
36
|
+
const rawBody = await request.json()
|
|
37
|
+
const validationResult = EmbeddingRequestSchema.safeParse(rawBody)
|
|
38
|
+
|
|
39
|
+
if (!validationResult.success) {
|
|
40
|
+
return NextResponse.json({
|
|
41
|
+
error: 'Validation failed',
|
|
42
|
+
details: validationResult.error.issues
|
|
43
|
+
}, { status: 400 })
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
const { text } = validationResult.data
|
|
47
|
+
|
|
48
|
+
// 4. Check OpenAI API key
|
|
49
|
+
const config = await getServerPluginConfig()
|
|
50
|
+
if (!process.env.OPENAI_API_KEY) {
|
|
51
|
+
return NextResponse.json({
|
|
52
|
+
error: 'OpenAI API key not configured',
|
|
53
|
+
message: 'Add OPENAI_API_KEY to contents/plugins/ai/.env'
|
|
54
|
+
}, { status: 503 })
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
// 5. Generate embedding using OpenAI text-embedding-3-small (same as n8n)
|
|
58
|
+
const { embedding, usage } = await embed({
|
|
59
|
+
model: openai.embedding('text-embedding-3-small'),
|
|
60
|
+
value: text
|
|
61
|
+
})
|
|
62
|
+
|
|
63
|
+
// 6. Return response
|
|
64
|
+
return NextResponse.json({
|
|
65
|
+
success: true,
|
|
66
|
+
embedding,
|
|
67
|
+
model: 'text-embedding-3-small',
|
|
68
|
+
dimensions: embedding.length,
|
|
69
|
+
tokens: usage?.tokens || 0,
|
|
70
|
+
userId: authResult.user!.id
|
|
71
|
+
})
|
|
72
|
+
|
|
73
|
+
} catch (error) {
|
|
74
|
+
console.error('AI Embeddings error:', error)
|
|
75
|
+
const errorInfo = handleAIError(error as Error)
|
|
76
|
+
return NextResponse.json(
|
|
77
|
+
{ error: errorInfo.error, message: errorInfo.message },
|
|
78
|
+
{ status: errorInfo.status }
|
|
79
|
+
)
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Get endpoint info
|
|
85
|
+
*/
|
|
86
|
+
export async function GET(): Promise<NextResponse> {
|
|
87
|
+
return NextResponse.json({
|
|
88
|
+
endpoint: '/api/v1/plugin/ai/embeddings',
|
|
89
|
+
description: 'Generate text embeddings using OpenAI',
|
|
90
|
+
|
|
91
|
+
usage: {
|
|
92
|
+
method: 'POST',
|
|
93
|
+
body: {
|
|
94
|
+
text: 'string (required) - Text to convert to embedding vector'
|
|
95
|
+
}
|
|
96
|
+
},
|
|
97
|
+
|
|
98
|
+
response: {
|
|
99
|
+
embedding: 'number[] - Vector representation (1536 dimensions)',
|
|
100
|
+
model: 'string - Model used (text-embedding-3-small)',
|
|
101
|
+
dimensions: 'number - Embedding dimensions (1536)',
|
|
102
|
+
tokens: 'number - Tokens used'
|
|
103
|
+
},
|
|
104
|
+
|
|
105
|
+
example: {
|
|
106
|
+
request: {
|
|
107
|
+
text: 'Premium wireless headphones with noise cancellation'
|
|
108
|
+
},
|
|
109
|
+
response: {
|
|
110
|
+
success: true,
|
|
111
|
+
embedding: '[1536 numbers...]',
|
|
112
|
+
model: 'text-embedding-3-small',
|
|
113
|
+
dimensions: 1536,
|
|
114
|
+
tokens: 8
|
|
115
|
+
}
|
|
116
|
+
},
|
|
117
|
+
|
|
118
|
+
model: {
|
|
119
|
+
name: 'text-embedding-3-small',
|
|
120
|
+
dimensions: 1536,
|
|
121
|
+
maxTokens: 8191,
|
|
122
|
+
cost: '$0.00002 per 1K tokens (5x cheaper than ada-002)'
|
|
123
|
+
},
|
|
124
|
+
|
|
125
|
+
setup: {
|
|
126
|
+
required: 'Add OPENAI_API_KEY to contents/plugins/ai/.env'
|
|
127
|
+
}
|
|
128
|
+
})
|
|
129
|
+
}
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
/**
 * AI Generate Endpoint
 *
 * Simple AI assistant endpoint - generic and helpful
 * Accessible via: /api/plugin/ai/generate
 *
 * NOTE(review): the embeddings endpoint documents an /api/v1/plugin/...
 * prefix while this file uses /api/plugin/... — confirm the actual mount
 * path and make the two consistent.
 */

import { NextRequest, NextResponse } from 'next/server'
import { selectModel, calculateCost, validatePlugin, extractTokens, handleAIError } from '../../lib/core-utils'
import { getServerPluginConfig } from '../../lib/server-env'
import { authenticateRequest } from '@nextsparkjs/core/lib/api/auth/dual-auth'
import { generateText } from 'ai'
import { saveExampleSafely } from '../../lib/save-example'
import { z } from 'zod'

// Simple, generic system prompt
const SYSTEM_PROMPT = `You are a helpful AI assistant. Provide accurate, helpful, and well-structured responses. Be concise but thorough, and always aim to be useful to the person asking.`

// Request validation schema.
// model/maxTokens/temperature are optional and fall back to the plugin's
// server configuration inside POST; saveExample is an explicit opt-in
// (data is sanitized before storage, per the GET endpoint description).
const GenerateRequestSchema = z.object({
  prompt: z.string().min(1, 'Prompt cannot be empty').max(10000, 'Prompt too long'),
  model: z.string().optional(),
  maxTokens: z.number().min(1).max(10000).optional(),
  temperature: z.number().min(0).max(1).optional(),
  saveExample: z.boolean().optional().default(false)
})
|
|
27
|
+
|
|
28
|
+
export async function POST(request: NextRequest) {
|
|
29
|
+
try {
|
|
30
|
+
// 1. Authentication
|
|
31
|
+
const authResult = await authenticateRequest(request)
|
|
32
|
+
if (!authResult.success) {
|
|
33
|
+
return NextResponse.json({ error: 'Authentication required' }, { status: 401 })
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
// 2. Validate plugin
|
|
37
|
+
const validation = await validatePlugin()
|
|
38
|
+
if (!validation.valid) {
|
|
39
|
+
return NextResponse.json({ error: validation.error }, { status: 503 })
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
// 3. Get default configuration from environment
|
|
43
|
+
const config = await getServerPluginConfig()
|
|
44
|
+
|
|
45
|
+
// 4. Parse and validate request body
|
|
46
|
+
const rawBody = await request.json()
|
|
47
|
+
const validationResult = GenerateRequestSchema.safeParse(rawBody)
|
|
48
|
+
|
|
49
|
+
if (!validationResult.success) {
|
|
50
|
+
return NextResponse.json({
|
|
51
|
+
error: 'Validation failed',
|
|
52
|
+
details: validationResult.error.issues
|
|
53
|
+
}, { status: 400 })
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
const {
|
|
57
|
+
prompt,
|
|
58
|
+
model = config.defaultModel,
|
|
59
|
+
maxTokens = config.maxTokens,
|
|
60
|
+
temperature = config.defaultTemperature,
|
|
61
|
+
saveExample = false
|
|
62
|
+
} = validationResult.data
|
|
63
|
+
|
|
64
|
+
// 5. Select model
|
|
65
|
+
const selectedModel = await selectModel(model)
|
|
66
|
+
|
|
67
|
+
// 6. Generate AI response
|
|
68
|
+
const result = await generateText({
|
|
69
|
+
model: selectedModel.model,
|
|
70
|
+
system: SYSTEM_PROMPT,
|
|
71
|
+
prompt,
|
|
72
|
+
maxOutputTokens: maxTokens,
|
|
73
|
+
temperature
|
|
74
|
+
})
|
|
75
|
+
|
|
76
|
+
// 7. Calculate metrics
|
|
77
|
+
const tokens = extractTokens(result)
|
|
78
|
+
const cost = calculateCost(tokens, selectedModel.costConfig)
|
|
79
|
+
|
|
80
|
+
// 8. Save example if requested (opt-in)
|
|
81
|
+
if (saveExample) {
|
|
82
|
+
await saveExampleSafely(
|
|
83
|
+
{
|
|
84
|
+
prompt,
|
|
85
|
+
response: result.text,
|
|
86
|
+
model: selectedModel.modelName,
|
|
87
|
+
status: 'completed',
|
|
88
|
+
metadata: {
|
|
89
|
+
tokens: tokens.total,
|
|
90
|
+
cost,
|
|
91
|
+
provider: selectedModel.provider,
|
|
92
|
+
isLocal: selectedModel.isLocal
|
|
93
|
+
}
|
|
94
|
+
},
|
|
95
|
+
authResult.user!.id
|
|
96
|
+
)
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
// 9. Return response
|
|
100
|
+
return NextResponse.json({
|
|
101
|
+
success: true,
|
|
102
|
+
response: result.text,
|
|
103
|
+
model: selectedModel.modelName,
|
|
104
|
+
provider: selectedModel.provider,
|
|
105
|
+
isLocal: selectedModel.isLocal,
|
|
106
|
+
cost,
|
|
107
|
+
tokens,
|
|
108
|
+
userId: authResult.user!.id,
|
|
109
|
+
exampleSaved: saveExample
|
|
110
|
+
})
|
|
111
|
+
|
|
112
|
+
} catch (error) {
|
|
113
|
+
console.error('AI Generate error:', error)
|
|
114
|
+
const errorInfo = handleAIError(error as Error)
|
|
115
|
+
return NextResponse.json(
|
|
116
|
+
{ error: errorInfo.error, message: errorInfo.message },
|
|
117
|
+
{ status: errorInfo.status }
|
|
118
|
+
)
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
/**
|
|
123
|
+
* Get endpoint info
|
|
124
|
+
*/
|
|
125
|
+
export async function GET(): Promise<NextResponse> {
|
|
126
|
+
const config = await getServerPluginConfig()
|
|
127
|
+
|
|
128
|
+
return NextResponse.json({
|
|
129
|
+
endpoint: '/api/plugin/ai/generate',
|
|
130
|
+
description: 'Simple AI assistant endpoint',
|
|
131
|
+
|
|
132
|
+
usage: {
|
|
133
|
+
method: 'POST',
|
|
134
|
+
body: {
|
|
135
|
+
prompt: 'string (required) - Your question or request',
|
|
136
|
+
model: `string (optional, default: ${config.defaultModel}) - AI model to use`,
|
|
137
|
+
maxTokens: `number (optional, default: ${config.maxTokens}) - Max response length`,
|
|
138
|
+
temperature: `number (optional, default: ${config.defaultTemperature}) - Response creativity (0-1)`,
|
|
139
|
+
saveExample: 'boolean (optional, default: false) - Save interaction as example (opt-in, data will be sanitized)'
|
|
140
|
+
}
|
|
141
|
+
},
|
|
142
|
+
|
|
143
|
+
example: {
|
|
144
|
+
prompt: 'Explain quantum computing in simple terms',
|
|
145
|
+
model: config.defaultModel
|
|
146
|
+
},
|
|
147
|
+
|
|
148
|
+
models: {
|
|
149
|
+
local: ['llama3.2:3b', 'llama3.2', 'llama3.1', 'qwen2.5', 'mistral'],
|
|
150
|
+
openai: ['gpt-4o', 'gpt-4o-mini', 'gpt-3.5-turbo'],
|
|
151
|
+
anthropic: ['claude-3-5-sonnet-20241022', 'claude-3-5-haiku-20241022']
|
|
152
|
+
},
|
|
153
|
+
|
|
154
|
+
setup: {
|
|
155
|
+
local: `ollama serve && ollama pull ${config.ollamaDefaultModel}`,
|
|
156
|
+
openai: 'Add OPENAI_API_KEY to contents/plugins/ai/.env',
|
|
157
|
+
anthropic: 'Add ANTHROPIC_API_KEY to contents/plugins/ai/.env'
|
|
158
|
+
}
|
|
159
|
+
})
|
|
160
|
+
}
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
# AI Plugin Introduction
|
|
2
|
+
|
|
3
|
+
## Overview
|
|
4
|
+
|
|
5
|
+
The **AI Plugin** is a versatile, multi-provider AI utility system that brings powerful artificial intelligence capabilities to your SaaS application. Unlike monolithic AI solutions, this plugin provides **core utilities and example implementations** that you can extend and customize for your specific needs.
|
|
6
|
+
|
|
7
|
+
**Philosophy:**
|
|
8
|
+
- **Utility-First** - Provides reusable functions, not rigid solutions
|
|
9
|
+
- **Provider-Agnostic** - Supports OpenAI, Anthropic, and Ollama (local)
|
|
10
|
+
- **Extensible** - Build custom endpoints using provided utilities
|
|
11
|
+
- **Production-Ready** - Includes history tracking, cost calculation, and error handling
|
|
12
|
+
|
|
13
|
+
## Multi-Provider Support
|
|
14
|
+
|
|
15
|
+
### Supported Providers
|
|
16
|
+
|
|
17
|
+
**OpenAI**
|
|
18
|
+
- Models: GPT-4o, GPT-4o Mini, GPT-3.5 Turbo
|
|
19
|
+
- Best for: Production deployments, highest quality responses
|
|
20
|
+
- Cost: Pay per token (~$0.00015-0.0025 per 1K input tokens)
|
|
21
|
+
|
|
22
|
+
**Anthropic**
|
|
23
|
+
- Models: Claude 3.5 Sonnet, Claude 3.5 Haiku
|
|
24
|
+
- Best for: Complex reasoning, long context windows
|
|
25
|
+
- Cost: Pay per token (~$0.00025-0.003 per 1K input tokens)
|
|
26
|
+
|
|
27
|
+
**Ollama (Local)**
|
|
28
|
+
- Models: Llama 3.2, Llama 3.1, Qwen 2.5, Mistral, Gemma 2
|
|
29
|
+
- Best for: Development, privacy-sensitive applications, cost-free inference
|
|
30
|
+
- Cost: Free (runs on your hardware)
|
|
31
|
+
|
|
32
|
+
## Key Use Cases
|
|
33
|
+
|
|
34
|
+
### 1. Content Generation
|
|
35
|
+
|
|
36
|
+
Generate high-quality content for various purposes:
|
|
37
|
+
- **Marketing Copy** - Product descriptions, ad copy, landing pages
|
|
38
|
+
- **Blog Posts** - Articles, tutorials, documentation
|
|
39
|
+
- **Email Campaigns** - Personalized emails, newsletters
|
|
40
|
+
- **Social Media** - Posts, captions, engagement content
|
|
41
|
+
|
|
42
|
+
**Already in use for:** Content creation workflows, automated copywriting
|
|
43
|
+
|
|
44
|
+
### 2. AI-Powered Auditing
|
|
45
|
+
|
|
46
|
+
Analyze and audit content using AI:
|
|
47
|
+
- **Content Analysis** - Quality checks, tone analysis, compliance
|
|
48
|
+
- **Data Validation** - Structured data extraction and verification
|
|
49
|
+
- **Report Generation** - Automated insights and summaries
|
|
50
|
+
- **Quality Assurance** - Consistency checks across content
|
|
51
|
+
|
|
52
|
+
**Already in use for:** Content auditing workflows, quality control
|
|
53
|
+
|
|
54
|
+
### 3. Semantic Search with Embeddings
|
|
55
|
+
|
|
56
|
+
Enable meaning-based search and recommendations:
|
|
57
|
+
- **Semantic Search** - Find content by meaning, not just keywords
|
|
58
|
+
- **Content Recommendations** - Similar content suggestions
|
|
59
|
+
- **Similarity Matching** - Duplicate detection, content clustering
|
|
60
|
+
- **RAG (Retrieval Augmented Generation)** - Context-aware AI responses
|
|
61
|
+
|
|
62
|
+
**Already in use for:** Embedding generation for search systems
|
|
63
|
+
|
|
64
|
+
### 4. Custom AI Workflows
|
|
65
|
+
|
|
66
|
+
Build specialized AI features:
|
|
67
|
+
- **Classification** - Categorize content, sentiment analysis
|
|
68
|
+
- **Summarization** - Extract key points from long text
|
|
69
|
+
- **Translation** - Multi-language support
|
|
70
|
+
- **Custom Analysis** - Domain-specific AI operations
|
|
71
|
+
|
|
72
|
+
## Architecture
|
|
73
|
+
|
|
74
|
+
### Core Components
|
|
75
|
+
|
|
76
|
+
```
|
|
77
|
+
contents/plugins/ai/
|
|
78
|
+
├── lib/
|
|
79
|
+
│ ├── core-utils.ts # Core utility functions
|
|
80
|
+
│ ├── ai-history-service.ts # History tracking
|
|
81
|
+
│ └── server-env.ts # Configuration
|
|
82
|
+
│
|
|
83
|
+
├── api/
|
|
84
|
+
│ ├── generate/ # Text generation endpoint
|
|
85
|
+
│ ├── embeddings/ # Embeddings endpoint
|
|
86
|
+
│ └── ai-history/ # History management
|
|
87
|
+
│
|
|
88
|
+
├── entities/
|
|
89
|
+
│ └── ai-history/ # AI History entity
|
|
90
|
+
│
|
|
91
|
+
├── types/
|
|
92
|
+
│ └── ai.types.ts # TypeScript types
|
|
93
|
+
│
|
|
94
|
+
└── plugin.config.ts # Plugin configuration
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### Core Utilities (`core-utils.ts`)
|
|
98
|
+
|
|
99
|
+
**Primary Functions:**
|
|
100
|
+
- `selectModel()` - Automatically select and configure AI models
|
|
101
|
+
- `calculateCost()` - Track token usage and costs
|
|
102
|
+
- `validatePlugin()` - Ensure plugin is properly configured
|
|
103
|
+
- `extractTokens()` - Extract token usage from AI responses
|
|
104
|
+
- `handleAIError()` - Consistent error handling across providers
|
|
105
|
+
- `COST_CONFIG` - Up-to-date pricing for all models
|
|
106
|
+
|
|
107
|
+
### Example Endpoints
|
|
108
|
+
|
|
109
|
+
**`/api/plugin/ai/generate`**
|
|
110
|
+
- General-purpose text generation
|
|
111
|
+
- Supports all providers and models
|
|
112
|
+
- Includes cost tracking and history
|
|
113
|
+
- Flexible system prompts and parameters
|
|
114
|
+
|
|
115
|
+
**`/api/plugin/ai/embeddings`**
|
|
116
|
+
- Generate semantic embeddings
|
|
117
|
+
- Uses OpenAI text-embedding-3-small (1536 dimensions)
|
|
118
|
+
- Optimized for search and recommendations
|
|
119
|
+
|
|
120
|
+
### History Tracking System
|
|
121
|
+
|
|
122
|
+
**AI History Entity** tracks every AI operation:
|
|
123
|
+
- **Audit Trail** - Complete record of all AI interactions
|
|
124
|
+
- **Cost Tracking** - Token usage and estimated costs
|
|
125
|
+
- **Performance Monitoring** - Response times and success rates
|
|
126
|
+
- **Entity Linking** - Connect AI operations to application entities
|
|
127
|
+
- **Metadata Support** - Store custom operation data
|
|
128
|
+
|
|
129
|
+
### Vercel AI SDK Integration
|
|
130
|
+
|
|
131
|
+
Built on the [Vercel AI SDK](https://sdk.vercel.ai/):
|
|
132
|
+
- **Unified API** - Consistent interface across providers
|
|
133
|
+
- **Streaming Support** - Real-time response streaming
|
|
134
|
+
- **Type Safety** - Full TypeScript support
|
|
135
|
+
- **Error Handling** - Robust error management
|
|
136
|
+
|
|
137
|
+
## Versatility and Extensibility
|
|
138
|
+
|
|
139
|
+
### Why This Plugin is Different
|
|
140
|
+
|
|
141
|
+
**Not a Chatbot Plugin:**
|
|
142
|
+
- Doesn't force you into a specific UI or chat interface
|
|
143
|
+
- Provides building blocks, not finished products
|
|
144
|
+
- You decide how to use AI in your application
|
|
145
|
+
|
|
146
|
+
**Utility-Based Design:**
|
|
147
|
+
- Import core functions into your own endpoints
|
|
148
|
+
- Build custom workflows using provided utilities
|
|
149
|
+
- Extend with your own logic and business rules
|
|
150
|
+
|
|
151
|
+
### Building Custom Endpoints
|
|
152
|
+
|
|
153
|
+
```typescript
|
|
154
|
+
// Your custom endpoint using plugin utilities
|
|
155
|
+
import { selectModel, calculateCost, extractTokens } from '@/contents/plugins/ai/lib/core-utils'
|
|
156
|
+
import { generateText } from 'ai'
|
|
157
|
+
|
|
158
|
+
export async function POST(request: Request) {
|
|
159
|
+
// Your custom logic here
|
|
160
|
+
const { prompt, model } = await request.json()
|
|
161
|
+
|
|
162
|
+
// Use plugin utilities
|
|
163
|
+
const selectedModel = await selectModel(model)
|
|
164
|
+
const result = await generateText({
|
|
165
|
+
model: selectedModel.model,
|
|
166
|
+
prompt: `Custom system prompt\n\n${prompt}`
|
|
167
|
+
})
|
|
168
|
+
|
|
169
|
+
const tokens = extractTokens(result)
|
|
170
|
+
const cost = calculateCost(tokens, selectedModel.costConfig)
|
|
171
|
+
|
|
172
|
+
return Response.json({
|
|
173
|
+
response: result.text,
|
|
174
|
+
cost,
|
|
175
|
+
tokens
|
|
176
|
+
})
|
|
177
|
+
}
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
## Real-World Applications
|
|
181
|
+
|
|
182
|
+
### Content Generation Platform
|
|
183
|
+
|
|
184
|
+
**Use Case:** Automated marketing content creation
|
|
185
|
+
**Implementation:** Custom endpoint using `generate` with specialized prompts
|
|
186
|
+
**Result:** 10x faster content production with consistent quality
|
|
187
|
+
|
|
188
|
+
### AI-Powered CMS
|
|
189
|
+
|
|
190
|
+
**Use Case:** Content auditing and quality control
|
|
191
|
+
**Implementation:** Analysis workflows using custom prompts and history tracking
|
|
192
|
+
**Result:** Automated quality checks across thousands of content pieces
|
|
193
|
+
|
|
194
|
+
### Semantic Product Search
|
|
195
|
+
|
|
196
|
+
**Use Case:** E-commerce search by description
|
|
197
|
+
**Implementation:** Embeddings generation + vector database integration
|
|
198
|
+
**Result:** Customers find products by describing what they want
|
|
199
|
+
|
|
200
|
+
## Getting Started
|
|
201
|
+
|
|
202
|
+
### Quick Setup
|
|
203
|
+
|
|
204
|
+
1. **Enable Plugin** - Activate in theme configuration
|
|
205
|
+
2. **Configure Provider** - Set up API keys or Ollama
|
|
206
|
+
3. **Test Endpoint** - Try the generate endpoint
|
|
207
|
+
4. **Build Custom Features** - Use utilities for your use cases
|
|
208
|
+
|
|
209
|
+
### Next Steps
|
|
210
|
+
|
|
211
|
+
- **[Installation](./02-installation.md)** - Set up the plugin
|
|
212
|
+
- **[Configuration](./03-configuration.md)** - Configure providers and settings
|
|
213
|
+
- **[Text Generation](../02-features/01-text-generation.md)** - Start generating content
|
|
214
|
+
- **[Core Utilities](../03-advanced-usage/01-core-utilities.md)** - Build custom endpoints
|
|
215
|
+
|
|
216
|
+
## Key Features Summary
|
|
217
|
+
|
|
218
|
+
✅ **Multi-Provider Support** - OpenAI, Anthropic, Ollama
|
|
219
|
+
✅ **Text Generation** - Flexible content creation
|
|
220
|
+
✅ **Embeddings** - Semantic search capabilities
|
|
221
|
+
✅ **History Tracking** - Complete audit trail
|
|
222
|
+
✅ **Cost Calculation** - Automatic usage tracking
|
|
223
|
+
✅ **Error Handling** - Robust error management
|
|
224
|
+
✅ **TypeScript** - Full type safety
|
|
225
|
+
✅ **Extensible** - Build custom endpoints
|
|
226
|
+
✅ **Production-Ready** - Used in real applications
|
|
227
|
+
|
|
228
|
+
## Philosophy
|
|
229
|
+
|
|
230
|
+
This plugin is designed for developers who want:
|
|
231
|
+
- **Control** - Build exactly what you need
|
|
232
|
+
- **Flexibility** - Choose your provider and model
|
|
233
|
+
- **Simplicity** - Clean utilities without bloat
|
|
234
|
+
- **Performance** - Efficient, well-tested code
|
|
235
|
+
- **Maintainability** - Clear patterns and documentation
|
|
236
|
+
|
|
237
|
+
It's not a black box. It's a toolkit.
|