@nextsparkjs/plugin-ai 0.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +79 -0
- package/README.md +529 -0
- package/api/README.md +65 -0
- package/api/ai-history/[id]/route.ts +112 -0
- package/api/embeddings/route.ts +129 -0
- package/api/generate/route.ts +160 -0
- package/docs/01-getting-started/01-introduction.md +237 -0
- package/docs/01-getting-started/02-installation.md +447 -0
- package/docs/01-getting-started/03-configuration.md +416 -0
- package/docs/02-features/01-text-generation.md +523 -0
- package/docs/02-features/02-embeddings.md +241 -0
- package/docs/02-features/03-ai-history.md +549 -0
- package/docs/03-advanced-usage/01-core-utilities.md +500 -0
- package/docs/04-use-cases/01-content-generation.md +453 -0
- package/entities/ai-history/ai-history.config.ts +123 -0
- package/entities/ai-history/ai-history.fields.ts +330 -0
- package/entities/ai-history/messages/en.json +56 -0
- package/entities/ai-history/messages/es.json +56 -0
- package/entities/ai-history/migrations/001_ai_history_table.sql +167 -0
- package/entities/ai-history/migrations/002_ai_history_metas.sql +103 -0
- package/lib/ai-history-meta-service.ts +379 -0
- package/lib/ai-history-service.ts +391 -0
- package/lib/ai-sdk.ts +7 -0
- package/lib/core-utils.ts +217 -0
- package/lib/plugin-env.ts +252 -0
- package/lib/sanitize.ts +122 -0
- package/lib/save-example.ts +237 -0
- package/lib/server-env.ts +104 -0
- package/package.json +23 -0
- package/plugin.config.ts +55 -0
- package/public/docs/login-404-error.png +0 -0
- package/tsconfig.json +47 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/types/ai.types.ts +51 -0
package/.env.example
ADDED
@@ -0,0 +1,79 @@
# ============================================
# AI PLUGIN CONFIGURATION
# ============================================
#
# Copy this file to .env and configure your AI provider credentials
# This file is loaded automatically by the AI plugin
#
# ============================================

# ===========================
# AI Provider Credentials
# ===========================

# Anthropic API Key for Claude models
# Get your key from: https://console.anthropic.com/
ANTHROPIC_API_KEY=your-anthropic-key-here

# OpenAI API Key for GPT models
# Get your key from: https://platform.openai.com/
OPENAI_API_KEY=your-openai-key-here

# ===========================
# AI Provider Selection
# ===========================

# Use Local AI (Ollama) by default - set to false to use cloud models
USE_LOCAL_AI=false

# Default model when using cloud providers
DEFAULT_CLOUD_MODEL=claude-sonnet-4-5-20250929

# ===========================
# Ollama Configuration (local models)
# ===========================

OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_DEFAULT_MODEL=llama3.2:3b

# ===========================
# AI Plugin Configuration
# ===========================

# Enable/disable the AI plugin
AI_PLUGIN_ENABLED=true

# Enable debug logging for development
AI_PLUGIN_DEBUG=false

# Default provider preference (anthropic, openai, ollama)
AI_PLUGIN_DEFAULT_PROVIDER=anthropic

# Maximum tokens for AI generation
AI_PLUGIN_MAX_TOKENS=4000

# Default temperature for AI generation (0.0-1.0)
AI_PLUGIN_DEFAULT_TEMPERATURE=0.7

# ===========================
# Cost Tracking
# ===========================

# Enable cost tracking and monitoring
AI_PLUGIN_COST_TRACKING_ENABLED=true

# Daily cost limit (USD)
AI_PLUGIN_DAILY_COST_LIMIT=10.00

# Monthly cost limit (USD)
AI_PLUGIN_MONTHLY_COST_LIMIT=100.00

# ===========================
# Rate Limiting
# ===========================

# Maximum requests per minute
AI_PLUGIN_RATE_LIMIT_REQUESTS_PER_MINUTE=60

# Maximum tokens per minute
AI_PLUGIN_RATE_LIMIT_TOKENS_PER_MINUTE=50000
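
As an illustrative aside (not part of the published file above), the boolean and numeric settings in this .env.example could be parsed and validated along the following lines; the plugin ships its own loaders (`lib/plugin-env.ts`, `lib/server-env.ts`), which may handle this differently.

```ts
// Illustrative only: parse and sanity-check a few of the settings documented above.
interface AIPluginSettings {
  enabled: boolean
  debug: boolean
  maxTokens: number
  defaultTemperature: number
  dailyCostLimit: number
}

function toBool(value: string | undefined, fallback: boolean): boolean {
  if (value === undefined) return fallback
  return value.toLowerCase() === 'true'
}

function toNumber(value: string | undefined, fallback: number): number {
  const n = Number(value)
  return Number.isFinite(n) ? n : fallback
}

function loadSettings(env: NodeJS.ProcessEnv = process.env): AIPluginSettings {
  const temperature = toNumber(env.AI_PLUGIN_DEFAULT_TEMPERATURE, 0.7)
  return {
    enabled: toBool(env.AI_PLUGIN_ENABLED, true),
    debug: toBool(env.AI_PLUGIN_DEBUG, false),
    maxTokens: toNumber(env.AI_PLUGIN_MAX_TOKENS, 4000),
    // The .env.example documents a 0.0-1.0 range, so clamp to it.
    defaultTemperature: Math.min(1, Math.max(0, temperature)),
    dailyCostLimit: toNumber(env.AI_PLUGIN_DAILY_COST_LIMIT, 10),
  }
}
```
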
package/README.md
ADDED
@@ -0,0 +1,529 @@
# AI Plugin

Enterprise AI plugin with multi-model support, content generation, and safety features. Designed to integrate dynamically with the entity and plugin system.

## Features

### 🤖 Multi-Model Support
- **Local**: Ollama, LM Studio, LocalAI
- **Cloud**: OpenAI, Anthropic, Groq
- **Automatic selection** based on use case
- **Strategic fallback** when models are unavailable

### 🎯 Content Generation
- **Flexible templates** for different content types
- **Per-platform optimization** (Twitter, Instagram, LinkedIn, etc.)
- **Automatic variant generation**
- **AI-powered suggestions**

### 🛡️ Safety Features
- **Automatic PII detection**
- **Toxic content filters**
- **Sensitive data masking**
- **GDPR/CCPA compliance**

### 💰 Cost Management
- **Real-time token tracking**
- **Per-request cost estimation**
- **Configurable quota limits**
- **Dynamic rate limiting**

### 🔌 Entity Integration
- **Dynamic processing** of any entity type
- **Automatic content analysis**
- **Intelligent data enhancement**
- **Contextual suggestions**

## Installation and Configuration

### ⭐ New: Plugin-Level Environment Variables

**You can now configure the plugin with its own environment variables.**

1. **Copy the example file:**
```bash
cp contents/plugins/ai/.env.example contents/plugins/ai/.env
```

2. **Configure your API keys:**
```env
# API Keys
ANTHROPIC_API_KEY=sk-ant-api03-your_anthropic_key
OPENAI_API_KEY=sk-your_openai_key

# Plugin Configuration
AI_PLUGIN_ENABLED=true
AI_PLUGIN_DEBUG=false
AI_PLUGIN_DEFAULT_PROVIDER=ollama

# Ollama (Local Models)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_DEFAULT_MODEL=llama3.2:3b

# Cost Management
AI_PLUGIN_COST_TRACKING_ENABLED=true
AI_PLUGIN_DAILY_COST_LIMIT=10.00
AI_PLUGIN_MONTHLY_COST_LIMIT=100.00
```

### Advantages of Plugin-Level Configuration

- ✅ **Isolation**: Plugin-specific configuration without polluting the main `.env`
- ✅ **Modularity**: Each plugin manages its own environment variables
- ✅ **Security**: API keys are scoped to the plugin
- ✅ **Flexibility**: Different plugins can use different API keys
- ✅ **Fallback**: Falls back to system environment variables if no plugin `.env` exists

### System Environment Variables (Legacy Method)

You can also use global environment variables:

```env
# OpenAI
OPENAI_API_KEY=your_openai_key

# Anthropic
ANTHROPIC_API_KEY=your_anthropic_key

# Groq
GROQ_API_KEY=your_groq_key

# Local configuration (optional)
OLLAMA_BASE_URL=http://localhost:11434
LM_STUDIO_BASE_URL=http://localhost:1234
```

### Plugin Configuration

The plugin registers itself automatically in the system and loads its configuration from the following sources (a minimal resolution sketch follows this list):

1. **First priority**: `contents/plugins/ai/.env` (plugin-specific file)
2. **Second priority**: System environment variables (`process.env`)
3. **Third priority**: Built-in defaults
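
For illustration only, here is one way that three-tier resolution could be implemented; the helper names below are hypothetical, and the plugin's actual loader (`lib/plugin-env.ts`) may differ.

```ts
// Hypothetical sketch of the resolution order above — not the plugin's actual loader.
import { existsSync, readFileSync } from 'node:fs'
import path from 'node:path'

type EnvMap = Record<string, string>

// Read KEY=value pairs from the plugin-level .env file, if it exists.
function readPluginEnvFile(pluginDir: string): EnvMap {
  const envPath = path.join(pluginDir, '.env')
  if (!existsSync(envPath)) return {}
  const map: EnvMap = {}
  for (const line of readFileSync(envPath, 'utf8').split('\n')) {
    const trimmed = line.trim()
    if (!trimmed || trimmed.startsWith('#')) continue
    const eq = trimmed.indexOf('=')
    if (eq > 0) map[trimmed.slice(0, eq)] = trimmed.slice(eq + 1)
  }
  return map
}

// 1) plugin .env  →  2) process.env  →  3) built-in default
function resolveEnv(key: string, fallback: string, pluginEnv: EnvMap): string {
  return pluginEnv[key] ?? process.env[key] ?? fallback
}

const pluginEnv = readPluginEnvFile('contents/plugins/ai')
const provider = resolveEnv('AI_PLUGIN_DEFAULT_PROVIDER', 'ollama', pluginEnv)
```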

## Usage

### 1. React Components

#### AIProvider
Context provider for all AI functionality:

```tsx
import { AIProvider } from '@/contents/plugins/ai/components/AIProvider'

function App() {
  return (
    <AIProvider
      userId="user123"
      preferLocal={false}
      autoSelect={true}
    >
      <YourApp />
    </AIProvider>
  )
}
```

#### AIChat
Built-in AI chat interface:

```tsx
import { AIChat } from '@/contents/plugins/ai/components/AIChat'

function ChatPage() {
  return (
    <AIChat
      entityContext={{
        type: 'task',
        id: 'task_123',
        data: { title: 'Important project' }
      }}
      contentType="custom"
      showModelSelector={true}
      onEntityProcessed={(result) => {
        console.log('Entity processed:', result)
      }}
    />
  )
}
```

#### ContentGenerator
Template-based content generator:

```tsx
import { ContentGenerator } from '@/contents/plugins/ai/components/ContentGenerator'

function ContentPage() {
  return (
    <ContentGenerator
      defaultContentType="social"
      showTemplates={true}
      autoSuggestions={true}
      onContentGenerated={(content) => {
        console.log('Generated content:', content)
      }}
    />
  )
}
```

### 2. Hooks

#### useAI
Main hook for AI functionality:

```tsx
import { useAI } from '@/contents/plugins/ai/hooks/useAI'

function MyComponent() {
  const ai = useAI({
    userId: 'user123',
    autoCheck: true
  })

  const handleGenerate = async () => {
    const result = await ai.generateText('Write a professional email')
    console.log(result)
  }

  const handleAnalyze = async () => {
    const analysis = await ai.analyze('This is my content')
    console.log(analysis)
  }

  return (
    <div>
      <button onClick={handleGenerate}>Generate</button>
      <button onClick={handleAnalyze}>Analyze</button>
    </div>
  )
}
```

#### useContentGeneration
Specialized hook for content generation:

```tsx
import { useContentGeneration } from '@/contents/plugins/ai/hooks/useContentGeneration'

function ContentComponent() {
  const content = useContentGeneration({
    defaultContentType: 'blog',
    autoSuggestions: true
  })

  const handleGenerate = async () => {
    const result = await content.generateContent(
      'Write about technology',
      {
        platform: 'linkedin',
        tone: 'professional',
        audience: 'developers'
      }
    )
    console.log(result)
  }

  return <button onClick={handleGenerate}>Generate Content</button>
}
```

### 3. REST API

#### ⭐ New: Multi-Provider Content Generation

**Using Anthropic Claude (plugin configuration):**
```bash
POST /api/plugin/ai/generate
Content-Type: application/json

{
  "prompt": "Explain quantum computing in simple terms",
  "model": "claude-3-5-haiku-20241022",
  "contentType": "blog",
  "maxTokens": 500
}
```

**Using OpenAI GPT (plugin configuration):**
```bash
POST /api/plugin/ai/generate
Content-Type: application/json

{
  "prompt": "Write a function to reverse a string",
  "model": "gpt-4o-mini",
  "contentType": "analysis",
  "temperature": 0.3
}
```

**Using local Ollama (no API key required):**
```bash
POST /api/plugin/ai/generate
Content-Type: application/json

{
  "prompt": "Create a social media post about sustainability",
  "model": "llama3.2:3b",
  "contentType": "social"
}
```

**Response with real-time cost tracking:**
```json
{
  "success": true,
  "data": {
    "content": "Generated content here...",
    "model": "claude-3-5-haiku-20241022",
    "provider": "anthropic",
    "isLocal": false,
    "tokens": {
      "input": 25,
      "output": 150,
      "total": 175
    },
    "cost": 0.000194,
    "metadata": {
      "costBreakdown": {
        "inputTokens": 25,
        "outputTokens": 150,
        "inputCost": 0.00000625,
        "outputCost": 0.0001875,
        "totalCost": 0.000194
      }
    }
  }
}
```

#### Capability Information
```bash
GET /api/plugin/ai/generate
```

#### Generate Content (Legacy Method)
```bash
POST /api/v1/ai/generate
Content-Type: application/json

{
  "prompt": "Write a tweet about AI",
  "contentType": "social",
  "platform": "twitter",
  "tone": "casual"
}
```

#### Analyze Content
```bash
POST /api/v1/ai/analyze
Content-Type: application/json

{
  "content": "This is my content to analyze",
  "analysisType": "comprehensive"
}
```

#### Process Entity
```bash
POST /api/v1/ai/entity
Content-Type: application/json

{
  "entityType": "task",
  "entityId": "task_123",
  "action": "analyze",
  "prompt": "Analyze this task and suggest improvements"
}
```

### 4. Direct Integration with AIAPI

For direct use from server-side code:

```tsx
import { AIAPI } from '@/contents/plugins/ai/lib/ai-api'

// Simple generation
const text = await AIAPI.generateText('Hello world')

// Advanced generation
const response = await AIAPI.generateContentAdvanced({
  prompt: 'Write an article',
  systemPrompt: 'You are an expert writer',
  options: {
    contentType: 'blog',
    temperature: 0.7
  }
})

// Entity processing
const result = await AIAPI.processEntity(
  'user',
  { name: 'Juan', email: 'juan@example.com' },
  'analyze'
)

// Safety check
const safety = await AIAPI.isContentSafe('This content')
```

## Architecture

### File Structure
```
contents/plugins/ai/
├── plugin.config.ts            # Main plugin configuration
├── types/
│   └── ai.types.ts             # TypeScript type definitions
├── lib/
│   ├── ai-api.ts               # Main exported API
│   ├── ai-core.ts              # Core AI functionality
│   ├── model-selector.ts       # Model selection
│   ├── prompts.ts              # Modular prompt system
│   ├── safety.ts               # Safety features
│   ├── cost-tracker.ts         # Cost tracking
│   └── rate-limiter.ts         # Rate limiting
├── hooks/
│   ├── useAI.ts                # Main AI hook
│   ├── useContentGeneration.ts # Content generation hook
│   └── useModelSelection.ts    # Model selection hook
├── components/
│   ├── AIProvider.tsx          # React context provider
│   ├── AIChat.tsx              # Chat component
│   ├── ContentGenerator.tsx    # Content generator
│   ├── ModelSelector.tsx       # Model selector
│   └── UsageMonitor.tsx        # Usage monitor
└── api/
    └── endpoints.ts            # Dynamic API endpoints
```

### Data Flow

1. **Configuration**: The plugin registers itself automatically in the system
2. **Initialization**: Models and services are set up on activation
3. **Request**: Users can use components, hooks, or the API
4. **Processing**: Requests pass through the AI Core
5. **Safety**: Content and PII filters are applied
6. **Model**: The optimal model is selected automatically
7. **Generation**: The AI request is executed
8. **Response**: The processed content is returned
9. **Tracking**: Metrics and costs are recorded

### Entity Integration

The plugin can process any entity type dynamically:

```tsx
// For tasks
await AIAPI.processEntity('task', taskData, 'analyze')

// For users
await AIAPI.processEntity('user', userData, 'enhance')

// For posts
await AIAPI.processEntity('post', postData, 'summarize')

// For projects
await AIAPI.processEntity('project', projectData, 'suggest')
```

### Dynamic Endpoints

The endpoint system adapts automatically (a small health-check example follows this list):

- `/api/v1/ai/` → Plugin info
- `/api/v1/ai/generate` → Content generation
- `/api/v1/ai/analyze` → Content analysis
- `/api/v1/ai/enhance` → Content enhancement
- `/api/v1/ai/entity` → Entity processing
- `/api/v1/ai/models` → Available models
- `/api/v1/ai/usage` → Usage statistics
- `/api/v1/ai/health` → Plugin status
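
As a small illustration (not part of the package), the snippet below calls the health endpoint with `fetch`; the base URL and error handling are assumptions, and the exact response shape is not documented here, so it is simply logged.

```ts
// Quick check against the dynamic endpoints listed above.
async function checkAIPluginHealth(baseUrl = 'http://localhost:3000'): Promise<void> {
  const res = await fetch(`${baseUrl}/api/v1/ai/health`, {
    headers: { Accept: 'application/json' },
  })
  if (!res.ok) {
    throw new Error(`AI plugin health check failed: ${res.status}`)
  }
  console.log(await res.json())
}

checkAIPluginHealth().catch(console.error)
```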

## Use Cases

### 1. Task Assistant
Analyze and improve tasks automatically:

```tsx
const enhancedTask = await AIAPI.processEntity(
  'task',
  { title: 'Do something', description: 'Pending' },
  'enhance'
)
```

### 2. Social Content Generation
Create content optimized for different platforms:

```tsx
const tweetContent = await content.generateContent(
  'Product launch',
  { platform: 'twitter', tone: 'exciting' }
)
```

### 3. Chat with Entity Context
Chat that understands the context of specific entities:

```tsx
<AIChat
  entityContext={{
    type: 'project',
    id: 'project_456',
    data: projectData
  }}
/>
```

### 4. Content Analysis
Analyze content to extract insights:

```tsx
const analysis = await ai.analyze(content, 'comprehensive')
// Returns: readability, sentiment, topics, SEO, etc.
```

## Contributing

To extend the plugin:

1. **New Models**: Add configurations in `model-selector.ts`
2. **New Endpoints**: Register them in `endpoints.ts`
3. **New Content Types**: Extend `prompts.ts` (a hypothetical sketch follows this list)
4. **New Components**: Add them in `components/`
5. **New Hooks**: Implement them in `hooks/`
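
Purely as a hypothetical illustration of item 3 (the real structure of `prompts.ts` is not shown in this diff, so the names and shape below are assumptions, not the plugin's actual API), a new content type might be expressed as a prompt template entry:

```ts
// Hypothetical shape only — not taken from prompts.ts.
interface PromptTemplate {
  system: string
  build: (userPrompt: string) => string
}

const newsletterTemplate: PromptTemplate = {
  system: 'You are an editor who writes concise, engaging newsletters.',
  build: (userPrompt) =>
    `Write a short newsletter section about: ${userPrompt}. ` +
    'Use a friendly tone and end with a clear call to action.',
}

// e.g. expose it under a new contentType key such as "newsletter"
export const contentTemplates: Record<string, PromptTemplate> = {
  newsletter: newsletterTemplate,
}
```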

## Security and Privacy

- **Sensitive data**: Masked automatically
- **Toxic content**: Filtered before processing
- **Logs**: No user data is stored
- **Compliance**: GDPR and CCPA compatible
- **Rate limiting**: Prevents abuse of the system

## Monitoring and Metrics

The plugin includes comprehensive monitoring:

- **Per-user usage**: Tokens, costs, requests
- **Performance**: Response time, success rate
- **Models**: Popularity, effectiveness
- **Errors**: Tracking and alerts
- **Quotas**: Daily and monthly limits

## Support

For problems or questions:

1. Check the plugin logs in the console
2. Verify the environment variable configuration
3. Confirm that the models are available
4. Check the quota and rate limits

The plugin is designed to be fully flexible and extensible, integrating seamlessly with the dynamic entity system.
package/api/README.md
ADDED
@@ -0,0 +1,65 @@
# AI Plugin - Generate Endpoint

Simple AI assistant endpoint for general assistance.

## Usage

**Endpoint:** `POST /api/plugin/ai/generate`

**Body:**
```json
{
  "prompt": "Your question or request",
  "model": "llama3.2:3b",
  "maxTokens": 1000,
  "temperature": 0.7
}
```

**Response** (a matching TypeScript shape is sketched after the block):
```json
{
  "success": true,
  "response": "AI response here...",
  "model": "llama3.2:3b",
  "provider": "ollama",
  "isLocal": true,
  "cost": 0,
  "tokens": { "input": 15, "output": 42, "total": 57 },
  "userId": "user_123"
}
```
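
For reference, here is a TypeScript shape inferred from the example response above; the package's own definitions live in `types/ai.types.ts` and may differ.

```ts
// Field types inferred from the example JSON response above.
interface GenerateResponse {
  success: boolean
  response: string
  model: string
  provider: 'ollama' | 'openai' | 'anthropic' | string
  isLocal: boolean
  cost: number
  tokens: { input: number; output: number; total: number }
  userId: string
}
```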

## Examples

### Basic Chat
```bash
curl -X POST /api/plugin/ai/generate \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Explain machine learning in simple terms"}'
```

### With Custom Model
```bash
curl -X POST /api/plugin/ai/generate \
  -H "Content-Type: application/json" \
  -d '{
    "prompt": "Write a short poem about coding",
    "model": "claude-3-5-haiku-20241022",
    "maxTokens": 200
  }'
```

## Available Models

- **Local (Free):** llama3.2:3b, llama3.2, llama3.1, qwen2.5, mistral
- **OpenAI:** gpt-4o, gpt-4o-mini, gpt-3.5-turbo
- **Anthropic:** claude-3-5-sonnet-20241022, claude-3-5-haiku-20241022

## Authentication

Requires session authentication or an API key with the appropriate scope.

## Custom Endpoints

For specialized use cases, copy this endpoint to your `app/api/` directory and modify as needed (a minimal sketch follows). See `examples/app-api-examples/` for templates.