@agentlify/mcp-server 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,424 @@
1
+ # Production Chatbot Example
2
+
3
+ Complete implementation of a production-ready chatbot using Agentlify.
4
+
5
+ ---
6
+
7
+ ## Overview
8
+
9
+ This example shows a full-featured chatbot with:
10
+
11
+ - ✅ Context management
12
+ - ✅ Error handling & fallbacks
13
+ - ✅ Cost tracking
14
+ - ✅ Streaming responses
15
+ - ✅ Conversation history
16
+
17
+ ---
18
+
19
+ ## Setup
20
+
21
+ ```bash
22
+ npm install openai express dotenv
23
+ ```
24
+
25
+ ```bash
26
+ # .env
27
+ AGENTLIFY_API_KEY=mp_your_key
28
+ AGENTLIFY_ROUTER_ID=your_router_id
29
+ PORT=3000
30
+ ```
31
+
32
+ ---
33
+
34
+ ## Complete Implementation
35
+
36
+ ### server.js
37
+
38
+ ```javascript
39
+ const express = require('express');
40
+ const { OpenAI } = require('openai');
41
+ require('dotenv').config();
42
+
43
+ const app = express();
44
+ app.use(express.json());
45
+
46
+ // Initialize Agentlify client
47
+ const client = new OpenAI({
48
+ apiKey: process.env.AGENTLIFY_API_KEY,
49
+ baseURL: `https://agentlify.co/api/router/${process.env.AGENTLIFY_ROUTER_ID}`,
50
+ });
51
+
52
+ // In-memory conversation storage (use Redis/DB in production)
53
+ const conversations = new Map();
54
+
55
+ // Cost tracking
56
+ let totalCost = 0;
57
+ let requestCount = 0;
58
+
59
+ // Chat endpoint
60
+ app.post('/api/chat', async (req, res) => {
61
+ const { sessionId, message } = req.body;
62
+
63
+ if (!sessionId || !message) {
64
+ return res.status(400).json({ error: 'sessionId and message required' });
65
+ }
66
+
67
+ try {
68
+ // Get or create conversation history
69
+ if (!conversations.has(sessionId)) {
70
+ conversations.set(sessionId, [
71
+ {
72
+ role: 'system',
73
+ content:
74
+ 'You are a helpful, friendly assistant. Keep responses concise.',
75
+ },
76
+ ]);
77
+ }
78
+
79
+ const history = conversations.get(sessionId);
80
+ history.push({ role: 'user', content: message });
81
+
82
+ // Keep last 10 messages for context (cost control)
83
+ if (history.length > 21) {
84
+ // system + 10 pairs
85
+ history.splice(1, 2); // Remove oldest user/assistant pair
86
+ }
87
+
88
+ // Make request with timeout and retry
89
+ const response = await makeRequestWithRetry(history);
90
+
91
+ // Extract response
92
+ const assistantMessage = response.choices[0].message.content;
93
+ history.push({ role: 'assistant', content: assistantMessage });
94
+
95
+ // Track costs
96
+ const cost = response._meta?.cost || 0;
97
+ totalCost += cost;
98
+ requestCount++;
99
+
100
+ // Return response with metadata
101
+ res.json({
102
+ message: assistantMessage,
103
+ meta: {
104
+ cost: cost,
105
+ modelUsed: response._meta?.modelUsed,
106
+ latency: response._meta?.latency,
107
+ sessionCost: calculateSessionCost(sessionId),
108
+ },
109
+ });
110
+ } catch (error) {
111
+ console.error('Chat error:', error);
112
+ res.status(500).json({
113
+ error: 'Failed to process message',
114
+ details: error.message,
115
+ });
116
+ }
117
+ });
118
+
119
+ // Streaming endpoint
120
+ app.post('/api/chat/stream', async (req, res) => {
121
+ const { sessionId, message } = req.body;
122
+
123
+ if (!sessionId || !message) {
124
+ return res.status(400).json({ error: 'sessionId and message required' });
125
+ }
126
+
127
+ try {
128
+ // Setup history
129
+ if (!conversations.has(sessionId)) {
130
+ conversations.set(sessionId, [
131
+ {
132
+ role: 'system',
133
+ content: 'You are a helpful, friendly assistant.',
134
+ },
135
+ ]);
136
+ }
137
+
138
+ const history = conversations.get(sessionId);
139
+ history.push({ role: 'user', content: message });
140
+
141
+ // Set headers for streaming
142
+ res.setHeader('Content-Type', 'text/event-stream');
143
+ res.setHeader('Cache-Control', 'no-cache');
144
+ res.setHeader('Connection', 'keep-alive');
145
+
146
+ // Create stream
147
+ const stream = await client.chat.completions.create({
148
+ messages: history,
149
+ stream: true,
150
+ });
151
+
152
+ let fullResponse = '';
153
+
154
+ // Stream chunks
155
+ for await (const chunk of stream) {
156
+ const content = chunk.choices[0]?.delta?.content || '';
157
+ if (content) {
158
+ fullResponse += content;
159
+ res.write(`data: ${JSON.stringify({ content })}\n\n`);
160
+ }
161
+ }
162
+
163
+ // Save full response to history
164
+ history.push({ role: 'assistant', content: fullResponse });
165
+
166
+ res.write('data: [DONE]\n\n');
167
+ res.end();
168
+ } catch (error) {
169
+ console.error('Stream error:', error);
170
+ res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
171
+ res.end();
172
+ }
173
+ });
174
+
175
+ // Analytics endpoint
176
+ app.get('/api/analytics', (req, res) => {
177
+ res.json({
178
+ totalCost,
179
+ requestCount,
180
+ averageCost: requestCount > 0 ? totalCost / requestCount : 0,
181
+ activeSessions: conversations.size,
182
+ });
183
+ });
184
+
185
+ // Helper: Retry logic
186
+ async function makeRequestWithRetry(messages, maxRetries = 3) {
187
+ for (let i = 0; i < maxRetries; i++) {
188
+ try {
189
+ return await client.chat.completions.create({
190
+ messages,
191
+ max_tokens: 500, // Cost control
192
+ temperature: 0.7,
193
+ });
194
+ } catch (error) {
195
+ if (i === maxRetries - 1) throw error;
196
+ await new Promise((resolve) => setTimeout(resolve, 1000 * (i + 1)));
197
+ }
198
+ }
199
+ }
200
+
201
+ // Helper: Calculate session cost
202
+ function calculateSessionCost(sessionId) {
203
+ // In production, track per-session costs properly
204
+ return conversations.has(sessionId) ? 0.05 : 0;
205
+ }
206
+
207
+ // Start server
208
+ const PORT = process.env.PORT || 3000;
209
+ app.listen(PORT, () => {
210
+ console.log(`Chatbot server running on port ${PORT}`);
211
+ console.log(`Cost-optimized routing enabled`);
212
+ });
213
+ ```
214
+
215
+ ---
216
+
217
+ ## Client Example (React)
218
+
219
+ ### ChatComponent.jsx
220
+
221
+ ```jsx
222
+ import { useState, useEffect } from 'react';
223
+
224
+ export default function ChatComponent() {
225
+ const [messages, setMessages] = useState([]);
226
+ const [input, setInput] = useState('');
227
+ const [loading, setLoading] = useState(false);
228
+ const [sessionId] = useState(() => `session-${Date.now()}`);
229
+ const [analytics, setAnalytics] = useState(null);
230
+
231
+ const sendMessage = async () => {
232
+ if (!input.trim()) return;
233
+
234
+ const userMessage = { role: 'user', content: input };
235
+ setMessages((prev) => [...prev, userMessage]);
236
+ setInput('');
237
+ setLoading(true);
238
+
239
+ try {
240
+ const response = await fetch('/api/chat', {
241
+ method: 'POST',
242
+ headers: { 'Content-Type': 'application/json' },
243
+ body: JSON.stringify({ sessionId, message: input }),
244
+ });
245
+
246
+ const data = await response.json();
247
+
248
+ setMessages((prev) => [
249
+ ...prev,
250
+ { role: 'assistant', content: data.message },
251
+ ]);
252
+
253
+ // Show cost info
254
+ console.log('Request cost:', data.meta.cost);
255
+ console.log('Model used:', data.meta.modelUsed);
256
+ } catch (error) {
257
+ console.error('Error:', error);
258
+ setMessages((prev) => [
259
+ ...prev,
260
+ { role: 'assistant', content: 'Sorry, something went wrong.' },
261
+ ]);
262
+ } finally {
263
+ setLoading(false);
264
+ }
265
+ };
266
+
267
+ // Fetch analytics
268
+ useEffect(() => {
269
+ fetch('/api/analytics')
270
+ .then((res) => res.json())
271
+ .then(setAnalytics);
272
+ }, [messages]);
273
+
274
+ return (
275
+ <div className="chat-container">
276
+ <div className="messages">
277
+ {messages.map((msg, i) => (
278
+ <div key={i} className={`message ${msg.role}`}>
279
+ <strong>{msg.role}:</strong> {msg.content}
280
+ </div>
281
+ ))}
282
+ {loading && <div className="loading">Thinking...</div>}
283
+ </div>
284
+
285
+ <div className="input-area">
286
+ <input
287
+ value={input}
288
+ onChange={(e) => setInput(e.target.value)}
289
+ onKeyPress={(e) => e.key === 'Enter' && sendMessage()}
290
+ placeholder="Type a message..."
291
+ />
292
+ <button onClick={sendMessage} disabled={loading}>
293
+ Send
294
+ </button>
295
+ </div>
296
+
297
+ {analytics && (
298
+ <div className="analytics">
299
+ <small>
300
+ Total cost: ${analytics.totalCost.toFixed(4)} | Avg per request: $
301
+ {analytics.averageCost.toFixed(4)} | Requests:{' '}
302
+ {analytics.requestCount}
303
+ </small>
304
+ </div>
305
+ )}
306
+ </div>
307
+ );
308
+ }
309
+ ```
310
+
311
+ ---
312
+
313
+ ## Cost Analysis
314
+
315
+ ### Typical Conversation (10 messages):
316
+
317
+ | Component | Tokens | Cost |
318
+ | ----------------------------------------- | --------- | --------- |
319
+ | System prompt | 20 | $0.0001 |
320
+ | User messages (avg 50 tokens each) | 500 | $0.0025 |
321
+ | Assistant responses (avg 100 tokens each) | 1,000 | $0.0060 |
322
+ | **Total per conversation** | **1,520** | **$0.01** |
323
+
324
+ ### Monthly Costs (1,000 conversations/day):
325
+
326
+ | Router Strategy | Monthly Cost | vs. GPT-4 Only |
327
+ | --------------- | ------------ | --------------- |
328
+ | Cost-Optimized | $300 | Save $450 (60%) |
329
+ | Balanced | $450 | Save $300 (40%) |
330
+ | GPT-4 Only | $750 | Baseline |
331
+
332
+ ---
333
+
334
+ ## Production Checklist
335
+
336
+ - [ ] Use environment variables for API keys
337
+ - [ ] Implement proper error handling
338
+ - [ ] Add retry logic with exponential backoff
339
+ - [ ] Limit conversation history (cost control)
340
+ - [ ] Track costs per session
341
+ - [ ] Implement rate limiting
342
+ - [ ] Add authentication
343
+ - [ ] Store conversations in database (not memory)
344
+ - [ ] Monitor with dashboard
345
+ - [ ] Set up alerts for cost spikes
346
+
347
+ ---
348
+
349
+ ## Advanced Features
350
+
351
+ ### 1. Function Calling (Weather Example)
352
+
353
+ ```javascript
354
+ const response = await client.chat.completions.create({
355
+ messages: history,
356
+ tools: [
357
+ {
358
+ type: 'function',
359
+ function: {
360
+ name: 'get_weather',
361
+ description: 'Get current weather',
362
+ parameters: {
363
+ type: 'object',
364
+ properties: {
365
+ location: { type: 'string' },
366
+ },
367
+ },
368
+ },
369
+ },
370
+ ],
371
+ });
372
+
373
+ // Handle function calls
374
+ if (response.choices[0].message.tool_calls) {
375
+ const toolCall = response.choices[0].message.tool_calls[0];
376
+ const weather = await fetchWeather(
377
+ JSON.parse(toolCall.function.arguments).location,
378
+ );
379
+ // Continue conversation with function result
380
+ }
381
+ ```
382
+
383
+ ---
384
+
385
+ ### 2. Context Pruning
386
+
387
+ ```javascript
388
+ function pruneHistory(history, maxTokens = 2000) {
389
+ // Keep system prompt
390
+ const system = history[0];
391
+ let messages = history.slice(1);
392
+
393
+ // Estimate tokens
394
+ let estimatedTokens = JSON.stringify(system).length / 4;
395
+
396
+ // Remove oldest messages until under limit
397
+ while (estimatedTokens > maxTokens && messages.length > 2) {
398
+ messages.shift();
399
+ estimatedTokens = JSON.stringify([system, ...messages]).length / 4;
400
+ }
401
+
402
+ return [system, ...messages];
403
+ }
404
+ ```
405
+
406
+ ---
407
+
408
+ ## Monitoring
409
+
410
+ Use MCP tools to monitor your chatbot:
411
+
412
+ ```
413
+ User: "Show usage for my chatbot router"
414
+ AI:
415
+ Requests today: 1,234
416
+ Cost today: $12.45
417
+ Average latency: 450ms
418
+ Top model: gpt-4o-mini (70%)
419
+ Success rate: 99.8%
420
+ ```
421
+
422
+ ---
423
+
424
+ **This production chatbot shows real-world Agentlify usage with cost optimization, error handling, and proper architecture!**
@@ -0,0 +1,192 @@
1
+ # Migrating from Anthropic to Agentlify
2
+
3
+ **Just a two-line configuration change** - keep using the Anthropic SDK with Agentlify's intelligent routing!
4
+
5
+ ---
6
+
7
+ ## Why Use Agentlify?
8
+
9
+ ✅ **40-60% cost savings** with intelligent routing
10
+ ✅ **Keep using the Anthropic SDK** - just update the configuration
11
+ ✅ **Access 100+ models** including all Claude models + OpenAI, Google, more
12
+ ✅ **Automatic fallbacks** for improved reliability
13
+ ✅ **Real-time cost tracking** with detailed analytics
14
+
15
+ ---
16
+
17
+ ## Migration Steps
18
+
19
+ ### Step 1: Create Agentlify Account & Router
20
+
21
+ 1. Sign up at [agentlify.app](https://agentlify.app)
22
+ 2. Create a router (cost/quality/speed optimized)
23
+ 3. Get your API key (`mp_xxx`) from Settings → API Keys
24
+ 4. Copy your Router ID from the router page
25
+
26
+ ### Step 2: Update Anthropic SDK Configuration
27
+
28
+ ---
29
+
30
+ ## Python Migration
31
+
32
+ ### Before (Anthropic):
33
+
34
+ ```python
35
+ from anthropic import Anthropic
36
+
37
+ client = Anthropic(api_key="sk-ant-...")
38
+
39
+ response = client.messages.create(
40
+ model="claude-3-5-sonnet-20241022",
41
+ messages=[{"role": "user", "content": "Hello!"}],
42
+ max_tokens=1024
43
+ )
44
+ ```
45
+
46
+ ### After (Agentlify):
47
+
48
+ ```python
49
+ from anthropic import Anthropic # Same import!
50
+
51
+ # Just change these two lines:
52
+ client = Anthropic(
53
+ api_key="mp_xxx", # Your Agentlify API key
54
+ base_url="https://agentlify.co/api/router/your_router_id"
55
+ )
56
+
57
+ # Everything else works the same!
58
+ response = client.messages.create(
59
+ model="claude-3-5-sonnet-20241022", # Optional - router optimizes
60
+ messages=[{"role": "user", "content": "Hello!"}],
61
+ max_tokens=1024
62
+ )
63
+
64
+ # NEW: Access Agentlify metadata
65
+ print(f"Cost: ${response._meta.cost}")
66
+ print(f"Model used: {response._meta.modelUsed}")
67
+ ```
68
+
69
+ ### What Changed:
70
+
71
+ 1. ✅ **`api_key`**: Use Agentlify key
72
+ 2. ✅ **`base_url`**: Point to your router
73
+ 3. ✅ **That's it!** Everything else identical
74
+
75
+ ---
76
+
77
+ ## JavaScript/TypeScript Migration
78
+
79
+ ### Before (Anthropic):
80
+
81
+ ```javascript
82
+ import Anthropic from '@anthropic-ai/sdk';
83
+
84
+ const client = new Anthropic({
85
+ apiKey: 'sk-ant-...',
86
+ });
87
+
88
+ const response = await client.messages.create({
89
+ model: 'claude-3-5-sonnet-20241022',
90
+ messages: [{ role: 'user', content: 'Hello!' }],
91
+ max_tokens: 1024,
92
+ });
93
+ ```
94
+
95
+ ### After (Agentlify):
96
+
97
+ ```javascript
98
+ import Anthropic from '@anthropic-ai/sdk'; // Same import!
99
+
100
+ // Just change these two lines:
101
+ const client = new Anthropic({
102
+ apiKey: 'mp_xxx', // Your Agentlify API key
103
+ baseURL: 'https://agentlify.co/api/router/your_router_id',
104
+ });
105
+
106
+ // Everything else works the same!
107
+ const response = await client.messages.create({
108
+ model: 'claude-3-5-sonnet-20241022', // Optional - router optimizes
109
+ messages: [{ role: 'user', content: 'Hello!' }],
110
+ max_tokens: 1024,
111
+ });
112
+
113
+ // NEW: Access Agentlify metadata
114
+ console.log('Cost:', response._meta?.cost);
115
+ console.log('Model used:', response._meta?.modelUsed);
116
+ ```
117
+
118
+ ---
119
+
120
+ ## Feature Parity
121
+
122
+ Everything works identically:
123
+
124
+ | Anthropic Feature | Agentlify Support | Notes |
125
+ | ----------------- | ------------------ | ------------------------ |
126
+ | Messages API | ✅ Fully supported | Identical API |
127
+ | Streaming | ✅ Fully supported | Works the same |
128
+ | Tool Use | ✅ Fully supported | Same format |
129
+ | Vision | ✅ Fully supported | All Claude vision models |
130
+ | System Prompts | ✅ Fully supported | Identical behavior |
131
+
132
+ ---
133
+
134
+ ## Streaming Example
135
+
136
+ ```python
137
+ with client.messages.stream(
138
+ model="claude-3-5-sonnet-20241022",
139
+ messages=[{"role": "user", "content": "Write a poem"}],
140
+ max_tokens=1024
141
+ ) as stream:
142
+ for text in stream.text_stream:
143
+ print(text, end="", flush=True)
144
+ ```
145
+
146
+ **Works exactly the same with Agentlify!**
147
+
148
+ ---
149
+
150
+ ## Cost Comparison
151
+
152
+ ### Example: 1M input + 500K output tokens
153
+
154
+ | Provider | Monthly Cost |
155
+ | -------------------------- | ---------------- |
156
+ | Anthropic Direct | $100.00 |
157
+ | Agentlify (cost-optimized) | **$45.00** |
158
+ | **Savings** | **$55.00 (55%)** |
159
+
160
+ ---
161
+
162
+ ## Migration Checklist
163
+
164
+ - [ ] Create Agentlify account
165
+ - [ ] Create router
166
+ - [ ] Get API key (`mp_xxx`)
167
+ - [ ] Update `api_key` in code
168
+ - [ ] Update `base_url`/`baseURL` to your router endpoint
169
+ - [ ] Test with sample request
170
+ - [ ] Monitor in dashboard
171
+ - [ ] Deploy!
172
+
173
+ **Total time: ~2 minutes**
174
+
175
+ ---
176
+
177
+ ## Need Help?
178
+
179
+ Use the MCP `migrate_code` tool:
180
+
181
+ ```
182
+ User: "Migrate this Anthropic code: [paste code]"
183
+
184
+ AI Assistant:
185
+ ✅ Converted code
186
+ ✅ Cost savings estimate
187
+ ✅ Next steps
188
+ ```
189
+
190
+ ---
191
+
192
+ **Key Takeaway:** Keep using the Anthropic SDK - just point it at Agentlify for intelligent routing and cost savings!