@goldensheepai/toknxr-cli 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +21 -0
- package/.env.example +21 -0
- package/README.md +238 -0
- package/interactions.log +8 -0
- package/lib/ai-analytics.js +296 -0
- package/lib/auth.js +73 -0
- package/lib/cli.js +382 -0
- package/lib/code-analysis.js +304 -0
- package/lib/code-review.js +319 -0
- package/lib/config.js +7 -0
- package/lib/dashboard.js +363 -0
- package/lib/hallucination-detector.js +272 -0
- package/lib/policy.js +49 -0
- package/lib/pricing.js +20 -0
- package/lib/proxy.js +359 -0
- package/lib/sync.js +95 -0
- package/package.json +38 -0
- package/src/ai-analytics.ts +418 -0
- package/src/auth.ts +80 -0
- package/src/cli.ts +447 -0
- package/src/code-analysis.ts +365 -0
- package/src/config.ts +10 -0
- package/src/dashboard.tsx +391 -0
- package/src/hallucination-detector.ts +368 -0
- package/src/policy.ts +55 -0
- package/src/pricing.ts +21 -0
- package/src/proxy.ts +438 -0
- package/src/sync.ts +129 -0
- package/start.sh +56 -0
- package/test-analysis.mjs +77 -0
- package/test-coding.mjs +27 -0
- package/test-generate-sample-data.js +118 -0
- package/test-proxy.mjs +25 -0
- package/toknxr.config.json +63 -0
- package/toknxr.policy.json +18 -0
- package/tsconfig.json +19 -0
package/.env
ADDED
@@ -0,0 +1,21 @@
# TokNXR Environment Variables
# Copy this file to .env and fill in your actual API keys

# Google AI API Key (used for both Gemini-Pro and Gemini-Free)
GEMINI_API_KEY=AIzaSyCtrQ8e5H66vbrAozWlWUmCcObJmrI2ovg

# OpenAI API Key
OPENAI_API_KEY=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

# Anthropic Claude API Key
ANTHROPIC_API_KEY=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

# Supabase Configuration
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_SERVICE_ROLE_KEY=your_service_role_key
# Optional: Webhook URL for budget alerts
# WEBHOOK_URL=https://your-webhook-url.com/alerts
SUPABASE_URL=https://pkdytotoptkknghtsomn.supabase.co
SUPABASE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InBrZHl0b3RvcHRra25naHRzb21uIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTk3MjI1OTgsImV4cCI6MjA3NTI5ODU5OH0.Y3RVJvx7w-eGD4Nv2aVv8jnkUKhmA2vpkBs7_rFzEoQ
# Optional: Custom port for the proxy server (default: 8788)
# PORT=8788
package/.env.example
ADDED
@@ -0,0 +1,21 @@
# TokNXR Environment Variables
# Copy this file to .env and fill in your actual API keys

# Google AI API Key (used for both Gemini-Pro and Gemini-Free)
GEMINI_API_KEY=AIzaSyCtrQ8e5H66vbrAozWlWUmCcObJmrI2ovg

# OpenAI API Key
OPENAI_API_KEY=your-openai-api-key-here

# Anthropic Claude API Key
ANTHROPIC_API_KEY=your-anthropic-api-key-here

# Optional: Webhook URL for budget alerts
# WEBHOOK_URL=https://your-webhook-url.com/alerts
SUPABASE_URL=your-supabase-url
SUPABASE_ANON_KEY=your-anon-key
SUPABASE_SERVICE_ROLE_KEY=your-service-role-key
SUPABASE_JWT_SECRET=your-jwt-secret

# Optional: Custom port for the proxy server (default: 8787)
PORT=8787
package/README.md
ADDED
@@ -0,0 +1,238 @@
# TokNXR - AI Usage Analytics & Cost Tracking

🚀 **One Command Setup** - Track AI usage across all major providers with comprehensive analytics, cost monitoring, and code quality analysis.

## 🔥 Quick Start (ONE COMMAND!)

### Option 1: Ultimate One-Command Setup (Recommended)
```bash
# From the ToknXR-CLI directory, run:
./toknxr-cli/start.sh
```
This single command does **EVERYTHING**:
- ✅ Sets up all configuration files
- ✅ Creates environment variables template
- ✅ Starts the server with all 5 providers
- ✅ Opens your AI analytics dashboard

### Option 2: NPM Commands
```bash
# 1. Navigate to the toknxr-cli directory
cd toknxr-cli

# 2. One command setup and launch (does everything!)
npm run launch

# 3. Or use the alias:
npm run go
```

### Option 3: Manual Setup (if you prefer)
```bash
cd toknxr-cli
npm run quickstart # Sets up everything
# Add your API keys to .env file
npm start # Start tracking
```

**That's it!** 🎉 Your AI usage analytics dashboard will be available at `http://localhost:8788/dashboard`

## 📦 What's Included

✅ **5 AI Providers** with full configuration support:
- **Ollama-Llama3** (Local AI - Free)
- **Gemini-Pro** (Google AI - Paid)
- **Gemini-Free** (Google AI - Free tier)
- **OpenAI-GPT4** (OpenAI - Paid)
- **Anthropic-Claude** (Claude - Paid)

✅ **Advanced Analytics**:
- Real-time token usage tracking
- Cost monitoring and budget alerts
- Code quality analysis for coding requests
- Hallucination detection
- Effectiveness scoring

✅ **Smart Features**:
- Automatic provider routing
- Budget enforcement with alerts
- Comprehensive logging
- Web dashboard for visualization

## 🔧 Setup Details

### Environment Variables (.env file)
```bash
# Required: Google AI API Key (for Gemini models)
GEMINI_API_KEY=your_gemini_api_key_here

# Optional: OpenAI API Key
OPENAI_API_KEY=your_openai_api_key_here

# Optional: Anthropic Claude API Key
ANTHROPIC_API_KEY=your_anthropic_api_key_here

# Optional: Webhook for budget alerts
WEBHOOK_URL=https://your-webhook-url.com/alerts
```
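
If you set the optional `WEBHOOK_URL`, budget alerts are delivered to that URL as an HTTP callback. The alert payload isn't documented in this README, so the receiver below is only a minimal sketch using Node's built-in `http` module; it logs whatever body arrives rather than assuming any fields:

```javascript
// Minimal sketch of a budget-alert receiver for WEBHOOK_URL.
// The alert payload format is not documented here, so the handler
// logs the raw body instead of assuming specific fields.
import http from 'node:http';

http.createServer((req, res) => {
  if (req.method !== 'POST') {
    res.writeHead(405);
    res.end();
    return;
  }
  let body = '';
  req.on('data', (chunk) => (body += chunk));
  req.on('end', () => {
    console.log('Budget alert received:', body);
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify({ received: true }));
  });
}).listen(9000, () => console.log('Alert receiver listening on :9000'));
```

While testing locally you can point `WEBHOOK_URL=http://localhost:9000` at this process; the port here is arbitrary.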

### API Provider Endpoints

Once running, your AI providers will be available at:

| Provider | Endpoint | Status |
|----------|----------|---------|
| **Ollama-Llama3** | `http://localhost:8788/ollama` | ✅ Ready |
| **Gemini-Pro** | `http://localhost:8788/gemini` | ✅ Ready |
| **Gemini-Free** | `http://localhost:8788/gemini-free` | ✅ Ready |
| **OpenAI-GPT4** | `http://localhost:8788/openai` | ✅ Ready |
| **Anthropic-Claude** | `http://localhost:8788/anthropic` | ✅ Ready |

## 💡 Usage Examples

### Using with curl
```bash
# Test Gemini-Free (no API key needed for testing)
curl -X POST http://localhost:8788/gemini-free \
  -H "Content-Type: application/json" \
  -d '{"contents":[{"parts":[{"text":"Hello, world!"}]}]}'

# Test with your own API key for full access
curl -X POST http://localhost:8788/gemini \
  -H "Content-Type: application/json" \
  -d '{"contents":[{"parts":[{"text":"Write a Python function"}]}]}'
```

### Using with JavaScript/Node.js
```javascript
const response = await fetch('http://localhost:8788/gemini-free', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    contents: [{ parts: [{ text: 'Your prompt here' }] }]
  })
});
const data = await response.json();
```

### Using with Python
```python
import requests

response = requests.post('http://localhost:8788/gemini-free', json={
    'contents': [{'parts': [{'text': 'Your prompt here'}]}]
})
result = response.json()
```

## 📊 Analytics & Monitoring

### View Usage Statistics
```bash
# View token usage and cost statistics
npm run cli stats

# Analyze code quality from coding requests
npm run cli code-analysis

# Check for AI hallucinations
npm run cli hallucination-analysis
```

### Dashboard Access
- **Main Dashboard**: `http://localhost:8788/dashboard`
- **Health Check**: `http://localhost:8788/health`
- **API Stats**: `http://localhost:8788/api/stats`
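
The stats endpoint can also be queried from a script. The response shape isn't documented in this README, so the sketch below just prints the raw JSON (Node 18+ provides the global `fetch` used here):

```javascript
// Minimal sketch: pull the proxy's aggregate stats and print them.
// The response shape is not documented in this README, so nothing
// beyond valid JSON is assumed about it.
const res = await fetch('http://localhost:8788/api/stats');
if (!res.ok) {
  throw new Error(`Stats request failed: ${res.status}`);
}
console.log(JSON.stringify(await res.json(), null, 2));
```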

## 🛠️ Advanced Configuration

### Budget Management
The system includes intelligent budget management:

```bash
# Initialize budget policies
npm run cli policy:init

# Monthly limits (configurable in toknxr.policy.json):
# - Total: $50/month across all providers
# - Gemini-Pro: $30/month
# - Gemini-Free: $10/month
# - OpenAI-GPT4: $20/month
# - Anthropic-Claude: $15/month
# - Ollama-Llama3: $0/month (free)
```

### Custom Configuration
Edit `toknxr.config.json` to:
- Add new AI providers
- Modify token mapping
- Update API endpoints
- Configure authentication
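
The actual schema lives in the `toknxr.config.json` shipped with the package and isn't reproduced in this README, so the snippet below is purely illustrative: the `providers` array and its `name`/`targetUrl` fields are assumptions to verify against your own config file before relying on them.

```javascript
// Illustrative only: list the providers configured in toknxr.config.json.
// The "providers" key and the per-entry fields are assumptions (the real
// schema is defined by the shipped config file), so adjust the property
// names after inspecting your own config.
import fs from 'node:fs';

const config = JSON.parse(fs.readFileSync('toknxr.config.json', 'utf8'));
for (const provider of config.providers ?? []) {
  console.log(`${provider.name ?? '(unnamed)'} -> ${provider.targetUrl ?? '(no target URL)'}`);
}
```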

## 🔍 Troubleshooting

### Common Issues

**Port 8788 already in use:**
```bash
# Kill existing process
pkill -f "npm run start"
# Then restart
npm start
```

**API key not working:**
- Verify your API keys in the `.env` file
- Check that keys have the correct permissions
- Test keys directly with the provider's API

**Ollama not available:**
- Ensure Ollama is running: `ollama serve`
- Check that it's accessible at `http://localhost:11434`

### Getting Help
```bash
# View all available commands
npm run cli --help

# Check available Gemini models
npm run cli models

# Test direct API calls (bypassing proxy)
npm run cli call --model models/gemini-1.5-flash --prompt "Hello"
```

## 🚀 Production Deployment

For production use:

1. **Secure your API keys** using your platform's secret management
2. **Set up proper logging** and monitoring
3. **Configure webhook alerts** for budget limits
4. **Use a process manager** like PM2 for stability
5. **Set up reverse proxy** with proper CORS configuration

## 📈 What's Tracked

For every AI request, TokNXR captures:

- **Token Usage**: Prompt, completion, and total tokens
- **Cost Calculation**: Real-time cost in USD
- **Provider Info**: Which AI model was used
- **Code Quality**: Syntax validation, readability, structure
- **Effectiveness**: How well the AI understood your request
- **Hallucinations**: Detection of potential AI fabrications
- **Business Impact**: Time waste and quality degradation metrics
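
These metrics are written to `interactions.log` as one JSON record per request (the sample log included in this package shows the exact field names). Below is a minimal sketch that rolls the log up into per-provider totals, using only fields visible in that sample (`provider`, `totalTokens`, `costUSD`):

```javascript
// Minimal sketch: aggregate interactions.log (one JSON record per line)
// into per-provider totals. Only fields visible in the sample log are
// used; malformed lines are skipped.
import fs from 'node:fs';

const lines = fs.readFileSync('interactions.log', 'utf8').trim().split('\n');
const byProvider = {};
for (const line of lines) {
  let record;
  try {
    record = JSON.parse(line);
  } catch {
    continue; // skip malformed lines
  }
  const entry = (byProvider[record.provider] ??= { requests: 0, tokens: 0, costUSD: 0 });
  entry.requests += 1;
  entry.tokens += record.totalTokens ?? 0;
  entry.costUSD += record.costUSD ?? 0;
}
console.table(byProvider);
```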

## 🤝 Contributing

We welcome contributions! Areas where help is needed:

- Additional AI provider integrations
- Enhanced analytics and visualizations
- Performance optimizations
- Testing and documentation

---

**Made with ❤️ for AI developers who want to track and optimize their AI usage**
package/interactions.log
ADDED
@@ -0,0 +1,8 @@
{"timestamp":"2025-10-05T20:14:35.420Z","provider":"Gemini-Pro","model":"unknown","promptTokens":9,"completionTokens":2056,"totalTokens":3506,"costUSD":0,"taskType":"chat"}
{"timestamp":"2025-10-05T20:18:24.093Z","provider":"Gemini-Pro","model":"unknown","promptTokens":9,"completionTokens":2236,"totalTokens":3564,"costUSD":0,"taskType":"chat"}
{"timestamp":"2025-10-05T20:21:58.826Z","provider":"Gemini-Pro","model":"unknown","promptTokens":9,"completionTokens":2054,"totalTokens":3322,"costUSD":0,"taskType":"chat"}
{"requestId":"84870610-70c2-4b31-834f-6c55f92c8495","timestamp":"2025-10-05T20:49:46.035Z","provider":"Gemini-Pro","model":"gemini-2.5-flash","promptTokens":2,"completionTokens":519,"totalTokens":1930,"costUSD":0.3117,"taskType":"chat"}
{"timestamp":"2025-10-05T21:44:57.972Z","provider":"Gemini-Pro","model":"gemini-2.5-flash","promptTokens":150,"completionTokens":200,"totalTokens":350,"costUSD":0.05,"taskType":"coding","userPrompt":"Create a React component for a todo list","aiResponse":"Here's a React todo component...","extractedCode":"function TodoList() {\n const [todos, setTodos] = useState([]);\n return (\n <div>\n <h1>My Todos</h1>\n {/* Todo implementation */}\n </div>\n );\n}","codeQualityScore":85,"codeQualityMetrics":{"syntaxValid":true,"estimatedReadability":0.8,"hasFunctions":true,"hasClasses":false,"linesOfCode":12,"potentialIssues":[]},"effectivenessScore":88}
{"timestamp":"2025-10-05T21:59:57.974Z","provider":"OpenAI-GPT4","model":"gpt-4","promptTokens":80,"completionTokens":120,"totalTokens":200,"costUSD":0.02,"taskType":"coding","userPrompt":"Write a Python function to calculate fibonacci","aiResponse":"def fibonacci(n):...","extractedCode":"def fibonacci(n):\n if n <= 1:\n return n\n return fibonacci(n-1) + fibonacci(n-2)","codeQualityScore":92,"codeQualityMetrics":{"syntaxValid":true,"estimatedReadability":0.9,"hasFunctions":true,"hasClasses":false,"linesOfCode":4,"potentialIssues":[]},"effectivenessScore":95}
{"timestamp":"2025-10-05T22:09:57.974Z","provider":"Gemini-Pro","model":"gemini-2.5-flash","promptTokens":200,"completionTokens":300,"totalTokens":500,"costUSD":0.08,"taskType":"coding","userPrompt":"Create a TypeScript API endpoint","aiResponse":"Here's a TypeScript API endpoint...","extractedCode":"import express from 'express';\n\nconst app = express();\n\napp.get('/api/users', async (req, res) => {\n // Implementation here\n});\n\nexport default app;","codeQualityScore":78,"codeQualityMetrics":{"syntaxValid":true,"estimatedReadability":0.7,"hasFunctions":true,"hasClasses":false,"linesOfCode":8,"potentialIssues":["Missing error handling"]},"effectivenessScore":82}
{"requestId":"b8d3c1cc-c3c5-427a-b9f3-0af12b7e8e8d","timestamp":"2025-10-05T22:19:07.944Z","provider":"Gemini-Pro","model":"gemini-2.5-flash","promptTokens":13,"completionTokens":1133,"totalTokens":2206,"costUSD":0.68175,"taskType":"coding","extractedCode":"/**\n * Calculates the factorial of a non-negative integer using recursion.\n *\n * @param {number} n The non-negative integer for which to calculate the factorial.\n * @returns {number} The factorial of n.\n * @throws {Error} If n is a negative number.\n * @throws {Error} If n is not an integer.\n */\nfunction factorial(n) {\n // Input validation: Factorial is traditionally defined for non-negative integers.\n if (n < 0) {\n throw new Error(\"Factorial is not defined for negative numbers.\");\n }\n if (!Number.isInteger(n)) {\n throw new Error(\"Factorial is only defined for integer numbers.\");\n }\n\n // Base Case: This is where the recursion stops.\n // The factorial of 0 is 1, and the factorial of 1 is 1.\n if (n === 0 || n === 1) {\n return 1;\n }\n\n // Recursive Step: The function calls itself with a smaller input.\n // n! = n * (n-1)!\n return n * factorial(n - 1);\n}\n\n// --- Examples ---\nconsole.log(`Factorial of 0: ${factorial(0)}`); // Expected: 1\nconsole.log(`Factorial of 1: ${factorial(1)}`); // Expected: 1\nconsole.log(`Factorial of 5: ${factorial(5)}`); // Expected: 120 (5 * 4 * 3 * 2 * 1)\nconsole.log(`Factorial of 7: ${factorial(7)}`); // Expected: 5040 (7 * 6 * 5 * 4 * 3 * 2 * 1)\nconsole.log(`Factorial of 10: ${factorial(10)}`); // Expected: 3628800\n\n// --- Error Handling Examples ---\ntry {\n console.log(`Factorial of -3: ${factorial(-3)}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is not defined for negative numbers.\n}\n\ntry {\n console.log(`Factorial of 3.5: ${factorial(3.5)}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is only defined for integer numbers.\n}\n\ntry {\n console.log(`Factorial of \"abc\": ${factorial(\"abc\")}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is only defined for integer numbers.\n}","codeQualityMetrics":{"syntaxValid":true,"linesOfCode":47,"complexity":10,"hasFunctions":true,"hasClasses":false,"hasTests":false,"estimatedReadability":6,"potentialIssues":[],"language":"javascript"},"codeQualityScore":100,"effectivenessScore":68,"userPrompt":"Write a JavaScript function to calculate the factorial of a number using recursion","aiResponse":"Here's a JavaScript function to calculate the factorial of a number using recursion, along with explanations and examples:\n\n```javascript\n/**\n * Calculates the factorial of a non-negative integer using recursion.\n *\n * @param {number} n The non-negative integer for which to calculate the factorial.\n * @returns {number} The factorial of n.\n * @throws {Error} If n is a negative number.\n * @throws {Error} If n is not an integer.\n */\nfunction factorial(n) {\n // Input validation: Factorial is traditionally defined for non-negative integers.\n if (n < 0) {\n throw new Error(\"Factorial is not defined for negative numbers.\");\n }\n if (!Number.isInteger(n)) {\n throw new Error(\"Factorial is only defined for integer numbers.\");\n }\n\n // Base Case: This is where the recursion stops.\n // The factorial of 0 is 1, and the factorial of 1 is 1.\n if (n === 0 || n === 1) {\n return 1;\n }\n\n // Recursive Step: The function calls itself with a smaller input.\n // n! = n * (n-1)!\n return n * factorial(n - 1);\n}\n\n// --- Examples ---\nconsole.log(`Factorial of 0: ${factorial(0)}`); // Expected: 1\nconsole.log(`Factorial of 1: ${factorial(1)}`); // Expected: 1\nconsole.log(`Factorial of 5: ${factorial(5)}`); // Expected: 120 (5 * 4 * 3 * 2 * 1)\nconsole.log(`Factorial of 7: ${factorial(7)}`); // Expected: 5040 (7 * 6 * 5 * 4 * 3 * 2 * 1)\nconsole.log(`Factorial of 10: ${factorial(10)}`); // Expected: 3628800\n\n// --- Error Handling Examples ---\ntry {\n console.log(`Factorial of -3: ${factorial(-3)}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is not defined for negative numbers.\n}\n\ntry {\n console.log(`Factorial of 3.5: ${factorial(3.5)}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is only defined for integer numbers.\n}\n\ntry {\n console.log(`Factorial of \"abc\": ${factorial(\"abc\")}`);\n} catch (error) {\n console.error(`Error: ${error.message}`); // Expected: Error: Factorial is only defined for integer numbers.\n}\n```\n\n### How it works (Recursive Breakdown):\n\nLet's trace `factorial(3)`:\n\n1. `factorial(3)` is called.\n * `n` is 3. It's not 0 or 1.\n * It returns `3 * factorial(2)`.\n\n2. `factorial(2)` is called (from inside the first call).\n * `n` is 2. It's not 0 or 1.\n * It returns `2 * factorial(1)`.\n\n3. `factorial(1)` is called (from inside the second call).\n * `n` is 1. **This hits the base case!**\n * It returns `1`.\n\n4. Now, the execution unwinds:\n * The `factorial(2)` call receives `1` from `factorial(1)`.\n * It calculates `2 * 1 = 2`.\n * It returns `2`.\n\n5. Finally, the `factorial(3)` call receives `2` from `factorial(2)`.\n * It calculates `3 * 2 = 6`.\n * It returns `6`.\n\nThe result `6` is then printed.\n\n### Key Concepts:\n\n1. **Base Case:** Every recursive function *must* have one or more base cases. This is a condition where the function stops calling itself and returns a direct value. Without a base case, the function would call itself indefinitely, leading to a \"Stack Overflow\" error. For factorial, `factorial(0) = 1` and `factorial(1) = 1` are the base cases.\n2. **Recursive Step:** This is where the function calls itself with a modified (usually smaller or simpler) input. The idea is to break down the problem into a smaller instance of the *same* problem. For factorial, `n! = n * (n-1)!` is the recursive step. The problem `n!` is solved by using the solution to `(n-1)!`.\n3. **Input Validation:** It's good practice to add checks for invalid inputs (like negative numbers or non-integers for factorial) to make the function more robust."}
package/lib/ai-analytics.js
ADDED
@@ -0,0 +1,296 @@
import fs from 'fs';
import path from 'path';
import { HallucinationDetector } from './hallucination-detector.js';
/**
 * Enhanced AI Analytics with hallucination tracking
 */
export class AIAnalytics {
  constructor(logFilePath) {
    this.detector = new HallucinationDetector();
    this.logFilePath = logFilePath || path.resolve(process.cwd(), 'interactions.log');
  }
  /**
   * Analyze a single interaction for hallucinations and business impact
   */
  analyzeInteraction(userPrompt, aiResponse, context) {
    // Run hallucination detection
    const hallucinationDetection = this.detector.detectHallucination(userPrompt, aiResponse);
    // Calculate business impact if hallucination detected
    let businessImpact;
    if (hallucinationDetection.isLikelyHallucination) {
      businessImpact = this.detector.calculateBusinessImpact(hallucinationDetection.confidence, 1, // This interaction
        context.costUSD);
    }
    return {
      timestamp: new Date().toISOString(),
      provider: context.provider,
      model: context.model,
      userPrompt,
      aiResponse,
      taskType: context.taskType,
      costUSD: context.costUSD,
      totalTokens: context.totalTokens,
      codeQualityScore: context.codeQualityScore,
      effectivenessScore: context.effectivenessScore,
      hallucinationDetection,
      businessImpact
    };
  }
  /**
   * Generate comprehensive analytics from interaction logs
   */
  generateAnalytics() {
    if (!fs.existsSync(this.logFilePath)) {
      return this.getEmptyAnalytics();
    }
    const fileContent = fs.readFileSync(this.logFilePath, 'utf8');
    const lines = fileContent.trim().split('\n');
    const interactions = [];
    // Parse all interactions
    for (const line of lines) {
      try {
        const interaction = JSON.parse(line);
        if (interaction.userPrompt && interaction.aiResponse) {
          interactions.push(interaction);
        }
      }
      catch (error) {
        // Skip malformed lines
        continue;
      }
    }
    if (interactions.length === 0) {
      return this.getEmptyAnalytics();
    }
    return this.aggregateAnalytics(interactions);
  }
  /**
   * Aggregate analytics from interaction data
   */
  aggregateAnalytics(interactions) {
    const totalInteractions = interactions.length;
    // Calculate hallucination metrics
    const hallucinations = interactions.filter(i => i.hallucinationDetection?.isLikelyHallucination);
    const hallucinationCount = hallucinations.length;
    const hallucinationRate = (hallucinationCount / totalInteractions) * 100;
    const avgConfidence = hallucinations.length > 0
      ? hallucinations.reduce((sum, h) => sum + (h.hallucinationDetection?.confidence || 0), 0) / hallucinations.length
      : 0;
    // Category breakdown
    const byCategory = {};
    hallucinations.forEach(h => {
      h.hallucinationDetection?.evidence.forEach(evidence => {
        byCategory[evidence.type] = (byCategory[evidence.type] || 0) + 1;
      });
    });
    // Provider comparison
    const providerStats = {};
    interactions.forEach(interaction => {
      if (!providerStats[interaction.provider]) {
        providerStats[interaction.provider] = [];
      }
      providerStats[interaction.provider].push(interaction);
    });
    const providerComparison = {};
    Object.entries(providerStats).forEach(([provider, providerInteractions]) => {
      const providerHallucinations = providerInteractions.filter(i => i.hallucinationDetection?.isLikelyHallucination);
      const providerHallucinationRate = (providerHallucinations.length / providerInteractions.length) * 100;
      const avgQualityScore = providerInteractions.reduce((sum, i) => sum + (i.codeQualityScore || 0), 0) / providerInteractions.length;
      const avgEffectivenessScore = providerInteractions.reduce((sum, i) => sum + (i.effectivenessScore || 0), 0) / providerInteractions.length;
      // Calculate business impact for this provider
      const totalCost = providerInteractions.reduce((sum, i) => sum + i.costUSD, 0);
      const businessImpact = this.detector.calculateBusinessImpact(providerHallucinationRate, providerInteractions.length, totalCost / providerInteractions.length);
      providerComparison[provider] = {
        totalInteractions: providerInteractions.length,
        hallucinationRate: Math.round(providerHallucinationRate * 10) / 10,
        avgQualityScore: Math.round(avgQualityScore),
        avgEffectivenessScore: Math.round(avgEffectivenessScore),
        businessImpact
      };
    });
    // Calculate overall business impact
    const totalCost = interactions.reduce((sum, i) => sum + i.costUSD, 0);
    const avgCostPerInteraction = totalCost / totalInteractions;
    const overallBusinessImpact = this.detector.calculateBusinessImpact(hallucinationRate, totalInteractions, avgCostPerInteraction);
    const hallucinationMetrics = {
      totalAnalyses: totalInteractions,
      hallucinationCount,
      hallucinationRate: Math.round(hallucinationRate * 10) / 10,
      avgConfidence: Math.round(avgConfidence),
      byCategory,
      byProvider: Object.fromEntries(Object.entries(providerComparison).map(([provider, stats]) => [provider, stats.hallucinationRate])),
      businessImpact: overallBusinessImpact
    };
    // Generate trends (last 30 days)
    const trends = this.generateTrends(interactions);
    // Generate recommendations
    const recommendations = this.generateRecommendations(hallucinationMetrics, providerComparison);
    return {
      totalInteractions,
      hallucinationMetrics,
      providerComparison,
      trends,
      recommendations
    };
  }
  /**
   * Generate trend data from interactions
   */
  generateTrends(interactions) {
    const last30Days = new Date();
    last30Days.setDate(last30Days.getDate() - 30);
    const recentInteractions = interactions.filter(i => new Date(i.timestamp) >= last30Days);
    // Group by day
    const dailyGroups = {};
    recentInteractions.forEach(interaction => {
      const date = new Date(interaction.timestamp).toISOString().split('T')[0];
      if (!dailyGroups[date]) {
        dailyGroups[date] = [];
      }
      dailyGroups[date].push(interaction);
    });
    // Calculate daily metrics
    const hallucinationRateOverTime = Object.entries(dailyGroups)
      .sort(([a], [b]) => a.localeCompare(b))
      .map(([date, dayInteractions]) => {
        const hallucinations = dayInteractions.filter(i => i.hallucinationDetection?.isLikelyHallucination);
        const rate = dayInteractions.length > 0 ? (hallucinations.length / dayInteractions.length) * 100 : 0;
        return { date, rate: Math.round(rate * 10) / 10 };
      });
    const qualityScoreOverTime = Object.entries(dailyGroups)
      .sort(([a], [b]) => a.localeCompare(b))
      .map(([date, dayInteractions]) => {
        const avgQuality = dayInteractions.reduce((sum, i) => sum + (i.codeQualityScore || 0), 0) / dayInteractions.length;
        return { date, score: Math.round(avgQuality) };
      });
    const costEfficiencyOverTime = Object.entries(dailyGroups)
      .sort(([a], [b]) => a.localeCompare(b))
      .map(([date, dayInteractions]) => {
        const totalCost = dayInteractions.reduce((sum, i) => sum + i.costUSD, 0);
        const avgQuality = dayInteractions.reduce((sum, i) => sum + (i.codeQualityScore || 0), 0) / dayInteractions.length;
        const efficiency = totalCost > 0 ? (avgQuality / totalCost) * 1000 : 0; // Quality per dollar
        return { date, efficiency: Math.round(efficiency * 10) / 10 };
      });
    return {
      hallucinationRateOverTime,
      qualityScoreOverTime,
      costEfficiencyOverTime
    };
  }
  /**
   * Generate actionable recommendations
   */
  generateRecommendations(metrics, providerComparison) {
    const recommendations = [];
    // Hallucination rate recommendations
    if (metrics.hallucinationRate > 20) {
      recommendations.push(`🚨 CRITICAL: Hallucination rate is ${metrics.hallucinationRate}%. Consider reviewing AI-generated content more carefully.`);
    }
    else if (metrics.hallucinationRate > 10) {
      recommendations.push(`⚠️ WARNING: Hallucination rate is ${metrics.hallucinationRate}%. Monitor closely and verify important information.`);
    }
    else if (metrics.hallucinationRate > 5) {
      recommendations.push(`ℹ️ Hallucination rate is ${metrics.hallucinationRate}%. Generally acceptable but watch for patterns.`);
    }
    // Provider-specific recommendations
    const worstProvider = Object.entries(providerComparison)
      .sort(([, a], [, b]) => (b.hallucinationRate || 0) - (a.hallucinationRate || 0))[0];
    if (worstProvider && (worstProvider[1].hallucinationRate || 0) > 15) {
      recommendations.push(`🔄 Consider reducing usage of ${worstProvider[0]} (hallucination rate: ${worstProvider[1].hallucinationRate}%) or improve prompt quality.`);
    }
    // Business impact recommendations
    if (metrics.businessImpact.estimatedDevTimeWasted > 5) {
      recommendations.push(`⏱️ Hallucinations are wasting ~${metrics.businessImpact.estimatedDevTimeWasted} hours of development time. Consider AI response verification workflows.`);
    }
    if (metrics.businessImpact.roiImpact > 10) {
      recommendations.push(`💰 Hallucinations are reducing ROI by ${metrics.businessImpact.roiImpact}%. Review AI usage strategy and prompt engineering.`);
    }
    // Quality improvement recommendations
    const bestProvider = Object.entries(providerComparison)
      .sort(([, a], [, b]) => (b.avgQualityScore || 0) - (a.avgQualityScore || 0))[0];
    if (bestProvider && (bestProvider[1].avgQualityScore || 0) > 80) {
      recommendations.push(`✅ ${bestProvider[0]} shows good performance (quality: ${bestProvider[1].avgQualityScore}/100). Consider using it more for critical tasks.`);
    }
    // Category-specific recommendations
    if (metrics.byCategory.fabrication > metrics.byCategory.contradiction) {
      recommendations.push(`🔧 High fabrication rate detected. Focus on improving technical prompt specificity and providing more context.`);
    }
    if (metrics.byCategory.contradiction > 3) {
      recommendations.push(`⚖️ Multiple contradictions detected. Consider breaking complex requests into smaller, focused prompts.`);
    }
    if (recommendations.length === 0) {
      recommendations.push('✨ AI hallucination metrics look good! Continue monitoring for any emerging patterns.');
    }
    return recommendations;
  }
  /**
   * Get empty analytics for when no data is available
   */
  getEmptyAnalytics() {
    return {
      totalInteractions: 0,
      hallucinationMetrics: {
        totalAnalyses: 0,
        hallucinationCount: 0,
        hallucinationRate: 0,
        avgConfidence: 0,
        byCategory: {},
        byProvider: {},
        businessImpact: {
          estimatedDevTimeWasted: 0,
          qualityDegradationScore: 0,
          roiImpact: 0,
          costOfHallucinations: 0
        }
      },
      providerComparison: {},
      trends: {
        hallucinationRateOverTime: [],
        qualityScoreOverTime: [],
        costEfficiencyOverTime: []
      },
      recommendations: ['No data available yet. Start using AI through the proxy to see analytics.']
    };
  }
  /**
   * Export analytics to JSON file
   */
  exportAnalytics(filePath) {
    const analytics = this.generateAnalytics();
    const exportPath = filePath || path.resolve(process.cwd(), 'ai-analytics.json');
    fs.writeFileSync(exportPath, JSON.stringify(analytics, null, 2));
    console.log(`AI analytics exported to ${exportPath}`);
  }
  /**
   * Get real-time hallucination rate for a specific provider
   */
  getProviderHallucinationRate(provider, hours = 24) {
    if (!fs.existsSync(this.logFilePath))
      return 0;
    const cutoffTime = new Date(Date.now() - hours * 60 * 60 * 1000);
    const fileContent = fs.readFileSync(this.logFilePath, 'utf8');
    const lines = fileContent.trim().split('\n');
    let providerInteractions = 0;
    let providerHallucinations = 0;
    for (const line of lines) {
      try {
        const interaction = JSON.parse(line);
        const interactionTime = new Date(interaction.timestamp);
        if (interactionTime >= cutoffTime && interaction.provider === provider) {
          providerInteractions++;
          if (interaction.hallucinationDetection?.isLikelyHallucination) {
            providerHallucinations++;
          }
        }
      }
      catch {
        continue;
      }
    }
    return providerInteractions > 0 ? (providerHallucinations / providerInteractions) * 100 : 0;
  }
}
/**
 * Global AI analytics instance
 */
export const aiAnalytics = new AIAnalytics();
package/lib/auth.js
ADDED
@@ -0,0 +1,73 @@
import * as http from 'http';
import open from 'open';
import chalk from 'chalk';
import * as keytar from 'keytar'; // Import keytar
const CLI_LOGIN_PORT = 8789;
const WEB_APP_URL = 'http://localhost:3000';
const SERVICE_NAME = 'toknxr-cli'; // A unique name for our service in the keychain
const ACCOUNT_NAME = 'default-user'; // A generic account name for the stored token
// Function to securely store the token
const storeToken = async (token) => {
  await keytar.setPassword(SERVICE_NAME, ACCOUNT_NAME, token);
  console.log(chalk.green('Supabase JWT securely stored in system keychain.'));
};
// Function to retrieve the token
const getToken = async () => {
  return await keytar.getPassword(SERVICE_NAME, ACCOUNT_NAME);
};
export const login = async () => {
  const server = new Promise((resolve, reject) => {
    const s = http.createServer(async (req, res) => {
      // Handle CORS preflight requests
      res.setHeader('Access-Control-Allow-Origin', WEB_APP_URL);
      res.setHeader('Access-Control-Allow-Methods', 'POST, OPTIONS');
      res.setHeader('Access-Control-Allow-Headers', 'Content-Type');
      if (req.method === 'OPTIONS') {
        res.writeHead(204);
        res.end();
        return;
      }
      if (req.method === 'POST' && req.url === '/token') {
        const chunks = [];
        for await (const chunk of req) {
          chunks.push(chunk);
        }
        const requestBody = Buffer.concat(chunks).toString();
        const { token: supabaseJwt } = JSON.parse(requestBody); // Supabase JWT
        if (supabaseJwt) {
          console.log(chalk.green('CLI authentication successful!'));
          res.writeHead(200, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ success: true }));
          s.close();
          resolve(supabaseJwt); // Resolve with the Supabase JWT
        }
        else {
          res.writeHead(400, { 'Content-Type': 'application/json' });
          res.end(JSON.stringify({ error: 'No token provided' }));
          s.close();
          reject(new Error('No token provided'));
        }
      }
      else {
        res.writeHead(404);
        res.end();
      }
    });
    s.listen(CLI_LOGIN_PORT, async () => {
      const loginUrl = `${WEB_APP_URL}/cli-login?port=${CLI_LOGIN_PORT}`;
      console.log(chalk.yellow('Your browser has been opened to complete the login process.'));
      await open(loginUrl);
    });
  });
  try {
    const supabaseJwt = await server; // Get the Supabase JWT
    await storeToken(supabaseJwt); // Store the Supabase JWT securely
    console.log(chalk.cyan('Authentication complete. You can now use TokNxr CLI commands.'));
  }
  catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error(chalk.red('Login failed:', message));
  }
};
// Export getToken for other parts of the CLI to use
export { getToken };