adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,1234 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Alexa Request Handler - TMLPD-Free Version
|
|
3
|
+
*
|
|
4
|
+
* This version removes TMLPD dependency completely and uses direct AI API calls.
|
|
5
|
+
* Simplifies architecture and improves reliability by eliminating single point of failure.
|
|
6
|
+
*
|
|
7
|
+
* Responsibilities:
|
|
8
|
+
* - Parse Alexa request format
|
|
9
|
+
* - Detect language using Google Translate API
|
|
10
|
+
* - Translate if needed using Sarvam API
|
|
11
|
+
* - Call AI APIs directly (OpenAI, Anthropic, etc.)
|
|
12
|
+
* - Format responses for Alexa
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
const SarvamClient = require('./sarvam_client');
|
|
16
|
+
const GoogleTranslateClient = require('./google_translate_client');
|
|
17
|
+
const { hashValue } = require('./logger');
|
|
18
|
+
|
|
19
|
+
class AlexaRequestHandlerNoTMLPD {
|
|
20
|
+
constructor(options = {}) {
|
|
21
|
+
this.sarvamClient = new SarvamClient(options.sarvamApiKey);
|
|
22
|
+
this.googleTranslateClient = new GoogleTranslateClient(options.googleTranslate || {});
|
|
23
|
+
this.voiceConfig = options.voice || null;
|
|
24
|
+
this.logVerbose = Boolean(options.logVerbose);
|
|
25
|
+
this.userLanguagePrefs = new Map(); // userId -> language preference
|
|
26
|
+
|
|
27
|
+
// Language detection method tracking
|
|
28
|
+
this.detectionMethodStats = {
|
|
29
|
+
google: 0,
|
|
30
|
+
enhanced: 0,
|
|
31
|
+
fallback: 0,
|
|
32
|
+
total: 0
|
|
33
|
+
};
|
|
34
|
+
|
|
35
|
+
// AI API configuration
|
|
36
|
+
this.aiConfig = options.ai || {};
|
|
37
|
+
this.primaryAIProvider = options.ai?.primary || 'openai';
|
|
38
|
+
this.fallbackAIProviders = options.ai?.fallbacks || ['groq', 'anthropic', 'google', 'cerebras'];
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/**
|
|
42
|
+
* Initialize AI API connections
|
|
43
|
+
*/
|
|
44
|
+
async initialize() {
|
|
45
|
+
console.log('[Alexa] Initializing TMLPD-free Alexa handler...');
|
|
46
|
+
|
|
47
|
+
// Check Sarvam API
|
|
48
|
+
const sarvamStatus = await this.sarvamClient.healthCheck();
|
|
49
|
+
console.log('[Alexa] Sarvam API Status:', sarvamStatus.status);
|
|
50
|
+
|
|
51
|
+
// Check Google Translate API
|
|
52
|
+
const googleHealth = await this.googleTranslateClient.healthCheck();
|
|
53
|
+
console.log('[Alexa] Google Translate API Status:', googleHealth ? 'Healthy' : 'Unhealthy');
|
|
54
|
+
|
|
55
|
+
// Configure AI API clients
|
|
56
|
+
await this.initializeAIClients();
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
/**
|
|
60
|
+
* Initialize direct AI API clients
|
|
61
|
+
*/
|
|
62
|
+
async initializeAIClients() {
|
|
63
|
+
const providers = ['openai', 'anthropic', 'google', 'groq', 'cerebras', 'claude', 'openrouter'];
|
|
64
|
+
|
|
65
|
+
for (const provider of providers) {
|
|
66
|
+
try {
|
|
67
|
+
const client = await this.createAIClient(provider);
|
|
68
|
+
if (client) {
|
|
69
|
+
console.log(`[Alexa] ${provider} AI client initialized`);
|
|
70
|
+
}
|
|
71
|
+
} catch (error) {
|
|
72
|
+
console.warn(`[Alexa] Failed to initialize ${provider} client:`, error.message);
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Call Hugging Face Inference API (Free alternative)
|
|
79
|
+
*/
|
|
80
|
+
async callHuggingFaceAI(prompt, options = {}) {
|
|
81
|
+
const apiKey = process.env.HUGGINGFACE_API_KEY || this.aiConfig.huggingface?.apiKey;
|
|
82
|
+
if (!apiKey) {
|
|
83
|
+
throw new Error('Hugging Face API key not configured');
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
const model = options.model || this.aiConfig.huggingface?.model || 'meta-llama/Llama-3.2-3B-Instruct';
|
|
87
|
+
|
|
88
|
+
console.log(`[Alexa] Calling Hugging Face API with model: ${model}`);
|
|
89
|
+
|
|
90
|
+
const response = await fetch(`https://api-inference.huggingface.co/models/${model}/v1/chat/completions`, {
|
|
91
|
+
method: 'POST',
|
|
92
|
+
headers: {
|
|
93
|
+
'Content-Type': 'application/json',
|
|
94
|
+
'Authorization': `Bearer ${apiKey}`
|
|
95
|
+
},
|
|
96
|
+
body: JSON.stringify({
|
|
97
|
+
model: model,
|
|
98
|
+
messages: [{ role: 'user', content: prompt }],
|
|
99
|
+
max_tokens: options.maxTokens || 500,
|
|
100
|
+
temperature: options.temperature || 0.7
|
|
101
|
+
})
|
|
102
|
+
});
|
|
103
|
+
|
|
104
|
+
if (!response.ok) {
|
|
105
|
+
const errorText = await response.text();
|
|
106
|
+
console.log(`[Alexa] Hugging Face API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
107
|
+
throw new Error(`Hugging Face API error: ${response.status}`);
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
const data = await response.json();
|
|
111
|
+
console.log(`[Alexa] Hugging Face response structure:`, Object.keys(data));
|
|
112
|
+
|
|
113
|
+
return data.choices[0]?.message?.content || data[0]?.generated_text || 'I apologize, but I could not generate a response.';
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* Create AI client for specific provider
|
|
118
|
+
*/
|
|
119
|
+
async createAIClient(provider) {
|
|
120
|
+
switch (provider) {
|
|
121
|
+
case 'openai':
|
|
122
|
+
return {
|
|
123
|
+
provider: 'openai',
|
|
124
|
+
execute: async (prompt, options = {}) => {
|
|
125
|
+
return await this.callOpenAI(prompt, options);
|
|
126
|
+
}
|
|
127
|
+
};
|
|
128
|
+
case 'anthropic':
|
|
129
|
+
return {
|
|
130
|
+
provider: 'anthropic',
|
|
131
|
+
execute: async (prompt, options = {}) => {
|
|
132
|
+
return await this.callAnthropic(prompt, options);
|
|
133
|
+
}
|
|
134
|
+
};
|
|
135
|
+
case 'google':
|
|
136
|
+
return {
|
|
137
|
+
provider: 'google',
|
|
138
|
+
execute: async (prompt, options = {}) => {
|
|
139
|
+
return await this.callGoogleAI(prompt, options);
|
|
140
|
+
}
|
|
141
|
+
};
|
|
142
|
+
case 'huggingface':
|
|
143
|
+
return {
|
|
144
|
+
provider: 'huggingface',
|
|
145
|
+
execute: async (prompt, options = {}) => {
|
|
146
|
+
return await this.callHuggingFaceAI(prompt, options);
|
|
147
|
+
}
|
|
148
|
+
};
|
|
149
|
+
case 'openrouter':
|
|
150
|
+
return {
|
|
151
|
+
provider: 'openrouter',
|
|
152
|
+
execute: async (prompt, options = {}) => {
|
|
153
|
+
return await this.callOpenRouter(prompt, options);
|
|
154
|
+
}
|
|
155
|
+
};
|
|
156
|
+
case 'cerebras':
|
|
157
|
+
return {
|
|
158
|
+
provider: 'cerebras',
|
|
159
|
+
execute: async (prompt, options = {}) => {
|
|
160
|
+
return await this.callCerebras(prompt, options);
|
|
161
|
+
}
|
|
162
|
+
};
|
|
163
|
+
case 'groq':
|
|
164
|
+
return {
|
|
165
|
+
provider: 'groq',
|
|
166
|
+
execute: async (prompt, options = {}) => {
|
|
167
|
+
return await this.callGroq(prompt, options);
|
|
168
|
+
}
|
|
169
|
+
};
|
|
170
|
+
default:
|
|
171
|
+
return null;
|
|
172
|
+
}
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Call OpenAI API directly
|
|
177
|
+
*/
|
|
178
|
+
async callOpenAI(prompt, options = {}) {
|
|
179
|
+
const apiKey = process.env.OPENAI_API_KEY || this.aiConfig.openai?.apiKey;
|
|
180
|
+
if (!apiKey) {
|
|
181
|
+
throw new Error('OpenAI API key not configured');
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
console.log(`[Alexa] Calling OpenAI API with key starting with: ${apiKey.substring(0, 10)}...`);
|
|
185
|
+
|
|
186
|
+
// Use standard OpenAI API endpoint with fallback model
|
|
187
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
188
|
+
method: 'POST',
|
|
189
|
+
headers: {
|
|
190
|
+
'Content-Type': 'application/json',
|
|
191
|
+
'Authorization': `Bearer ${apiKey}`
|
|
192
|
+
},
|
|
193
|
+
body: JSON.stringify({
|
|
194
|
+
model: options.model || 'gpt-3.5-turbo',
|
|
195
|
+
messages: [{ role: 'user', content: prompt }],
|
|
196
|
+
max_tokens: options.maxTokens || 500,
|
|
197
|
+
temperature: options.temperature || 0.7
|
|
198
|
+
})
|
|
199
|
+
});
|
|
200
|
+
|
|
201
|
+
if (!response.ok) {
|
|
202
|
+
const errorText = await response.text();
|
|
203
|
+
console.log(`[Alexa] OpenAI API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
204
|
+
throw new Error(`OpenAI API error: ${response.status}`);
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
const data = await response.json();
|
|
208
|
+
console.log(`[Alexa] OpenAI response structure:`, Object.keys(data));
|
|
209
|
+
return data.choices[0]?.message?.content || 'I apologize, but I could not generate a response.';
|
|
210
|
+
}
|
|
211
|
+
|
|
212
|
+
/**
|
|
213
|
+
* Call z.ai GLM models (Pro subscription)
|
|
214
|
+
*/
|
|
215
|
+
async callAnthropic(prompt, options = {}) {
|
|
216
|
+
const apiKey = process.env.ANTHROPIC_AUTH_TOKEN || process.env.ANTHROPIC_API_KEY || this.aiConfig.anthropic?.apiKey;
|
|
217
|
+
if (!apiKey) {
|
|
218
|
+
throw new Error('z.ai API key not configured');
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
console.log(`[Alexa] Calling z.ai GLM API...`);
|
|
222
|
+
|
|
223
|
+
// Try different z.ai API endpoints for GLM models
|
|
224
|
+
const endpoints = [
|
|
225
|
+
'https://api.z.ai/v1/chat/completions',
|
|
226
|
+
'https://api.z.ai/v1/glm/chat/completions',
|
|
227
|
+
'https://api.z.ai/api/v1/chat/completions'
|
|
228
|
+
];
|
|
229
|
+
|
|
230
|
+
for (const endpoint of endpoints) {
|
|
231
|
+
try {
|
|
232
|
+
console.log(`[Alexa] Trying z.ai endpoint: ${endpoint}`);
|
|
233
|
+
|
|
234
|
+
const response = await fetch(endpoint, {
|
|
235
|
+
method: 'POST',
|
|
236
|
+
headers: {
|
|
237
|
+
'Content-Type': 'application/json',
|
|
238
|
+
'Authorization': `Bearer ${apiKey}`
|
|
239
|
+
},
|
|
240
|
+
body: JSON.stringify({
|
|
241
|
+
model: options.model || 'glm-4-plus',
|
|
242
|
+
messages: [{ role: 'user', content: prompt }],
|
|
243
|
+
max_tokens: options.maxTokens || 500,
|
|
244
|
+
temperature: options.temperature || 0.7
|
|
245
|
+
})
|
|
246
|
+
});
|
|
247
|
+
|
|
248
|
+
if (response.ok) {
|
|
249
|
+
const data = await response.json();
|
|
250
|
+
console.log(`[Alexa] z.ai response structure:`, Object.keys(data));
|
|
251
|
+
|
|
252
|
+
// Handle different response formats
|
|
253
|
+
if (data.choices && Array.isArray(data.choices) && data.choices[0]) {
|
|
254
|
+
return data.choices[0].message?.content || data.choices[0].text || 'I apologize, but I could not generate a response.';
|
|
255
|
+
} else if (data.content && Array.isArray(data.content) && data.content[0]) {
|
|
256
|
+
return data.content[0].text || 'I apologize, but I could not generate a response.';
|
|
257
|
+
} else if (data.message) {
|
|
258
|
+
return data.message.content || data.message;
|
|
259
|
+
} else if (data.text) {
|
|
260
|
+
return data.text;
|
|
261
|
+
} else if (typeof data === 'string') {
|
|
262
|
+
return data;
|
|
263
|
+
} else {
|
|
264
|
+
console.log('[Alexa] z.ai full response:', JSON.stringify(data).substring(0, 500));
|
|
265
|
+
return 'I apologize, but I could not generate a response.';
|
|
266
|
+
}
|
|
267
|
+
}
|
|
268
|
+
} catch (error) {
|
|
269
|
+
console.log(`[Alexa] z.ai endpoint ${endpoint} failed:`, error.message);
|
|
270
|
+
continue;
|
|
271
|
+
}
|
|
272
|
+
}
|
|
273
|
+
|
|
274
|
+
throw new Error('All z.ai API endpoints failed');
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
/**
|
|
278
|
+
* Call Google AI API directly using Gemini
|
|
279
|
+
*/
|
|
280
|
+
async callGoogleAI(prompt, options = {}) {
|
|
281
|
+
const apiKey = process.env.GOOGLE_AI_API_KEY || this.aiConfig.google?.apiKey;
|
|
282
|
+
const model = options.model || this.aiConfig.google?.model || 'gemini-2.0-flash';
|
|
283
|
+
|
|
284
|
+
if (!apiKey) {
|
|
285
|
+
throw new Error('Google AI API key not configured');
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
console.log(`[Alexa] Calling Google AI API with Gemini model: ${model}`);
|
|
289
|
+
|
|
290
|
+
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`, {
|
|
291
|
+
method: 'POST',
|
|
292
|
+
headers: {
|
|
293
|
+
'Content-Type': 'application/json'
|
|
294
|
+
},
|
|
295
|
+
body: JSON.stringify({
|
|
296
|
+
contents: [{
|
|
297
|
+
parts: [{ text: prompt }],
|
|
298
|
+
role: 'user'
|
|
299
|
+
}],
|
|
300
|
+
generationConfig: {
|
|
301
|
+
temperature: options.temperature || 0.7,
|
|
302
|
+
maxOutputTokens: options.maxTokens || 500
|
|
303
|
+
}
|
|
304
|
+
})
|
|
305
|
+
});
|
|
306
|
+
|
|
307
|
+
if (!response.ok) {
|
|
308
|
+
const errorText = await response.text();
|
|
309
|
+
console.log(`[Alexa] Google AI API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
310
|
+
throw new Error(`Google AI API error: ${response.status}`);
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
const data = await response.json();
|
|
314
|
+
console.log(`[Alexa] Google AI response structure:`, Object.keys(data));
|
|
315
|
+
|
|
316
|
+
return data.candidates[0]?.content?.parts[0]?.text || 'I apologize, but I could not generate a response.';
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
/**
|
|
320
|
+
* Call Groq API for fast inference
|
|
321
|
+
*/
|
|
322
|
+
async callGroq(prompt, options = {}) {
|
|
323
|
+
const apiKey = process.env.GROQ_API_KEY || this.aiConfig.groq?.apiKey;
|
|
324
|
+
|
|
325
|
+
// Use fast Groq models optimized for speed
|
|
326
|
+
const model = options.model || this.aiConfig.groq?.model || 'llama-3.3-70b-versatile';
|
|
327
|
+
|
|
328
|
+
if (!apiKey) {
|
|
329
|
+
throw new Error('Groq API key not configured');
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
console.log(`[Alexa] Calling Groq API with fast model: ${model}`);
|
|
333
|
+
|
|
334
|
+
const response = await fetch('https://api.groq.com/openai/v1/chat/completions', {
|
|
335
|
+
method: 'POST',
|
|
336
|
+
headers: {
|
|
337
|
+
'Content-Type': 'application/json',
|
|
338
|
+
'Authorization': `Bearer ${apiKey}`
|
|
339
|
+
},
|
|
340
|
+
body: JSON.stringify({
|
|
341
|
+
model: model,
|
|
342
|
+
messages: [{ role: 'user', content: prompt }],
|
|
343
|
+
max_tokens: options.maxTokens || 500,
|
|
344
|
+
temperature: options.temperature || 0.7
|
|
345
|
+
})
|
|
346
|
+
});
|
|
347
|
+
|
|
348
|
+
if (!response.ok) {
|
|
349
|
+
const errorText = await response.text();
|
|
350
|
+
console.log(`[Alexa] Groq API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
351
|
+
|
|
352
|
+
// Handle specific Groq API errors
|
|
353
|
+
if (response.status === 401) {
|
|
354
|
+
throw new Error('Groq API authentication failed. Please check your API key.');
|
|
355
|
+
} else if (response.status === 429) {
|
|
356
|
+
throw new Error('Groq API rate limit exceeded. Please try again later.');
|
|
357
|
+
} else if (response.status === 500) {
|
|
358
|
+
throw new Error('Groq API server error. Please try again later.');
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
throw new Error(`Groq API error: ${response.status}`);
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
const data = await response.json();
|
|
365
|
+
console.log(`[Alexa] Groq response structure:`, Object.keys(data));
|
|
366
|
+
|
|
367
|
+
return data.choices[0]?.message?.content || 'I apologize, but I could not generate a response.';
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
/**
|
|
371
|
+
* Call OpenRouter API for unified access to multiple LLM models
|
|
372
|
+
*/
|
|
373
|
+
async callOpenRouter(prompt, options = {}) {
|
|
374
|
+
const apiKey = process.env.OPENROUTER_API_KEY || this.aiConfig.openrouter?.apiKey;
|
|
375
|
+
if (!apiKey) {
|
|
376
|
+
throw new Error('OpenRouter API key not configured');
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
// Support multiple models through OpenRouter's unified interface
|
|
380
|
+
const model = options.model || this.aiConfig.openrouter?.model || 'openai/gpt-3.5-turbo';
|
|
381
|
+
|
|
382
|
+
console.log(`[Alexa] Calling OpenRouter API with model: ${model}`);
|
|
383
|
+
console.log(`[Alexa] API key starting with: ${apiKey.substring(0, 10)}...`);
|
|
384
|
+
|
|
385
|
+
const response = await fetch('https://openrouter.ai/api/v1/chat/completions', {
|
|
386
|
+
method: 'POST',
|
|
387
|
+
headers: {
|
|
388
|
+
'Content-Type': 'application/json',
|
|
389
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
390
|
+
'HTTP-Referer': 'https://github.com/Subho/openclaw-alexa-bridge',
|
|
391
|
+
'X-Title': 'OpenClaw Alexa Bridge'
|
|
392
|
+
},
|
|
393
|
+
body: JSON.stringify({
|
|
394
|
+
model: model,
|
|
395
|
+
messages: [{ role: 'user', content: prompt }],
|
|
396
|
+
max_tokens: options.maxTokens || 500,
|
|
397
|
+
temperature: options.temperature || 0.7
|
|
398
|
+
})
|
|
399
|
+
});
|
|
400
|
+
|
|
401
|
+
if (!response.ok) {
|
|
402
|
+
const errorText = await response.text();
|
|
403
|
+
console.log(`[Alexa] OpenRouter API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
404
|
+
|
|
405
|
+
// Handle specific OpenRouter error scenarios
|
|
406
|
+
if (response.status === 401) {
|
|
407
|
+
throw new Error('OpenRouter authentication failed. Check your API key.');
|
|
408
|
+
} else if (response.status === 402) {
|
|
409
|
+
throw new Error('OpenRouter insufficient credits. Please check your account balance.');
|
|
410
|
+
} else if (response.status === 429) {
|
|
411
|
+
throw new Error('OpenRouter rate limit exceeded. Please try again later.');
|
|
412
|
+
} else if (response.status === 400) {
|
|
413
|
+
throw new Error('OpenRouter invalid request. Check the model name and parameters.');
|
|
414
|
+
}
|
|
415
|
+
|
|
416
|
+
throw new Error(`OpenRouter API error: ${response.status}`);
|
|
417
|
+
}
|
|
418
|
+
|
|
419
|
+
const data = await response.json();
|
|
420
|
+
console.log(`[Alexa] OpenRouter response structure:`, Object.keys(data));
|
|
421
|
+
|
|
422
|
+
// Handle OpenRouter response format (similar to OpenAI)
|
|
423
|
+
return data.choices[0]?.message?.content || 'I apologize, but I could not generate a response.';
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
/**
|
|
427
|
+
* Call Cerebras API directly for ultra-fast inference
|
|
428
|
+
*/
|
|
429
|
+
async callCerebras(prompt, options = {}) {
|
|
430
|
+
const apiKey = process.env.CEREBRAS_API_KEY || this.aiConfig.cerebras?.apiKey;
|
|
431
|
+
if (!apiKey) {
|
|
432
|
+
throw new Error('Cerebras API key not configured');
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
// Use fast Llama models on Cerebras
|
|
436
|
+
const model = options.model || this.aiConfig.cerebras?.model || 'llama-3.3-70b-8192';
|
|
437
|
+
|
|
438
|
+
console.log(`[Alexa] Calling Cerebras API with fast model: ${model}`);
|
|
439
|
+
console.log(`[Alexa] API key starting with: ${apiKey.substring(0, 10)}...`);
|
|
440
|
+
|
|
441
|
+
const response = await fetch('https://api.cerebras.ai/v1/chat/completions', {
|
|
442
|
+
method: 'POST',
|
|
443
|
+
headers: {
|
|
444
|
+
'Content-Type': 'application/json',
|
|
445
|
+
'Authorization': `Bearer ${apiKey}`
|
|
446
|
+
},
|
|
447
|
+
body: JSON.stringify({
|
|
448
|
+
model: model,
|
|
449
|
+
messages: [{ role: 'user', content: prompt }],
|
|
450
|
+
max_tokens: options.maxTokens || 500,
|
|
451
|
+
temperature: options.temperature || 0.7
|
|
452
|
+
})
|
|
453
|
+
});
|
|
454
|
+
|
|
455
|
+
if (!response.ok) {
|
|
456
|
+
const errorText = await response.text();
|
|
457
|
+
console.log(`[Alexa] Cerebras API error ${response.status}: ${errorText.substring(0, 200)}`);
|
|
458
|
+
|
|
459
|
+
// Handle specific Cerebras error scenarios
|
|
460
|
+
if (response.status === 401) {
|
|
461
|
+
throw new Error('Cerebras authentication failed. Check your API key.');
|
|
462
|
+
} else if (response.status === 403) {
|
|
463
|
+
throw new Error('Cerebras access forbidden. Check your API permissions.');
|
|
464
|
+
} else if (response.status === 429) {
|
|
465
|
+
throw new Error('Cerebras rate limit exceeded. Please try again later.');
|
|
466
|
+
} else if (response.status === 400) {
|
|
467
|
+
throw new Error('Cerebras invalid request. Check the model name and parameters.');
|
|
468
|
+
}
|
|
469
|
+
|
|
470
|
+
throw new Error(`Cerebras API error: ${response.status}`);
|
|
471
|
+
}
|
|
472
|
+
|
|
473
|
+
const data = await response.json();
|
|
474
|
+
console.log(`[Alexa] Cerebras response structure:`, Object.keys(data));
|
|
475
|
+
|
|
476
|
+
// Handle Cerebras response format (similar to OpenAI)
|
|
477
|
+
return data.choices[0]?.message?.content || 'I apologize, but I could not generate a response.';
|
|
478
|
+
}
|
|
479
|
+
|
|
480
|
+
/**
|
|
481
|
+
* Handle incoming Alexa request
|
|
482
|
+
*/
|
|
483
|
+
async handleRequest(request) {
|
|
484
|
+
try {
|
|
485
|
+
// 1. Parse Alexa request
|
|
486
|
+
const intent = this.parseIntent(request);
|
|
487
|
+
const userId = this.getUserId(request);
|
|
488
|
+
const query = this.extractQuery(request, intent);
|
|
489
|
+
|
|
490
|
+
const userHash = hashValue(userId);
|
|
491
|
+
console.log(`[Alexa] Request: intent=${intent.name}, user=${userHash || 'unknown'}`);
|
|
492
|
+
if (this.logVerbose) {
|
|
493
|
+
console.log(`[Alexa] Query: ${query}`);
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
// 2. Detect language using Google Translate API with fallback
|
|
497
|
+
const detectionResult = await this.detectLanguageWithFallback(query);
|
|
498
|
+
const detectedLang = detectionResult.language;
|
|
499
|
+
const detectionMethod = detectionResult.method;
|
|
500
|
+
const detectionConfidence = detectionResult.confidence;
|
|
501
|
+
|
|
502
|
+
// Track detection method statistics
|
|
503
|
+
this.trackDetectionMethod(detectionMethod);
|
|
504
|
+
|
|
505
|
+
const userLangPref = this.userLanguagePrefs.get(userId);
|
|
506
|
+
const targetLang = userLangPref || detectedLang;
|
|
507
|
+
|
|
508
|
+
let translatedQuery = query;
|
|
509
|
+
let originalLanguage = detectedLang;
|
|
510
|
+
const isMixedLanguage = detectionResult.isTransliterated || detectionResult.enhancedDetails?.isMixedLanguage;
|
|
511
|
+
|
|
512
|
+
// 3. Translate if needed
|
|
513
|
+
if (isMixedLanguage) {
|
|
514
|
+
console.log(`[Alexa] Mixed language detected (${detectedLang}-EN) using ${detectionMethod}`);
|
|
515
|
+
console.log(`[Alexa] Detection confidence: ${(detectionConfidence * 100).toFixed(1)}%`);
|
|
516
|
+
|
|
517
|
+
if (detectedLang !== 'en') {
|
|
518
|
+
translatedQuery = await this.translateWithSarvamOrFallback(query, detectedLang);
|
|
519
|
+
}
|
|
520
|
+
} else if (detectedLang !== 'en') {
|
|
521
|
+
console.log(`[Alexa] Language detected: ${detectedLang} using ${detectionMethod} (confidence: ${(detectionConfidence * 100).toFixed(1)}%)`);
|
|
522
|
+
translatedQuery = await this.translateWithSarvamOrFallback(query, detectedLang);
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
if (this.logVerbose) {
|
|
526
|
+
console.log(`[Alexa] Original query: ${query}`);
|
|
527
|
+
console.log(`[Alexa] Translated query: ${translatedQuery}`);
|
|
528
|
+
}
|
|
529
|
+
|
|
530
|
+
// 4. Execute via direct AI API (simplified flow)
|
|
531
|
+
const aiResponse = await this.executeViaDirectAI(translatedQuery, detectedLang);
|
|
532
|
+
|
|
533
|
+
// 5. Translate response back to original language if needed
|
|
534
|
+
let finalResponse = aiResponse;
|
|
535
|
+
if (originalLanguage !== 'en' && aiResponse) {
|
|
536
|
+
console.log(`[Alexa] Translating response back to ${originalLanguage}...`);
|
|
537
|
+
const translatedBack = await this.sarvamClient.translateFromEnglish(
|
|
538
|
+
aiResponse,
|
|
539
|
+
originalLanguage
|
|
540
|
+
);
|
|
541
|
+
finalResponse = {
|
|
542
|
+
originalEnglish: aiResponse,
|
|
543
|
+
content: translatedBack,
|
|
544
|
+
language: originalLanguage
|
|
545
|
+
};
|
|
546
|
+
if (this.logVerbose) {
|
|
547
|
+
console.log(`[Alexa] Translated response: ${translatedBack.substring(0, 100)}...`);
|
|
548
|
+
}
|
|
549
|
+
} else {
|
|
550
|
+
finalResponse = {
|
|
551
|
+
content: aiResponse,
|
|
552
|
+
language: 'en'
|
|
553
|
+
};
|
|
554
|
+
}
|
|
555
|
+
|
|
556
|
+
// 6. Format for Alexa
|
|
557
|
+
return this.formatAlexaResponse(finalResponse);
|
|
558
|
+
|
|
559
|
+
} catch (error) {
|
|
560
|
+
console.error('[Alexa] Request failed:', error);
|
|
561
|
+
return this.handleError(error);
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
|
|
565
|
+
/**
|
|
566
|
+
* Execute query via direct AI API calls
|
|
567
|
+
*/
|
|
568
|
+
async executeViaDirectAI(query, language, options = {}) {
|
|
569
|
+
console.log(`[Alexa] Executing via direct AI API (language: ${language})`);
|
|
570
|
+
|
|
571
|
+
// Build prompt for AI with context
|
|
572
|
+
const prompt = this.buildAIPrompt(query, language, options);
|
|
573
|
+
|
|
574
|
+
// Try primary AI provider first
|
|
575
|
+
try {
|
|
576
|
+
const primaryClient = await this.createAIClient(this.primaryAIProvider);
|
|
577
|
+
if (primaryClient) {
|
|
578
|
+
console.log(`[Alexa] Using primary AI provider: ${this.primaryAIProvider}`);
|
|
579
|
+
return await primaryClient.execute(prompt, options);
|
|
580
|
+
}
|
|
581
|
+
} catch (primaryError) {
|
|
582
|
+
console.warn(`[Alexa] Primary AI provider failed: ${primaryError.message}`);
|
|
583
|
+
|
|
584
|
+
// Try fallback providers
|
|
585
|
+
for (const fallbackProvider of this.fallbackAIProviders) {
|
|
586
|
+
try {
|
|
587
|
+
const fallbackClient = await this.createAIClient(fallbackProvider);
|
|
588
|
+
if (fallbackClient) {
|
|
589
|
+
console.log(`[Alexa] Using fallback AI provider: ${fallbackProvider}`);
|
|
590
|
+
return await fallbackClient.execute(prompt, options);
|
|
591
|
+
}
|
|
592
|
+
} catch (fallbackError) {
|
|
593
|
+
console.warn(`[Alexa] Fallback provider ${fallbackProvider} failed: ${fallbackError.message}`);
|
|
594
|
+
continue;
|
|
595
|
+
}
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
// All AI providers failed - return helpful fallback response
|
|
599
|
+
throw new Error('All AI providers unavailable');
|
|
600
|
+
}
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
/**
|
|
604
|
+
* Build prompt for AI API
|
|
605
|
+
*/
|
|
606
|
+
buildAIPrompt(query, language, options = {}) {
|
|
607
|
+
let prompt = query;
|
|
608
|
+
|
|
609
|
+
// Add language context
|
|
610
|
+
if (language !== 'en') {
|
|
611
|
+
prompt += `\n\n[Language Context: Query is in ${language === 'hi' ? 'Hindi' : language === 'bn' ? 'Bengali' : language}]`;
|
|
612
|
+
}
|
|
613
|
+
|
|
614
|
+
// Add platform context
|
|
615
|
+
prompt += '\n\n[Platform: Alexa Voice Assistant]';
|
|
616
|
+
|
|
617
|
+
// Add system prompt for better responses
|
|
618
|
+
prompt += '\n\nYou are a helpful Alexa voice assistant. Provide clear, concise responses that work well with voice output. Avoid markdown, code blocks, or complex formatting.';
|
|
619
|
+
|
|
620
|
+
return prompt;
|
|
621
|
+
}
|
|
622
|
+
|
|
623
|
+
/**
|
|
624
|
+
* Parse intent from Alexa request
|
|
625
|
+
*/
|
|
626
|
+
parseIntent(request) {
|
|
627
|
+
const intent = request.request.intent;
|
|
628
|
+
return {
|
|
629
|
+
name: intent ? intent.name : 'GeneralQueryIntent',
|
|
630
|
+
slots: intent ? intent.slots : {},
|
|
631
|
+
confirmationStatus: intent ? intent.confirmationStatus : 'NONE'
|
|
632
|
+
};
|
|
633
|
+
}
|
|
634
|
+
|
|
635
|
+
/**
|
|
636
|
+
* Extract user ID from Alexa request
|
|
637
|
+
*/
|
|
638
|
+
getUserId(request) {
|
|
639
|
+
return request.session.user.userId;
|
|
640
|
+
}
|
|
641
|
+
|
|
642
|
+
/**
|
|
643
|
+
* Extract query from Alexa request
|
|
644
|
+
*/
|
|
645
|
+
extractQuery(request, intent) {
|
|
646
|
+
// Try to get from intent slot (handle both uppercase and lowercase slot names)
|
|
647
|
+
if (intent.slots) {
|
|
648
|
+
// Check for lowercase 'query' slot (Alexa standard)
|
|
649
|
+
if (intent.slots.query && intent.slots.query.value) {
|
|
650
|
+
return intent.slots.query.value;
|
|
651
|
+
}
|
|
652
|
+
// Check for uppercase 'Query' slot (fallback)
|
|
653
|
+
if (intent.slots.Query && intent.slots.Query.value) {
|
|
654
|
+
return intent.slots.Query.value;
|
|
655
|
+
}
|
|
656
|
+
// Check for WhatsApp 'fullRequest' slot
|
|
657
|
+
if (intent.slots.fullRequest && intent.slots.fullRequest.value) {
|
|
658
|
+
return intent.slots.fullRequest.value;
|
|
659
|
+
}
|
|
660
|
+
}
|
|
661
|
+
|
|
662
|
+
// Fallback to request body
|
|
663
|
+
return request.request.intent?.name || 'General query';
|
|
664
|
+
}
|
|
665
|
+
|
|
666
|
+
/**
|
|
667
|
+
* Detect language using Google Translate API with intelligent fallback
|
|
668
|
+
*/
|
|
669
|
+
async detectLanguageWithFallback(text) {
|
|
670
|
+
try {
|
|
671
|
+
// Try Google Translate API first
|
|
672
|
+
console.log(`[Alexa] Attempting Google Translate API detection...`);
|
|
673
|
+
const googleResult = await this.googleTranslateClient.detectLanguage(text);
|
|
674
|
+
|
|
675
|
+
console.log(`[Alexa] Google Translate detection successful: ${googleResult.language} (confidence: ${(googleResult.confidence * 100).toFixed(1)}%)`);
|
|
676
|
+
|
|
677
|
+
// Return enhanced result with metadata
|
|
678
|
+
return {
|
|
679
|
+
language: googleResult.language,
|
|
680
|
+
confidence: googleResult.confidence,
|
|
681
|
+
method: 'google',
|
|
682
|
+
processingTime: googleResult.processingTime || 0,
|
|
683
|
+
isTransliterated: googleResult.isTransliterated || false,
|
|
684
|
+
googleLanguage: googleResult.googleLanguage || googleResult.language,
|
|
685
|
+
enhancedDetails: null
|
|
686
|
+
};
|
|
687
|
+
|
|
688
|
+
} catch (error) {
|
|
689
|
+
console.log(`[Alexa] Google Translate API failed: ${error.message}`);
|
|
690
|
+
console.log(`[Alexa] Falling back to enhanced language detector...`);
|
|
691
|
+
|
|
692
|
+
// Fallback to enhanced detector
|
|
693
|
+
try {
|
|
694
|
+
const enhancedResult = await this.sarvamClient.enhancedDetector.detectLanguage(text);
|
|
695
|
+
|
|
696
|
+
return {
|
|
697
|
+
language: enhancedResult.language,
|
|
698
|
+
confidence: enhancedResult.confidence,
|
|
699
|
+
method: 'enhanced_fallback',
|
|
700
|
+
processingTime: enhancedResult.details?.processingTime || 'unknown',
|
|
701
|
+
isTransliterated: enhancedResult.isMixedLanguage || false,
|
|
702
|
+
googleLanguage: null,
|
|
703
|
+
enhancedDetails: enhancedResult
|
|
704
|
+
};
|
|
705
|
+
} catch (fallbackError) {
|
|
706
|
+
console.error(`[Alexa] Both detection methods failed: ${fallbackError.message}`);
|
|
707
|
+
|
|
708
|
+
// Ultimate fallback to basic detection
|
|
709
|
+
return {
|
|
710
|
+
language: 'en',
|
|
711
|
+
confidence: 0.5,
|
|
712
|
+
method: 'ultimate_fallback',
|
|
713
|
+
processingTime: 0,
|
|
714
|
+
isTransliterated: false,
|
|
715
|
+
googleLanguage: null,
|
|
716
|
+
enhancedDetails: null,
|
|
717
|
+
error: fallbackError.message
|
|
718
|
+
};
|
|
719
|
+
}
|
|
720
|
+
}
|
|
721
|
+
}
|
|
722
|
+
|
|
723
|
+
/**
|
|
724
|
+
* Translate using Sarvam API with fallback to alternative services
|
|
725
|
+
*/
|
|
726
|
+
async translateWithSarvamOrFallback(text, sourceLang) {
|
|
727
|
+
try {
|
|
728
|
+
// Primary: Use Sarvam API for translation
|
|
729
|
+
console.log(`[Alexa] Attempting Sarvam API translation from ${sourceLang} to English...`);
|
|
730
|
+
|
|
731
|
+
const translated = await this.sarvamClient.translateToEnglish(text, sourceLang);
|
|
732
|
+
console.log(`[Alexa] Sarvam API translation successful`);
|
|
733
|
+
|
|
734
|
+
return translated;
|
|
735
|
+
|
|
736
|
+
} catch (sarvamError) {
|
|
737
|
+
console.warn(`[Alexa] Sarvam API translation failed: ${sarvamError.message}`);
|
|
738
|
+
console.log(`[Alexa] Falling back to alternative translation services...`);
|
|
739
|
+
|
|
740
|
+
// Secondary: Try LibreTranslate
|
|
741
|
+
try {
|
|
742
|
+
const libreTranslated = await this.translateWithLibreTranslate(text, sourceLang, 'en');
|
|
743
|
+
console.log(`[Alexa] LibreTranslate fallback successful`);
|
|
744
|
+
return libreTranslated;
|
|
745
|
+
} catch (libreError) {
|
|
746
|
+
console.warn(`[Alexa] LibreTranslate failed: ${libreError.message}`);
|
|
747
|
+
|
|
748
|
+
// Tertiary: Try MyMemory
|
|
749
|
+
try {
|
|
750
|
+
const memoryTranslated = await this.translateWithMyMemory(text, sourceLang, 'en');
|
|
751
|
+
console.log(`[Alexa] MyMemory fallback successful`);
|
|
752
|
+
return memoryTranslated;
|
|
753
|
+
} catch (memoryError) {
|
|
754
|
+
console.error(`[Alexa] All translation services failed: ${memoryError.message}`);
|
|
755
|
+
// Ultimate fallback: Return original text
|
|
756
|
+
return text;
|
|
757
|
+
}
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
}
|
|
761
|
+
|
|
762
|
+
/**
|
|
763
|
+
* Translate using LibreTranslate
|
|
764
|
+
*/
|
|
765
|
+
async translateWithLibreTranslate(text, source, target) {
|
|
766
|
+
const response = await fetch('https://libretranslate.de/translate', {
|
|
767
|
+
method: 'POST',
|
|
768
|
+
headers: {
|
|
769
|
+
'Content-Type': 'application/json',
|
|
770
|
+
},
|
|
771
|
+
body: JSON.stringify({
|
|
772
|
+
q: text,
|
|
773
|
+
source: source === 'hi' ? 'hi' : source === 'bn' ? 'bn' : 'en',
|
|
774
|
+
target: target === 'en' ? 'en' : 'en',
|
|
775
|
+
format: 'text'
|
|
776
|
+
})
|
|
777
|
+
});
|
|
778
|
+
|
|
779
|
+
if (!response.ok) {
|
|
780
|
+
throw new Error(`LibreTranslate error: ${response.status}`);
|
|
781
|
+
}
|
|
782
|
+
|
|
783
|
+
const data = await response.json();
|
|
784
|
+
return data.translatedText || text;
|
|
785
|
+
}
|
|
786
|
+
|
|
787
|
+
/**
|
|
788
|
+
* Translate using MyMemory
|
|
789
|
+
*/
|
|
790
|
+
async translateWithMyMemory(text, source, target) {
|
|
791
|
+
const response = await fetch('https://api.mymemory.translated.net/objs', {
|
|
792
|
+
method: 'POST',
|
|
793
|
+
headers: {
|
|
794
|
+
'Content-Type': 'application/json',
|
|
795
|
+
},
|
|
796
|
+
body: JSON.stringify({
|
|
797
|
+
source: source === 'hi' ? 'hi' : source === 'bn' ? 'bn' : 'en',
|
|
798
|
+
target: target === 'en' ? 'en' : 'en',
|
|
799
|
+
input: text,
|
|
800
|
+
format: 'text'
|
|
801
|
+
})
|
|
802
|
+
});
|
|
803
|
+
|
|
804
|
+
if (!response.ok) {
|
|
805
|
+
throw new Error(`MyMemory error: ${response.status}`);
|
|
806
|
+
}
|
|
807
|
+
|
|
808
|
+
const data = await response.json();
|
|
809
|
+
return data.responseData.translatedText || text;
|
|
810
|
+
}
|
|
811
|
+
|
|
812
|
+
/**
|
|
813
|
+
* Format AI response for Alexa
|
|
814
|
+
*/
|
|
815
|
+
formatAlexaResponse(aiResponse) {
|
|
816
|
+
let text;
|
|
817
|
+
|
|
818
|
+
// Handle different response formats
|
|
819
|
+
if (typeof aiResponse === 'string') {
|
|
820
|
+
text = aiResponse;
|
|
821
|
+
} else if (aiResponse.content) {
|
|
822
|
+
text = aiResponse.content;
|
|
823
|
+
} else if (aiResponse.responses && aiResponse.responses.length > 0) {
|
|
824
|
+
// Use best response from parallel execution
|
|
825
|
+
const bestResponse = aiResponse.responses[0];
|
|
826
|
+
text = bestResponse.content || bestResponse;
|
|
827
|
+
} else {
|
|
828
|
+
text = "I apologize, but I couldn't process that request.";
|
|
829
|
+
}
|
|
830
|
+
|
|
831
|
+
// Clean up text for voice output
|
|
832
|
+
text = this.cleanTextForAlexa(text);
|
|
833
|
+
|
|
834
|
+
// Build Alexa response
|
|
835
|
+
const response = {
|
|
836
|
+
version: "1.0",
|
|
837
|
+
response: {
|
|
838
|
+
outputSpeech: this.buildSpeech(text),
|
|
839
|
+
shouldEndSession: true
|
|
840
|
+
}
|
|
841
|
+
};
|
|
842
|
+
|
|
843
|
+
return response;
|
|
844
|
+
}
|
|
845
|
+
|
|
846
|
+
/**
|
|
847
|
+
* Build speech output (with or without voice)
|
|
848
|
+
*/
|
|
849
|
+
buildSpeech(text) {
|
|
850
|
+
const escapedText = this.escapeXml(text);
|
|
851
|
+
|
|
852
|
+
if (this.voiceConfig && this.voiceConfig.voice) {
|
|
853
|
+
return {
|
|
854
|
+
type: 'SSML',
|
|
855
|
+
ssml: `<speak><voice name="${this.voiceConfig.voice}">${escapedText}</voice></speak>`
|
|
856
|
+
};
|
|
857
|
+
}
|
|
858
|
+
|
|
859
|
+
return {
|
|
860
|
+
type: 'PlainText',
|
|
861
|
+
text: text
|
|
862
|
+
};
|
|
863
|
+
}
|
|
864
|
+
|
|
865
|
+
/**
|
|
866
|
+
* Clean text for Alexa speech
|
|
867
|
+
*/
|
|
868
|
+
cleanTextForAlexa(text) {
|
|
869
|
+
return text
|
|
870
|
+
.replace(/```[\s\S]*?```/g, 'See code for details.') // Remove code blocks
|
|
871
|
+
.replace(/\*\*([^*]+)\*\*/g, '$1') // Remove bold markdown
|
|
872
|
+
.replace(/\*([^*]+)\*/g, '$1') // Remove italic markdown
|
|
873
|
+
.replace(/#{1,6}\s/g, '') // Remove headers
|
|
874
|
+
.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1') // Remove links
|
|
875
|
+
.replace(/\n{3,}/g, '\n\n') // Reduce excessive newlines
|
|
876
|
+
.trim();
|
|
877
|
+
}
|
|
878
|
+
|
|
879
|
+
/**
|
|
880
|
+
* Escape XML for SSML
|
|
881
|
+
*/
|
|
882
|
+
escapeXml(text) {
|
|
883
|
+
return text
|
|
884
|
+
.replace(/&/g, '&')
|
|
885
|
+
.replace(/</g, '<')
|
|
886
|
+
.replace(/>/g, '>')
|
|
887
|
+
.replace(/"/g, '"')
|
|
888
|
+
.replace(/'/g, ''');
|
|
889
|
+
}
|
|
890
|
+
|
|
891
|
+
/**
|
|
892
|
+
* Handle errors with enhanced recovery logic
|
|
893
|
+
*/
|
|
894
|
+
handleError(error) {
|
|
895
|
+
console.error('[Alexa] Error:', error);
|
|
896
|
+
|
|
897
|
+
// Determine error type and recovery strategy
|
|
898
|
+
let recoveryStrategy = this.determineRecoveryStrategy(error);
|
|
899
|
+
|
|
900
|
+
switch (recoveryStrategy) {
|
|
901
|
+
case 'translation_error':
|
|
902
|
+
return this.handleTranslationError(error);
|
|
903
|
+
|
|
904
|
+
case 'ai_error':
|
|
905
|
+
return this.handleAIError(error);
|
|
906
|
+
|
|
907
|
+
case 'network_error':
|
|
908
|
+
return this.handleNetworkError(error);
|
|
909
|
+
|
|
910
|
+
case 'user_input_error':
|
|
911
|
+
return this.handleUserInputError(error);
|
|
912
|
+
|
|
913
|
+
case 'system_error':
|
|
914
|
+
return this.handleSystemError(error);
|
|
915
|
+
|
|
916
|
+
default:
|
|
917
|
+
return this.handleGenericError(error);
|
|
918
|
+
}
|
|
919
|
+
}
|
|
920
|
+
|
|
921
|
+
/**
|
|
922
|
+
* Determine recovery strategy based on error type
|
|
923
|
+
*/
|
|
924
|
+
determineRecoveryStrategy(error) {
|
|
925
|
+
const errorMessage = error.message?.toLowerCase() || error.toString().toLowerCase();
|
|
926
|
+
|
|
927
|
+
// AI API errors (check these first to avoid catching "api" in translation errors)
|
|
928
|
+
if (errorMessage.includes('ai') ||
|
|
929
|
+
errorMessage.includes('openai') ||
|
|
930
|
+
errorMessage.includes('anthropic') ||
|
|
931
|
+
errorMessage.includes('claude') ||
|
|
932
|
+
errorMessage.includes('google') ||
|
|
933
|
+
errorMessage.includes('openrouter') ||
|
|
934
|
+
errorMessage.includes('cerebras') ||
|
|
935
|
+
errorMessage.includes('groq') ||
|
|
936
|
+
errorMessage.includes('model')) {
|
|
937
|
+
return 'ai_error';
|
|
938
|
+
}
|
|
939
|
+
|
|
940
|
+
// Translation errors
|
|
941
|
+
if (errorMessage.includes('translation') ||
|
|
942
|
+
errorMessage.includes('sarvam') ||
|
|
943
|
+
errorMessage.includes('translate')) {
|
|
944
|
+
return 'translation_error';
|
|
945
|
+
}
|
|
946
|
+
|
|
947
|
+
// Generic API errors (only if not already classified)
|
|
948
|
+
if (errorMessage.includes('api')) {
|
|
949
|
+
return 'system_error';
|
|
950
|
+
}
|
|
951
|
+
|
|
952
|
+
// Network errors
|
|
953
|
+
if (errorMessage.includes('network') ||
|
|
954
|
+
errorMessage.includes('connection') ||
|
|
955
|
+
errorMessage.includes('timeout') ||
|
|
956
|
+
errorMessage.includes('fetch') ||
|
|
957
|
+
errorMessage.includes('econnrefused')) {
|
|
958
|
+
return 'network_error';
|
|
959
|
+
}
|
|
960
|
+
|
|
961
|
+
// User input errors
|
|
962
|
+
if (errorMessage.includes('invalid') ||
|
|
963
|
+
errorMessage.includes('undefined') ||
|
|
964
|
+
errorMessage.includes('null') ||
|
|
965
|
+
errorMessage.includes('format')) {
|
|
966
|
+
return 'user_input_error';
|
|
967
|
+
}
|
|
968
|
+
|
|
969
|
+
// System errors
|
|
970
|
+
return 'system_error';
|
|
971
|
+
}
|
|
972
|
+
|
|
973
|
+
/**
|
|
974
|
+
* Handle translation errors with user-friendly messages
|
|
975
|
+
*/
|
|
976
|
+
handleTranslationError(error) {
|
|
977
|
+
console.log('[Alexa] Translation error detected, activating recovery...');
|
|
978
|
+
|
|
979
|
+
const errorMessages = [
|
|
980
|
+
"I'm having trouble translating that right now. Let me try a different approach.",
|
|
981
|
+
"Translation service is temporarily unavailable. I'll use my fallback system.",
|
|
982
|
+
"I can process your request, but I'm having translation issues.",
|
|
983
|
+
"Let me help you with that using English instead."
|
|
984
|
+
];
|
|
985
|
+
|
|
986
|
+
const userMessage = errorMessages[Math.floor(Math.random() * errorMessages.length)];
|
|
987
|
+
|
|
988
|
+
return {
|
|
989
|
+
version: "1.0",
|
|
990
|
+
response: {
|
|
991
|
+
outputSpeech: {
|
|
992
|
+
type: 'PlainText',
|
|
993
|
+
text: userMessage
|
|
994
|
+
},
|
|
995
|
+
shouldEndSession: false,
|
|
996
|
+
card: {
|
|
997
|
+
type: 'Simple',
|
|
998
|
+
title: 'Translation Service Issue',
|
|
999
|
+
content: userMessage + " I'm working to improve this experience."
|
|
1000
|
+
}
|
|
1001
|
+
}
|
|
1002
|
+
};
|
|
1003
|
+
}
|
|
1004
|
+
|
|
1005
|
+
/**
|
|
1006
|
+
* Handle AI API errors with fallback
|
|
1007
|
+
*/
|
|
1008
|
+
handleAIError(error) {
|
|
1009
|
+
console.log('[Alexa] AI API error detected, activating fallback...');
|
|
1010
|
+
|
|
1011
|
+
const fallbackResponses = [
|
|
1012
|
+
"I'm having some technical difficulties right now. Let me try to help you differently.",
|
|
1013
|
+
"Let me provide you with some general information instead.",
|
|
1014
|
+
"I'm working on improving my responses for you.",
|
|
1015
|
+
"Thank you for your patience while I resolve this issue."
|
|
1016
|
+
];
|
|
1017
|
+
|
|
1018
|
+
const fallbackMessage = fallbackResponses[Math.floor(Math.random() * fallbackResponses.length)];
|
|
1019
|
+
|
|
1020
|
+
return {
|
|
1021
|
+
version: "1.0",
|
|
1022
|
+
response: {
|
|
1023
|
+
outputSpeech: {
|
|
1024
|
+
type: 'PlainText',
|
|
1025
|
+
text: fallbackMessage
|
|
1026
|
+
},
|
|
1027
|
+
shouldEndSession: false,
|
|
1028
|
+
card: {
|
|
1029
|
+
type: 'Simple',
|
|
1030
|
+
title: 'Technical Issue',
|
|
1031
|
+
content: fallbackMessage + " I'm experiencing some technical challenges."
|
|
1032
|
+
}
|
|
1033
|
+
}
|
|
1034
|
+
};
|
|
1035
|
+
}
|
|
1036
|
+
|
|
1037
|
+
/**
|
|
1038
|
+
* Handle network errors with retry logic
|
|
1039
|
+
*/
|
|
1040
|
+
handleNetworkError(error) {
|
|
1041
|
+
console.log('[Alexa] Network error detected, implementing retry logic...');
|
|
1042
|
+
|
|
1043
|
+
const retryMessages = [
|
|
1044
|
+
"I'm having trouble connecting right now. Let me try again.",
|
|
1045
|
+
"Network connection seems unstable. Please try again in a moment.",
|
|
1046
|
+
"I'm experiencing some connectivity issues. Let me help you with what I can do."
|
|
1047
|
+
];
|
|
1048
|
+
|
|
1049
|
+
const retryMessage = retryMessages[Math.floor(Math.random() * retryMessages.length)];
|
|
1050
|
+
|
|
1051
|
+
return {
|
|
1052
|
+
version: "1.0",
|
|
1053
|
+
response: {
|
|
1054
|
+
outputSpeech: {
|
|
1055
|
+
type: 'PlainText',
|
|
1056
|
+
text: retryMessage
|
|
1057
|
+
},
|
|
1058
|
+
shouldEndSession: false,
|
|
1059
|
+
card: {
|
|
1060
|
+
type: 'Simple',
|
|
1061
|
+
title: 'Connection Issue',
|
|
1062
|
+
content: retryMessage + " I'll keep trying to provide the best service possible."
|
|
1063
|
+
}
|
|
1064
|
+
}
|
|
1065
|
+
};
|
|
1066
|
+
}
|
|
1067
|
+
|
|
1068
|
+
/**
|
|
1069
|
+
* Handle user input errors with helpful guidance
|
|
1070
|
+
*/
|
|
1071
|
+
handleUserInputError(error) {
|
|
1072
|
+
console.log('[Alexa] User input error, providing guidance...');
|
|
1073
|
+
|
|
1074
|
+
const guidanceMessages = [
|
|
1075
|
+
"I didn't quite catch that. Could you please say it differently?",
|
|
1076
|
+
"I'm having some trouble understanding. Could you rephrase that?",
|
|
1077
|
+
"Let's try a different approach. What would you like to know about?",
|
|
1078
|
+
"I want to make sure I'm giving you the best help possible."
|
|
1079
|
+
];
|
|
1080
|
+
|
|
1081
|
+
const guidanceMessage = guidanceMessages[Math.floor(Math.random() * guidanceMessages.length)];
|
|
1082
|
+
|
|
1083
|
+
return {
|
|
1084
|
+
version: "1.0",
|
|
1085
|
+
response: {
|
|
1086
|
+
outputSpeech: {
|
|
1087
|
+
type: 'PlainText',
|
|
1088
|
+
text: guidanceMessage
|
|
1089
|
+
},
|
|
1090
|
+
shouldEndSession: false,
|
|
1091
|
+
card: {
|
|
1092
|
+
type: 'Simple',
|
|
1093
|
+
title: 'Understanding Issue',
|
|
1094
|
+
content: guidanceMessage + " I'm here to help you in the best way I can."
|
|
1095
|
+
}
|
|
1096
|
+
}
|
|
1097
|
+
};
|
|
1098
|
+
}
|
|
1099
|
+
|
|
1100
|
+
/**
|
|
1101
|
+
* Handle system errors gracefully
|
|
1102
|
+
*/
|
|
1103
|
+
handleSystemError(error) {
|
|
1104
|
+
console.log('[Alexa] System error, using graceful degradation...');
|
|
1105
|
+
|
|
1106
|
+
const gracefulMessages = [
|
|
1107
|
+
"I'm experiencing some technical difficulties. Thank you for your patience.",
|
|
1108
|
+
"My systems are working hard to serve you better.",
|
|
1109
|
+
"I apologize for the inconvenience. I'm improving my services.",
|
|
1110
|
+
"Thank you for your understanding as I work through this technical challenge."
|
|
1111
|
+
];
|
|
1112
|
+
|
|
1113
|
+
const gracefulMessage = gracefulMessages[Math.floor(Math.random() * gracefulMessages.length)];
|
|
1114
|
+
|
|
1115
|
+
return {
|
|
1116
|
+
version: "1.0",
|
|
1117
|
+
response: {
|
|
1118
|
+
outputSpeech: {
|
|
1119
|
+
type: 'PlainText',
|
|
1120
|
+
text: gracefulMessage
|
|
1121
|
+
},
|
|
1122
|
+
shouldEndSession: false,
|
|
1123
|
+
card: {
|
|
1124
|
+
type: 'Simple',
|
|
1125
|
+
title: 'Service Improvement',
|
|
1126
|
+
content: gracefulMessage + " I'm continuously working to enhance your experience."
|
|
1127
|
+
}
|
|
1128
|
+
}
|
|
1129
|
+
};
|
|
1130
|
+
}
|
|
1131
|
+
|
|
1132
|
+
/**
|
|
1133
|
+
* Handle generic errors with fallback
|
|
1134
|
+
*/
|
|
1135
|
+
handleGenericError(error) {
|
|
1136
|
+
console.error('[Alexa] Unhandled error:', error);
|
|
1137
|
+
|
|
1138
|
+
return {
|
|
1139
|
+
version: "1.0",
|
|
1140
|
+
response: {
|
|
1141
|
+
outputSpeech: {
|
|
1142
|
+
type: 'PlainText',
|
|
1143
|
+
text: "I'm sorry, but I encountered an unexpected issue. Please try again."
|
|
1144
|
+
},
|
|
1145
|
+
shouldEndSession: true
|
|
1146
|
+
}
|
|
1147
|
+
};
|
|
1148
|
+
}
|
|
1149
|
+
|
|
1150
|
+
/**
|
|
1151
|
+
* Track detection method usage statistics
|
|
1152
|
+
*/
|
|
1153
|
+
trackDetectionMethod(method) {
|
|
1154
|
+
this.detectionMethodStats.total++;
|
|
1155
|
+
|
|
1156
|
+
if (method === 'google') {
|
|
1157
|
+
this.detectionMethodStats.google++;
|
|
1158
|
+
} else if (method === 'enhanced_fallback') {
|
|
1159
|
+
this.detectionMethodStats.enhanced++;
|
|
1160
|
+
} else {
|
|
1161
|
+
this.detectionMethodStats.fallback++;
|
|
1162
|
+
}
|
|
1163
|
+
|
|
1164
|
+
// Log statistics periodically
|
|
1165
|
+
if (this.detectionMethodStats.total % 10 === 0) {
|
|
1166
|
+
console.log('[Alexa] Detection method statistics:', this.getDetectionMethodStats());
|
|
1167
|
+
}
|
|
1168
|
+
}
|
|
1169
|
+
|
|
1170
|
+
/**
|
|
1171
|
+
* Get detection method statistics
|
|
1172
|
+
*/
|
|
1173
|
+
getDetectionMethodStats() {
|
|
1174
|
+
const stats = { ...this.detectionMethodStats };
|
|
1175
|
+
const total = stats.total || 1; // Avoid division by zero
|
|
1176
|
+
|
|
1177
|
+
stats.googlePercentage = ((stats.google / total) * 100).toFixed(1) + '%';
|
|
1178
|
+
stats.enhancedPercentage = ((stats.enhanced / total) * 100).toFixed(1) + '%';
|
|
1179
|
+
stats.fallbackPercentage = ((stats.fallback / total) * 100).toFixed(1) + '%';
|
|
1180
|
+
|
|
1181
|
+
return stats;
|
|
1182
|
+
}
|
|
1183
|
+
|
|
1184
|
+
/**
|
|
1185
|
+
* Get comprehensive language detection performance metrics
|
|
1186
|
+
*/
|
|
1187
|
+
getLanguageDetectionMetrics() {
|
|
1188
|
+
return {
|
|
1189
|
+
googleTranslate: this.googleTranslateClient.getPerformanceMetrics(),
|
|
1190
|
+
enhancedDetector: this.sarvamClient.enhancedDetector.getPerformanceMetrics(),
|
|
1191
|
+
methodStatistics: this.getDetectionMethodStats(),
|
|
1192
|
+
overallSuccessRate: this.calculateOverallSuccessRate()
|
|
1193
|
+
};
|
|
1194
|
+
}
|
|
1195
|
+
|
|
1196
|
+
/**
|
|
1197
|
+
* Calculate overall success rate across detection methods
|
|
1198
|
+
*/
|
|
1199
|
+
calculateOverallSuccessRate() {
|
|
1200
|
+
const googleMetrics = this.googleTranslateClient.getMetrics();
|
|
1201
|
+
const totalDetections = googleMetrics.totalDetections;
|
|
1202
|
+
const totalSuccesses = googleMetrics.totalSuccesses;
|
|
1203
|
+
|
|
1204
|
+
if (totalDetections > 0) {
|
|
1205
|
+
return ((totalSuccesses / totalDetections) * 100).toFixed(1) + '%';
|
|
1206
|
+
}
|
|
1207
|
+
return '0%';
|
|
1208
|
+
}
|
|
1209
|
+
|
|
1210
|
+
/**
|
|
1211
|
+
* Reset detection statistics
|
|
1212
|
+
*/
|
|
1213
|
+
resetDetectionStats() {
|
|
1214
|
+
this.detectionMethodStats = {
|
|
1215
|
+
google: 0,
|
|
1216
|
+
enhanced: 0,
|
|
1217
|
+
fallback: 0,
|
|
1218
|
+
total: 0
|
|
1219
|
+
};
|
|
1220
|
+
this.googleTranslateClient.resetMetrics();
|
|
1221
|
+
this.sarvamClient.enhancedDetector.resetMetrics();
|
|
1222
|
+
console.log('[Alexa] Detection statistics reset');
|
|
1223
|
+
}
|
|
1224
|
+
|
|
1225
|
+
/**
|
|
1226
|
+
* Close connection
|
|
1227
|
+
*/
|
|
1228
|
+
close() {
|
|
1229
|
+
console.log('[Alexa] Shutting down TMLPD-free Alexa handler...');
|
|
1230
|
+
this.resetDetectionStats();
|
|
1231
|
+
}
|
|
1232
|
+
}
|
|
1233
|
+
|
|
1234
|
+
// CommonJS export of the handler class defined above.
module.exports = AlexaRequestHandlerNoTMLPD;
|