@anh3d0nic/ice 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,40 @@
1
+ /**
2
+ * ✦ ICE v4.3 — Qwen Code Termux
3
+ * Persistent AI Brain with Multi-Provider Support
4
+ *
5
+ * By @anh3d0nic
6
+ * GitHub: github.com/anh3d0nic/qwen-code-termux-ice
7
+ */
8
+
9
+ // Core engine
10
+ export { ICEEngine } from './engine.js';
11
+
12
+ // Intent decoding
13
+ export { IntentDecoder } from './intent-decoder.js';
14
+
15
+ // Multi-provider API client
16
+ export { MultiProviderClient } from './multi-provider-client.js';
17
+ export type { ProviderConfig, ChatMessage, ChatResponse } from './multi-provider-client.js';
18
+
19
+ // D20 learning
20
+ export { D20Learner } from './d20-learner.js';
21
+ export type { LearningPattern, InteractionRecord } from './d20-learner.js';
22
+
23
+ // Agent crews
24
+ export { AgentCrews } from './agent-crews.js';
25
+ export type { Agent, Crew } from './agent-crews.js';
26
+ export { CrewType, AgentStatus } from './agent-crews.js';
27
+
28
+ // Types
29
+ export type {
30
+ IntentResult,
31
+ Entity,
32
+ ICEConfig,
33
+ ProcessingResult
34
+ } from './types.js';
35
+
36
+ export { IntentType } from './types.js';
37
+
38
+ // Version info
39
+ export const VERSION = '4.3.0-termux';
40
+ export const AUTHOR = '@anh3d0nic';
@@ -0,0 +1,200 @@
1
+ /**
2
+ * ICE v4.3 — Intent Decoder
3
+ * LLM-centric intent classification with few-shot learning
4
+ */
5
+
6
+ import { MultiProviderClient, ChatMessage } from './multi-provider-client.js';
7
+ import type { IntentResult } from './types.js';
8
+
9
+ const DEFAULT_EXAMPLES = [
10
+ { user: 'What is Python?', intent: 'question', confidence: 0.95 },
11
+ { user: 'Write a function to sort', intent: 'code_request', confidence: 0.92 },
12
+ { user: 'Does stress cause illness?', intent: 'causal', confidence: 0.90 },
13
+ { user: 'How to install Docker?', intent: 'how_to', confidence: 0.93 },
14
+ { user: 'Compare MySQL vs PostgreSQL', intent: 'comparison', confidence: 0.91 },
15
+ { user: 'Calculate 15 * 3 + 24 / 4', intent: 'math_problem', confidence: 0.94 }
16
+ ];
17
+
18
+ export class IntentDecoder {
19
+ private api: MultiProviderClient;
20
+ private examples: Array<{ user: string; intent: string; confidence: number }>;
21
+
22
+ constructor(api: MultiProviderClient) {
23
+ this.api = api;
24
+ this.examples = [...DEFAULT_EXAMPLES];
25
+ }
26
+
27
+ async decode(message: string, context?: string): Promise<IntentResult> {
28
+ const providers = this.api.getAvailableProviders();
29
+
30
+ if (providers.length > 0) {
31
+ return await this._decodeAPI(message, context, providers[0]);
32
+ }
33
+
34
+ return this._decodeFallback(message, context);
35
+ }
36
+
37
+ private async _decodeAPI(
38
+ message: string,
39
+ context: string | undefined,
40
+ provider: string
41
+ ): Promise<IntentResult> {
42
+ const examplesText = this.examples.slice(0, 5).map(
43
+ ex => `User: "${ex.user}" → Intent: ${ex.intent} (${(ex.confidence * 100).toFixed(0)}%)`
44
+ ).join('\n');
45
+
46
+ let prompt = `You are an intent classifier. Output ONLY JSON.
47
+
48
+ Examples:
49
+ ${examplesText}
50
+
51
+ Classify this:
52
+ User: "${message}"
53
+
54
+ Output JSON:
55
+ {
56
+ "primary_intent": "one of: question, code_request, math_problem, causal, how_to, comparison, command, creative, opinion, unknown",
57
+ "secondary_intents": [],
58
+ "confidence": 0.0-1.0,
59
+ "entities": {"key": "value"},
60
+ "rephrased_request": "clear version",
61
+ "needs_clarification": true/false,
62
+ "clarifying_questions": ["list"] or null,
63
+ "reasoning": "brief explanation"
64
+ }`;
65
+
66
+ if (context) {
67
+ prompt += `\nContext: ${context}`;
68
+ }
69
+
70
+ const messages: ChatMessage[] = [{ role: 'user', content: prompt }];
71
+ const response = await this.api.chat(messages, provider, undefined, 0.1, 500);
72
+
73
+ if (response?.content) {
74
+ try {
75
+ const jsonStart = response.content.indexOf('{');
76
+ const jsonEnd = response.content.lastIndexOf('}') + 1;
77
+
78
+ if (jsonStart >= 0 && jsonEnd > jsonStart) {
79
+ const data = JSON.parse(response.content.slice(jsonStart, jsonEnd));
80
+
81
+ if (data.confidence >= 0.8) {
82
+ this._storeExample(message, data);
83
+ }
84
+
85
+ return {
86
+ primary_intent: data.primary_intent || 'unknown',
87
+ secondary_intents: data.secondary_intents || [],
88
+ confidence: data.confidence || 0.5,
89
+ entities: data.entities || {},
90
+ rephrased_request: data.rephrased_request || message,
91
+ needs_clarification: data.needs_clarification || false,
92
+ clarifying_questions: data.clarifying_questions || null,
93
+ reasoning: data.reasoning || ''
94
+ };
95
+ }
96
+ } catch {
97
+ // Fall through to fallback
98
+ }
99
+ }
100
+
101
+ return this._decodeFallback(message, context);
102
+ }
103
+
104
+ private _decodeFallback(message: string, _context?: string): IntentResult {
105
+ const msgLower = message.toLowerCase();
106
+ const patterns: Record<string, string[]> = {
107
+ code_request: [
108
+ '\\b(write|create|build|code|function)\\b',
109
+ '\\b(python|javascript|java|react)\\b'
110
+ ],
111
+ question: [
112
+ '\\b(what|who|when|where|why|how)\\b.*\\b(\\?|is|are)\\b'
113
+ ],
114
+ causal: [
115
+ '\\b(cause|causes|effect|leads to)\\b',
116
+ '\\bwhy does\\b'
117
+ ],
118
+ how_to: [
119
+ '\\bhow (do|to|can)\\b',
120
+ '\\b(install|setup|configure)\\b'
121
+ ],
122
+ comparison: [
123
+ '\\b(compare|vs|versus|better)\\b'
124
+ ],
125
+ math_problem: [
126
+ '\\b(calculate|compute|solve)\\b',
127
+ '\\d+\\s*[\\+\\-\\*\\/]\\s*\\d+'
128
+ ]
129
+ };
130
+
131
+ const scores: Record<string, number> = {};
132
+
133
+ for (const [intent, patternList] of Object.entries(patterns)) {
134
+ for (const pattern of patternList) {
135
+ if (new RegExp(pattern).test(msgLower)) {
136
+ scores[intent] = (scores[intent] || 0) + 0.3;
137
+ }
138
+ }
139
+ }
140
+
141
+ if (Object.keys(scores).length === 0) {
142
+ return {
143
+ primary_intent: 'unknown',
144
+ secondary_intents: [],
145
+ confidence: 0.3,
146
+ entities: {},
147
+ rephrased_request: message,
148
+ needs_clarification: true,
149
+ clarifying_questions: ['Could you clarify?'],
150
+ reasoning: 'No pattern matched'
151
+ };
152
+ }
153
+
154
+ const primary = Object.entries(scores).sort((a, b) => b[1] - a[1])[0][0];
155
+ const confidence = Math.min(0.9, scores[primary]);
156
+
157
+ return {
158
+ primary_intent: primary,
159
+ secondary_intents: [],
160
+ confidence,
161
+ entities: this._extractEntities(message),
162
+ rephrased_request: message,
163
+ needs_clarification: confidence < 0.7,
164
+ clarifying_questions: confidence < 0.7 ? ['Could you provide more details?'] : null,
165
+ reasoning: `Matched ${primary} pattern`
166
+ };
167
+ }
168
+
169
+ private _extractEntities(message: string): Record<string, string[]> {
170
+ const entities: Record<string, string[]> = {};
171
+
172
+ const techs = message.match(/\b(Python|JavaScript|Java|React|Docker|AWS|MySQL|PostgreSQL|MongoDB)\b/gi);
173
+ if (techs) entities.technologies = techs;
174
+
175
+ const nums = message.match(/\d+(?:\.\d+)?/g);
176
+ if (nums) entities.numbers = nums;
177
+
178
+ const times = message.match(/\b(today|tomorrow|yesterday|next week|last month)\b/gi);
179
+ if (times) entities.time = times;
180
+
181
+ return entities;
182
+ }
183
+
184
+ private _storeExample(message: string, data: Record<string, unknown>): void {
185
+ const confidence = (data.confidence as number) || 0;
186
+ if (confidence < 0.8) return;
187
+
188
+ const exists = this.examples.some(
189
+ ex => message.toLowerCase().includes(ex.user.toLowerCase())
190
+ );
191
+
192
+ if (!exists && this.examples.length < 50) {
193
+ this.examples.push({
194
+ user: message,
195
+ intent: (data.primary_intent as string) || 'unknown',
196
+ confidence
197
+ });
198
+ }
199
+ }
200
+ }
@@ -0,0 +1,200 @@
1
+ /**
2
+ * ICE v4.3 — Multi-Provider API Client
3
+ * Supports: Groq, Gemini, Qwen OAuth
4
+ */
5
+
6
+ import axios, { AxiosInstance } from 'axios';
7
+ import dotenv from 'dotenv';
8
+
9
+ dotenv.config();
10
+
11
/** Static description of one upstream LLM provider. */
export interface ProviderConfig {
  /** Human-readable provider name (e.g. 'Groq', 'Gemini'). */
  name: string;
  /** API key read from the environment; undefined means the provider is unavailable. */
  apiKey?: string;
  /** Base REST endpoint for the provider's API. */
  baseUrl: string;
  /** Model identifiers this client may send to the provider. */
  models: {
    /** Default model for normal requests. */
    primary: string;
    /** Cheaper/faster model for simple tasks. */
    fast: string;
  };
}

/** One turn of a chat conversation, in OpenAI-style role/content form. */
export interface ChatMessage {
  role: 'user' | 'assistant' | 'system';
  content: string;
}

/** Normalized response returned by MultiProviderClient.chat(). */
export interface ChatResponse {
  /** Assistant reply text. */
  content: string;
  /** Model that actually served the request. */
  model: string;
  /** Provider key ('groq', 'gemini', ...). */
  provider: string;
  /** Token accounting when the provider reports it. */
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}
36
+
37
+ export class MultiProviderClient {
38
+ private groqClient: AxiosInstance;
39
+ private geminiClient: AxiosInstance;
40
+ private providers: Map<string, ProviderConfig>;
41
+
42
+ constructor() {
43
+ this.providers = new Map([
44
+ ['groq', {
45
+ name: 'Groq',
46
+ apiKey: process.env.GROQ_API_KEY,
47
+ baseUrl: 'https://api.groq.com/openai/v1',
48
+ models: {
49
+ primary: 'qwen/qwen3-32b',
50
+ fast: 'llama-3.1-8b-instant'
51
+ }
52
+ }],
53
+ ['gemini', {
54
+ name: 'Gemini',
55
+ apiKey: process.env.GEMINI_API_KEY,
56
+ baseUrl: 'https://generativelanguage.googleapis.com/v1beta/models',
57
+ models: {
58
+ primary: 'gemini-2.0-flash',
59
+ fast: 'gemini-2.0-flash-lite'
60
+ }
61
+ }]
62
+ ]);
63
+
64
+ this.groqClient = axios.create({
65
+ baseURL: 'https://api.groq.com/openai/v1',
66
+ headers: {
67
+ 'Authorization': `Bearer ${process.env.GROQ_API_KEY}`,
68
+ 'Content-Type': 'application/json'
69
+ },
70
+ timeout: 30000
71
+ });
72
+
73
+ this.geminiClient = axios.create({
74
+ baseURL: 'https://generativelanguage.googleapis.com/v1beta/models',
75
+ timeout: 30000
76
+ });
77
+ }
78
+
79
+ isAvailable(provider: string): boolean {
80
+ const config = this.providers.get(provider);
81
+ return !!config?.apiKey;
82
+ }
83
+
84
+ getAvailableProviders(): string[] {
85
+ const available: string[] = [];
86
+ for (const [name, config] of this.providers.entries()) {
87
+ if (config.apiKey) {
88
+ available.push(name);
89
+ }
90
+ }
91
+ return available;
92
+ }
93
+
94
+ async chat(
95
+ messages: ChatMessage[],
96
+ provider: string = 'groq',
97
+ model?: string,
98
+ temperature: number = 0.4,
99
+ maxTokens: number = 2048
100
+ ): Promise<ChatResponse | null> {
101
+ if (!this.isAvailable(provider)) {
102
+ return null;
103
+ }
104
+
105
+ try {
106
+ if (provider === 'groq') {
107
+ return await this._groqChat(messages, model, temperature, maxTokens);
108
+ } else if (provider === 'gemini') {
109
+ return await this._geminiChat(messages, model, temperature, maxTokens);
110
+ }
111
+ return null;
112
+ } catch (error) {
113
+ console.error(`${provider} API Error:`, error instanceof Error ? error.message : error);
114
+ return null;
115
+ }
116
+ }
117
+
118
+ private async _groqChat(
119
+ messages: ChatMessage[],
120
+ model?: string,
121
+ temperature: number = 0.4,
122
+ maxTokens: number = 2048
123
+ ): Promise<ChatResponse> {
124
+ const config = this.providers.get('groq')!;
125
+ const response = await this.groqClient.post('/chat/completions', {
126
+ model: model || config.models.primary,
127
+ messages,
128
+ temperature,
129
+ max_tokens: maxTokens
130
+ });
131
+
132
+ const data = response.data;
133
+ return {
134
+ content: data.choices[0].message.content,
135
+ model: data.model,
136
+ provider: 'groq',
137
+ usage: {
138
+ promptTokens: data.usage?.prompt_tokens || 0,
139
+ completionTokens: data.usage?.completion_tokens || 0,
140
+ totalTokens: data.usage?.total_tokens || 0
141
+ }
142
+ };
143
+ }
144
+
145
+ private async _geminiChat(
146
+ messages: ChatMessage[],
147
+ model?: string,
148
+ temperature: number = 0.4,
149
+ maxTokens: number = 2048
150
+ ): Promise<ChatResponse> {
151
+ const config = this.providers.get('gemini')!;
152
+ const apiKey = config.apiKey;
153
+ const modelName = model || config.models.primary;
154
+
155
+ const geminiMessages = messages.map(msg => ({
156
+ role: msg.role === 'user' || msg.role === 'system' ? 'user' : 'model',
157
+ parts: [{ text: msg.content }]
158
+ }));
159
+
160
+ const response = await this.geminiClient.post(
161
+ `/${modelName}:generateContent?key=${apiKey}`,
162
+ {
163
+ contents: geminiMessages,
164
+ generationConfig: {
165
+ temperature,
166
+ maxOutputTokens: maxTokens
167
+ }
168
+ }
169
+ );
170
+
171
+ const data = response.data;
172
+ return {
173
+ content: data.candidates[0].content.parts[0].text,
174
+ model: modelName,
175
+ provider: 'gemini'
176
+ };
177
+ }
178
+
179
+ selectBestProvider(intent: string, complexity: string): string {
180
+ const available = this.getAvailableProviders();
181
+ if (available.length === 0) return 'fallback';
182
+
183
+ // Code tasks → Groq
184
+ if (intent === 'code_request') {
185
+ return available.includes('groq') ? 'groq' : available[0];
186
+ }
187
+
188
+ // Complex reasoning → Groq
189
+ if (complexity === 'complex' || complexity === 'critical') {
190
+ return available.includes('groq') ? 'groq' : available[0];
191
+ }
192
+
193
+ // Simple tasks → Fast provider
194
+ if (complexity === 'simple') {
195
+ return available.includes('gemini') ? 'gemini' : available[0];
196
+ }
197
+
198
+ return available[0];
199
+ }
200
+ }
@@ -0,0 +1,166 @@
1
+ /**
2
+ * ICE v4.3 — Chain of Thought Reasoning
3
+ * Step-by-step reasoning with confidence tracking
4
+ *
5
+ * Based on: "Chain-of-Thought Prompting Elicits Reasoning in LLMs" (Wei et al.)
6
+ */
7
+
8
+ import type { ReasoningStep, ReasoningConfig } from './types.js';
9
+
10
+ const DEFAULT_CONFIG: ReasoningConfig = {
11
+ maxSteps: 10,
12
+ maxBranches: 3,
13
+ minConfidence: 0.7,
14
+ enableSelfConsistency: true,
15
+ enableCritique: true
16
+ };
17
+
18
+ export class ChainOfThought {
19
+ private steps: ReasoningStep[] = [];
20
+ private config: ReasoningConfig;
21
+
22
+ constructor(config?: Partial<ReasoningConfig>) {
23
+ this.config = { ...DEFAULT_CONFIG, ...config };
24
+ }
25
+
26
+ /**
27
+ * Reason through a problem step-by-step
28
+ */
29
+ async reason(problem: string, context?: string): Promise<ReasoningStep[]> {
30
+ this.steps = [];
31
+
32
+ // Step 1: Understand the problem
33
+ this.addStep({
34
+ id: 1,
35
+ thought: `Understanding: ${problem}`,
36
+ confidence: 1.0,
37
+ evidence: context ? [context] : []
38
+ });
39
+
40
+ // Step 2: Break down into sub-problems
41
+ const subProblems = await this._decompose(problem);
42
+ this.addStep({
43
+ id: 2,
44
+ thought: `Decomposition: ${subProblems.length} sub-problems identified`,
45
+ confidence: 0.9,
46
+ evidence: subProblems
47
+ });
48
+
49
+ // Step 3: Solve each sub-problem
50
+ for (let i = 0; i < subProblems.length && this.steps.length < this.config.maxSteps; i++) {
51
+ const solution = await this._solveSubProblem(subProblems[i], context);
52
+ this.addStep({
53
+ id: this.steps.length + 1,
54
+ thought: `Solution ${i + 1}: ${solution}`,
55
+ confidence: 0.8,
56
+ evidence: [subProblems[i]]
57
+ });
58
+ }
59
+
60
+ // Step 4: Synthesize final answer
61
+ const synthesis = await this._synthesize(problem);
62
+ this.addStep({
63
+ id: this.steps.length + 1,
64
+ thought: `Synthesis: ${synthesis}`,
65
+ confidence: this._calculateFinalConfidence(),
66
+ evidence: this.steps.map(s => s.thought)
67
+ });
68
+
69
+ // Step 5: Self-consistency check (run multiple times)
70
+ if (this.config.enableSelfConsistency) {
71
+ const alternative = await this.reason(problem, context);
72
+ const consistency = this._checkConsistency(alternative);
73
+ this.addStep({
74
+ id: this.steps.length + 1,
75
+ thought: `Self-Consistency: ${consistency ? 'CONSISTENT' : 'INCONSISTENT'}`,
76
+ confidence: consistency ? 0.95 : 0.6
77
+ });
78
+ }
79
+
80
+ return this.steps;
81
+ }
82
+
83
+ private addStep(step: ReasoningStep): void {
84
+ this.steps.push(step);
85
+ }
86
+
87
+ private async _decompose(problem: string): Promise<string[]> {
88
+ // Decomposition heuristics
89
+ const subProblems: string[] = [];
90
+
91
+ // Check for multiple questions
92
+ const questions = problem.match(/\?/g);
93
+ if (questions && questions.length > 1) {
94
+ const parts = problem.split(/[?.]/).filter(p => p.trim().length > 0);
95
+ subProblems.push(...parts.map(p => `${p.trim()}?`));
96
+ }
97
+
98
+ // Check for compound tasks
99
+ if (problem.includes(' and ') || problem.includes(' then ')) {
100
+ const connectors = problem.split(/\s+(and|then)\s+/);
101
+ subProblems.push(...connectors.filter(c => c.length > 10));
102
+ }
103
+
104
+ // Default: treat as single problem
105
+ if (subProblems.length === 0) {
106
+ subProblems.push(problem);
107
+ }
108
+
109
+ return subProblems.slice(0, 5);
110
+ }
111
+
112
+ private async _solveSubProblem(subProblem: string, context?: string): Promise<string> {
113
+ // Simplified solver - in production would call LLM
114
+ return `Solved: ${subProblem.substring(0, 50)}...`;
115
+ }
116
+
117
+ private async _synthesize(problem: string): Promise<string> {
118
+ const evidence = this.steps.map(s => s.thought).join(' → ');
119
+ return `Based on: ${evidence.substring(0, 200)}...`;
120
+ }
121
+
122
+ private _calculateFinalConfidence(): number {
123
+ if (this.steps.length === 0) return 0.5;
124
+
125
+ const confidences = this.steps.map(s => s.confidence);
126
+ const avg = confidences.reduce((a, b) => a + b, 0) / confidences.length;
127
+
128
+ // Penalize long chains
129
+ const lengthPenalty = Math.max(0.8, 1 - (this.steps.length * 0.02));
130
+
131
+ return Math.min(0.95, avg * lengthPenalty);
132
+ }
133
+
134
+ private _checkConsistency(alternative: ReasoningStep[]): boolean {
135
+ if (alternative.length === 0) return true;
136
+
137
+ const finalStep = this.steps[this.steps.length - 1];
138
+ const altFinalStep = alternative[alternative.length - 1];
139
+
140
+ // Check if conclusions are similar
141
+ const similarity = this._textSimilarity(finalStep.thought, altFinalStep.thought);
142
+ return similarity > 0.7;
143
+ }
144
+
145
+ private _textSimilarity(a: string, b: string): number {
146
+ const aWords = new Set(a.toLowerCase().split(/\s+/));
147
+ const bWords = new Set(b.toLowerCase().split(/\s+/));
148
+
149
+ const intersection = [...aWords].filter(w => bWords.has(w)).length;
150
+ const union = new Set([...aWords, ...bWords]).size;
151
+
152
+ return union > 0 ? intersection / union : 0;
153
+ }
154
+
155
+ getSteps(): ReasoningStep[] {
156
+ return this.steps;
157
+ }
158
+
159
+ getFinalConfidence(): number {
160
+ return this._calculateFinalConfidence();
161
+ }
162
+
163
+ reset(): void {
164
+ this.steps = [];
165
+ }
166
+ }
@@ -0,0 +1,16 @@
1
+ /**
2
+ * ICE v4.3 — Advanced Reasoning Module
3
+ * Scientific reasoning techniques for smarter AI
4
+ */
5
+
6
+ export { ChainOfThought } from './chain-of-thought.js';
7
+ export { TreeOfThoughts } from './tree-of-thoughts.js';
8
+ export { SelfReflection } from './self-reflection.js';
9
+ export { SocraticReviewer } from './socratic-reviewer.js';
10
+
11
+ export type {
12
+ ReasoningStep,
13
+ ThoughtBranch,
14
+ ReflectionResult,
15
+ ReasoningConfig
16
+ } from './types.js';