gthinking 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/GEMINI.md +68 -0
- package/README.md +34 -1
- package/analysis.ts +78 -1
- package/creativity.ts +62 -9
- package/dist/analysis.d.ts +2 -0
- package/dist/analysis.d.ts.map +1 -1
- package/dist/analysis.js +75 -1
- package/dist/analysis.js.map +1 -1
- package/dist/creativity.d.ts +2 -1
- package/dist/creativity.d.ts.map +1 -1
- package/dist/creativity.js +57 -7
- package/dist/creativity.js.map +1 -1
- package/dist/engine.d.ts.map +1 -1
- package/dist/engine.js +14 -10
- package/dist/engine.js.map +1 -1
- package/dist/examples.js +1 -1
- package/dist/examples.js.map +1 -1
- package/dist/llm-service.d.ts +20 -0
- package/dist/llm-service.d.ts.map +1 -0
- package/dist/llm-service.js +82 -0
- package/dist/llm-service.js.map +1 -0
- package/dist/reasoning.d.ts +1 -0
- package/dist/reasoning.d.ts.map +1 -1
- package/dist/reasoning.js +80 -27
- package/dist/reasoning.js.map +1 -1
- package/dist/search-discovery.d.ts.map +1 -1
- package/dist/search-discovery.js +43 -0
- package/dist/search-discovery.js.map +1 -1
- package/engine.ts +20 -15
- package/examples.ts +1 -1
- package/llm-service.ts +99 -0
- package/package.json +1 -1
- package/reasoning.ts +88 -28
- package/search-discovery.ts +46 -0
package/llm-service.ts
ADDED

```diff
@@ -0,0 +1,99 @@
+
+import { exec } from 'child_process';
+import { promisify } from 'util';
+
+const execAsync = promisify(exec);
+
+export type LLMProvider = 'gemini' | 'claude' | 'kimi' | 'opencode' | 'openai';
+
+export interface LLMRequest {
+  prompt: string;
+  provider?: LLMProvider;
+  systemPrompt?: string;
+  model?: string;
+}
+
+export class LLMService {
+  private defaultProvider: LLMProvider = 'gemini';
+
+  constructor() {
+    this.detectProvider();
+  }
+
+  private async detectProvider() {
+    if (process.env.LLM_PROVIDER) {
+      this.defaultProvider = process.env.LLM_PROVIDER as LLMProvider;
+      return;
+    }
+
+    // Simple detection based on available commands
+    const providers: LLMProvider[] = ['gemini', 'claude', 'kimi', 'opencode'];
+
+    for (const p of providers) {
+      try {
+        await execAsync(`which ${p}`);
+        this.defaultProvider = p;
+        break;
+      } catch (e) {
+        // Command not found, continue
+      }
+    }
+  }
+
+  async generateText(request: LLMRequest): Promise<string> {
+    const provider = request.provider || this.defaultProvider;
+    const prompt = this.constructPrompt(request);
+
+    try {
+      switch (provider) {
+        case 'gemini':
+          return await this.callGemini(prompt);
+        case 'claude':
+          return await this.callClaude(prompt);
+        case 'kimi':
+          return await this.callKimi(prompt);
+        case 'opencode':
+          return await this.callOpenCode(prompt);
+        default:
+          return await this.callGemini(prompt); // Fallback
+      }
+    } catch (error) {
+      console.error(`LLM call failed (${provider}):`, error);
+      throw new Error(`Failed to generate text using ${provider}: ${error instanceof Error ? error.message : String(error)}`);
+    }
+  }
+
+  private constructPrompt(request: LLMRequest): string {
+    if (request.systemPrompt) {
+      return `System: ${request.systemPrompt}\n\nUser: ${request.prompt}`;
+    }
+    return request.prompt;
+  }
+
+  private async callGemini(prompt: string): Promise<string> {
+    // Escape double quotes for shell safety (basic)
+    const safePrompt = prompt.replace(/"/g, '\\"');
+    const { stdout } = await execAsync(`gemini "${safePrompt}"`);
+    return stdout.trim();
+  }
+
+  private async callClaude(prompt: string): Promise<string> {
+    const safePrompt = prompt.replace(/"/g, '\\"');
+    const { stdout } = await execAsync(`claude "${safePrompt}"`);
+    return stdout.trim();
+  }
+
+  private async callKimi(prompt: string): Promise<string> {
+    const safePrompt = prompt.replace(/"/g, '\\"');
+    const { stdout } = await execAsync(`kimi "${safePrompt}"`);
+    return stdout.trim();
+  }
+
+  private async callOpenCode(prompt: string): Promise<string> {
+    const safePrompt = prompt.replace(/"/g, '\\"');
+    const { stdout } = await execAsync(`opencode "${safePrompt}"`);
+    return stdout.trim();
+  }
+}
+
+export const llmService = new LLMService();
```
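A note on the CLI calls above: escaping only double quotes before interpolating a prompt into a shell command string still leaves backticks, `$(...)`, and backslashes for the shell to expand, so a hostile prompt could execute arbitrary commands. A minimal hardened sketch, assuming the same CLIs accept the prompt as a positional argument, bypasses the shell entirely with `execFile`:

```typescript
import { execFile } from 'child_process';
import { promisify } from 'util';

const execFileAsync = promisify(execFile);

// Hypothetical hardened variant of callGemini: the prompt travels as one
// argv entry, so the shell never parses quotes, backticks, or $(...).
async function callGeminiSafe(prompt: string): Promise<string> {
  const { stdout } = await execFileAsync('gemini', [prompt]);
  return stdout.trim();
}
```

The same argument-vector pattern would apply unchanged to the `claude`, `kimi`, and `opencode` calls.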
package/package.json
CHANGED
package/reasoning.ts
CHANGED

```diff
@@ -21,6 +21,7 @@ import {
   ThinkingStage
 } from './types';
 import { EventEmitter } from 'events';
+import { llmService } from './llm-service';
 
 // ============================================================================
 // LOGICAL RULES ENGINE
@@ -541,37 +542,44 @@ export class ReasoningEngine extends EventEmitter {
     } as ThinkingEvent);
 
     try {
-      // Generate Chain of Thought if enabled
+      // Generate Chain of Thought if enabled (via LLM implicitly or explicitly)
       let cot: ChainOfThought | undefined;
-      if (generateCOT) {
-        cot = this.cotGenerator.generate(problem, maxSteps);
-      }
+
+      // Try LLM-based reasoning first
+      await this.executeReasoningWithLLM(session, maxSteps);
 
-      // Execute reasoning based on type
-      switch (type) {
-        case ReasoningType.DEDUCTIVE:
-          await this.executeDeductiveReasoning(session, maxSteps);
-          break;
-        case ReasoningType.INDUCTIVE:
-          await this.executeInductiveReasoning(session, maxSteps);
-          break;
-        case ReasoningType.ABDUCTIVE:
-          await this.executeAbductiveReasoning(session, maxSteps);
-          break;
-        case ReasoningType.ANALOGICAL:
-          await this.executeAnalogicalReasoning(session, maxSteps);
-          break;
-        case ReasoningType.CAUSAL:
-          await this.executeCausalReasoning(session, maxSteps);
-          break;
-        case ReasoningType.COUNTERFACTUAL:
-          await this.executeCounterfactualReasoning(session, maxSteps);
-          break;
-      }
+      // If LLM failed (empty steps), fallback to internal engines
+      if (session.steps.length === 0) {
+        if (generateCOT) {
+          cot = this.cotGenerator.generate(problem, maxSteps);
+        }
 
-      session.conclusion = this.generateConclusion(session, cot);
-      session.confidence = this.calculateSessionConfidence(session);
-
+        switch (type) {
+          case ReasoningType.DEDUCTIVE:
+            await this.executeDeductiveReasoning(session, maxSteps);
+            break;
+          case ReasoningType.INDUCTIVE:
+            await this.executeInductiveReasoning(session, maxSteps);
+            break;
+          case ReasoningType.ABDUCTIVE:
+            await this.executeAbductiveReasoning(session, maxSteps);
+            break;
+          case ReasoningType.ANALOGICAL:
+            await this.executeAnalogicalReasoning(session, maxSteps);
+            break;
+          case ReasoningType.CAUSAL:
+            await this.executeCausalReasoning(session, maxSteps);
+            break;
+          case ReasoningType.COUNTERFACTUAL:
+            await this.executeCounterfactualReasoning(session, maxSteps);
+            break;
+        }
+
+        // Generate conclusion for fallback
+        session.conclusion = this.generateConclusion(session, cot);
+        session.confidence = this.calculateSessionConfidence(session);
+      }
+
       session.endTime = new Date();
 
       this.emit('reasoning_complete', {
@@ -604,6 +612,58 @@
     }
   }
 
+  private async executeReasoningWithLLM(session: ReasoningSession, maxSteps: number): Promise<void> {
+    const prompt = `
+You are an expert reasoning engine.
+Problem: "${session.problem}"
+Task: Perform ${session.type} reasoning with approximately ${maxSteps} steps.
+
+Return a JSON object with this exact structure (no markdown, just JSON):
+{
+  "steps": [
+    {
+      "stepNumber": number,
+      "premise": "string (the basis for this step)",
+      "inference": "string (the logical deduction)",
+      "confidence": number (0.0 to 1.0)
+    }
+  ],
+  "conclusion": "string (final conclusion)",
+  "overallConfidence": number (0.0 to 1.0)
+}
+`;
+
+    try {
+      const response = await llmService.generateText({ prompt });
+      const jsonMatch = response.match(/\{[\s\S]*\}/);
+
+      if (jsonMatch) {
+        const result = JSON.parse(jsonMatch[0]);
+
+        if (Array.isArray(result.steps)) {
+          result.steps.forEach((s: any, i: number) => {
+            session.steps.push({
+              id: `step_${i}`,
+              stepNumber: s.stepNumber || i + 1,
+              premise: s.premise || '',
+              inference: s.inference || '',
+              evidence: [],
+              assumptions: [],
+              confidence: s.confidence || 0.8,
+              nextSteps: []
+            });
+          });
+        }
+
+        session.conclusion = result.conclusion;
+        session.confidence = result.overallConfidence || 0.8;
+      }
+    } catch (error) {
+      console.warn('LLM reasoning failed, falling back to internal engine', error);
+      // Fallback handled in caller
+    }
+  }
+
   private async executeDeductiveReasoning(session: ReasoningSession, maxSteps: number): Promise<void> {
     const facts = this.extractFacts(session.problem);
 
```
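The contract between `executeReasoningWithLLM` and the model is the JSON shape spelled out in the prompt. The `/\{[\s\S]*\}/` match is deliberately forgiving: it spans from the first `{` to the last `}`, so prose around the object is tolerated. A minimal, self-contained sketch of that extraction step (the model output below is invented for illustration):

```typescript
// Simulated model output with prose around the JSON, as CLI LLMs often return.
const response =
  'Sure, here is the reasoning you asked for:\n' +
  '{"steps":[{"stepNumber":1,"premise":"All men are mortal",' +
  '"inference":"Socrates is mortal","confidence":0.95}],' +
  '"conclusion":"Socrates is mortal","overallConfidence":0.95}';

// Same extraction as executeReasoningWithLLM: first '{' through last '}'.
const jsonMatch = response.match(/\{[\s\S]*\}/);
if (jsonMatch) {
  const result = JSON.parse(jsonMatch[0]);
  console.log(result.steps.length, result.conclusion); // 1 "Socrates is mortal"
}
```

If the model instead appends trailing prose containing a stray `}`, `JSON.parse` throws, the catch block leaves `session.steps` empty, and the caller falls back to the internal engines, which is the intended degradation path.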
package/search-discovery.ts
CHANGED

```diff
@@ -17,6 +17,7 @@ import {
   ThinkingStage
 } from './types';
 import { EventEmitter } from 'events';
+import { llmService } from './llm-service';
 
 // ============================================================================
 // SEARCH PROVIDER INTERFACES
@@ -40,6 +41,15 @@ class WebSearchProvider implements SearchProvider {
     filters: SearchFilters,
     maxResults: number
   ): Promise<SearchResult[]> {
+    try {
+      const results = await this.searchWithLLM(query, maxResults);
+      if (results.length > 0) {
+        return this.applyFilters(results, filters);
+      }
+    } catch (error) {
+      console.warn('LLM search failed, falling back to simulation', error);
+    }
+
     // In real implementation, this would call actual search APIs
     // For demonstration, we simulate search results
     const simulatedResults: SearchResult[] = [];
@@ -66,6 +76,42 @@ class WebSearchProvider implements SearchProvider {
     return this.applyFilters(simulatedResults, filters);
   }
 
+  private async searchWithLLM(query: string, maxResults: number): Promise<SearchResult[]> {
+    const prompt = `
+Act as a search engine. Query: "${query}"
+Generate ${maxResults} relevant search results based on your knowledge.
+
+Return strictly JSON with an array of objects:
+[
+  {
+    "title": "string",
+    "url": "https://... (invent a plausible URL)",
+    "snippet": "string (1-2 sentences summarizing the content)",
+    "credibility": number (0-1)
+  }
+]
+`;
+
+    const response = await llmService.generateText({ prompt });
+    const jsonMatch = response.match(/\[[\s\S]*\]/); // Match array
+
+    if (jsonMatch) {
+      const data = JSON.parse(jsonMatch[0]);
+      return data.map((item: any, i: number) => ({
+        id: `web_llm_${Date.now()}_${i}`,
+        title: item.title,
+        url: item.url,
+        snippet: item.snippet,
+        source: SourceType.WEB,
+        credibility: item.credibility || 0.7,
+        relevance: 0.9, // Assumed relevant
+        timestamp: new Date(),
+        metadata: { wordCount: 1000, citations: 10 }
+      }));
+    }
+    return [];
+  }
+
   getCredibilityScore(url: string): number {
     const credibleDomains = [
       'edu', 'gov', 'ac.uk', 'ac.jp', 'arxiv.org',
```