gthinking 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +283 -0
  2. package/analysis.ts +986 -0
  3. package/creativity.ts +1002 -0
  4. package/dist/analysis.d.ts +52 -0
  5. package/dist/analysis.d.ts.map +1 -0
  6. package/dist/analysis.js +792 -0
  7. package/dist/analysis.js.map +1 -0
  8. package/dist/creativity.d.ts +80 -0
  9. package/dist/creativity.d.ts.map +1 -0
  10. package/dist/creativity.js +778 -0
  11. package/dist/creativity.js.map +1 -0
  12. package/dist/engine.d.ts +76 -0
  13. package/dist/engine.d.ts.map +1 -0
  14. package/dist/engine.js +675 -0
  15. package/dist/engine.js.map +1 -0
  16. package/dist/examples.d.ts +7 -0
  17. package/dist/examples.d.ts.map +1 -0
  18. package/dist/examples.js +506 -0
  19. package/dist/examples.js.map +1 -0
  20. package/dist/index.d.ts +38 -0
  21. package/dist/index.d.ts.map +1 -0
  22. package/dist/index.js +126 -0
  23. package/dist/index.js.map +1 -0
  24. package/dist/learning.d.ts +72 -0
  25. package/dist/learning.d.ts.map +1 -0
  26. package/dist/learning.js +615 -0
  27. package/dist/learning.js.map +1 -0
  28. package/dist/planning.d.ts +58 -0
  29. package/dist/planning.d.ts.map +1 -0
  30. package/dist/planning.js +824 -0
  31. package/dist/planning.js.map +1 -0
  32. package/dist/reasoning.d.ts +72 -0
  33. package/dist/reasoning.d.ts.map +1 -0
  34. package/dist/reasoning.js +792 -0
  35. package/dist/reasoning.js.map +1 -0
  36. package/dist/search-discovery.d.ts +73 -0
  37. package/dist/search-discovery.d.ts.map +1 -0
  38. package/dist/search-discovery.js +505 -0
  39. package/dist/search-discovery.js.map +1 -0
  40. package/dist/types.d.ts +535 -0
  41. package/dist/types.d.ts.map +1 -0
  42. package/dist/types.js +77 -0
  43. package/dist/types.js.map +1 -0
  44. package/engine.ts +928 -0
  45. package/examples.ts +717 -0
  46. package/index.ts +106 -0
  47. package/learning.ts +779 -0
  48. package/package.json +51 -0
  49. package/planning.ts +1028 -0
  50. package/reasoning.ts +1019 -0
  51. package/search-discovery.ts +654 -0
  52. package/tsconfig.json +25 -0
  53. package/types.ts +674 -0
package/analysis.ts ADDED
@@ -0,0 +1,986 @@
+ /**
+  * Analysis Module
+  * Deep content analysis with multi-dimensional insights extraction
+  */
+
+ import {
+   AnalysisRequest,
+   AnalysisResult,
+   AnalysisType,
+   Finding,
+   Evidence,
+   ComparisonResult,
+   ComparisonPoint,
+   FactCheckResult,
+   SearchResult,
+   ConfidenceLevel,
+   ThinkingEvent,
+   ThinkingError,
+   ThinkingStage
+ } from './types';
+ import { EventEmitter } from 'events';
+
+ // ============================================================================
+ // SENTIMENT ANALYZER
+ // ============================================================================
+
+ interface SentimentScore {
+   positive: number;
+   negative: number;
+   neutral: number;
+   compound: number;
+ }
+
+ class SentimentAnalyzer {
+   private positiveWords: Set<string> = new Set([
+     'excellent', 'great', 'amazing', 'wonderful', 'fantastic', 'good', 'best',
+     'love', 'like', 'happy', 'success', 'benefit', 'advantage', 'improve',
+     'progress', 'achieve', 'win', 'positive', 'effective', 'efficient'
+   ]);
+
+   private negativeWords: Set<string> = new Set([
+     'bad', 'terrible', 'awful', 'worst', 'hate', 'dislike', 'sad', 'fail',
+     'problem', 'issue', 'disadvantage', 'worse', 'decline', 'lose', 'negative',
+     'ineffective', 'inefficient', 'difficult', 'challenging', 'concern'
+   ]);
+
+   analyze(text: string): SentimentScore {
+     const words = text.toLowerCase().match(/\b\w+\b/g) || [];
+     let positive = 0;
+     let negative = 0;
+     let neutral = 0;
+
+     words.forEach(word => {
+       if (this.positiveWords.has(word)) positive++;
+       else if (this.negativeWords.has(word)) negative++;
+       else neutral++;
+     });
+
+     const total = words.length || 1;
+     const compound = (positive - negative) / total;
+
+     return {
+       positive: positive / total,
+       negative: negative / total,
+       neutral: neutral / total,
+       compound
+     };
+   }
+
+   getSentimentLabel(score: SentimentScore): string {
+     if (score.compound > 0.5) return 'very_positive';
+     if (score.compound > 0.1) return 'positive';
+     if (score.compound < -0.5) return 'very_negative';
+     if (score.compound < -0.1) return 'negative';
+     return 'neutral';
+   }
+ }
+
+ // ============================================================================
+ // ENTITY EXTRACTOR
+ // ============================================================================
+
+ interface Entity {
+   name: string;
+   type: 'person' | 'organization' | 'location' | 'product' | 'concept' | 'event';
+   confidence: number;
+   mentions: number;
+ }
+
+ class EntityExtractor {
+   private entityPatterns: Map<string, RegExp[]> = new Map([
+     ['person', [/\b[A-Z][a-z]+ [A-Z][a-z]+\b/g, /\b(Mr|Mrs|Ms|Dr|Prof)\.? [A-Z][a-z]+\b/g]],
+     ['organization', [/\b[A-Z][a-z]* (Inc|Corp|Ltd|LLC|Company|Organization)\b/g]],
+     ['location', [/\b(in|at|from) ([A-Z][a-z]+( [A-Z][a-z]+)?)\b/g]],
+     ['product', [/\b[A-Z][a-z]*[0-9]+\b/g, /\b(the|a|an) ([A-Z][a-z]+ (Pro|Max|Ultra|Plus))\b/gi]]
+   ]);
+
+   extract(text: string): Entity[] {
+     const entities: Map<string, Entity> = new Map();
+     const words = text.split(/\s+/);
+
+     // Simple entity extraction based on patterns and capitalization
+     words.forEach((word, index) => {
+       // Check for capitalized words (potential proper nouns)
+       if (/^[A-Z][a-z]+$/.test(word) && word.length > 2) {
+         const context = words.slice(Math.max(0, index - 3), index + 4).join(' ');
+         const type = this.classifyEntity(word, context);
+
+         if (entities.has(word)) {
+           const existing = entities.get(word)!;
+           existing.mentions++;
+           existing.confidence = Math.min(0.95, existing.confidence + 0.1);
+         } else {
+           entities.set(word, {
+             name: word,
+             type,
+             confidence: 0.6,
+             mentions: 1
+           });
+         }
+       }
+     });
+
+     return Array.from(entities.values()).sort((a, b) => b.mentions - a.mentions);
+   }
+
+   private classifyEntity(word: string, context: string): Entity['type'] {
+     const contextLower = context.toLowerCase();
+
+     if (/\b(said|stated|announced|CEO|founder|president)\b/i.test(contextLower)) {
+       return 'person';
+     }
+     if (/\b(company|organization|firm|corporation)\b/i.test(contextLower)) {
+       return 'organization';
+     }
+     if (/\b(in|at|from|located|city|country|region)\b/i.test(contextLower)) {
+       return 'location';
+     }
+     if (/\b(product|launched|released|device|software)\b/i.test(contextLower)) {
+       return 'product';
+     }
+     if (/\b(event|conference|meeting|summit|festival)\b/i.test(contextLower)) {
+       return 'event';
+     }
+
+     return 'concept';
+   }
+ }
+
+ // ============================================================================
+ // TOPIC EXTRACTOR
+ // ============================================================================
+
+ interface Topic {
+   name: string;
+   relevance: number;
+   keywords: string[];
+   subtopics: string[];
+ }
+
+ class TopicExtractor {
+   private stopWords: Set<string> = new Set([
+     'the', 'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
+     'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
+     'should', 'may', 'might', 'must', 'can', 'this', 'that', 'these', 'those'
+   ]);
+
+   extract(text: string): Topic[] {
+     const words = text.toLowerCase().match(/\b\w{4,}\b/g) || [];
+     const wordFreq = new Map<string, number>();
+
+     // Calculate word frequencies
+     words.forEach(word => {
+       if (!this.stopWords.has(word)) {
+         wordFreq.set(word, (wordFreq.get(word) || 0) + 1);
+       }
+     });
+
+     // Group related words into topics
+     const topics: Topic[] = [];
+     const sortedWords = Array.from(wordFreq.entries())
+       .sort((a, b) => b[1] - a[1])
+       .slice(0, 20);
+
+     // Simple topic clustering
+     const usedWords = new Set<string>();
+     sortedWords.forEach(([word, freq]) => {
+       if (!usedWords.has(word)) {
+         const relatedWords = this.findRelatedWords(word, sortedWords, usedWords);
+         topics.push({
+           name: word,
+           relevance: freq / words.length,
+           keywords: [word, ...relatedWords],
+           subtopics: relatedWords.slice(0, 3)
+         });
+         usedWords.add(word);
+         relatedWords.forEach(w => usedWords.add(w));
+       }
+     });
+
+     return topics.slice(0, 5);
+   }
+
+   private findRelatedWords(
+     mainWord: string,
+     allWords: [string, number][],
+     usedWords: Set<string>
+   ): string[] {
+     const related: string[] = [];
+     const mainPrefix = mainWord.substring(0, 3);
+
+     allWords.forEach(([word, _]) => {
+       if (word !== mainWord && !usedWords.has(word)) {
+         // Check for semantic similarity (simplified)
+         if (word.startsWith(mainPrefix) || mainWord.startsWith(word.substring(0, 3))) {
+           related.push(word);
+         }
+       }
+     });
+
+     return related.slice(0, 5);
+   }
+ }
+
+ // ============================================================================
+ // KEYWORD EXTRACTOR
+ // ============================================================================
+
+ interface Keyword {
+   term: string;
+   frequency: number;
+   importance: number;
+   tfidf: number;
+ }
+
+ class KeywordExtractor {
+   private documentFrequency: Map<string, number> = new Map();
+   private totalDocuments = 0;
+
+   extract(text: string, corpus?: string[]): Keyword[] {
+     const words = text.toLowerCase().match(/\b\w{3,}\b/g) || [];
+     const wordFreq = new Map<string, number>();
+
+     words.forEach(word => {
+       wordFreq.set(word, (wordFreq.get(word) || 0) + 1);
+     });
+
+     // Calculate TF-IDF if corpus is provided
+     const keywords: Keyword[] = [];
+     wordFreq.forEach((freq, word) => {
+       const tf = freq / words.length;
+       const idf = corpus ? this.calculateIDF(word, corpus) : 1;
+
+       keywords.push({
+         term: word,
+         frequency: freq,
+         importance: tf * (1 + Math.log(1 + freq)),
+         tfidf: tf * idf
+       });
+     });
+
+     return keywords
+       .sort((a, b) => b.importance - a.importance)
+       .slice(0, 15);
+   }
+
+   private calculateIDF(word: string, corpus: string[]): number {
+     const docsWithWord = corpus.filter(doc =>
+       doc.toLowerCase().includes(word)
+     ).length;
+
+     return Math.log(corpus.length / (1 + docsWithWord));
+   }
+ }
+
+ // ============================================================================
+ // SUMMARIZER
+ // ============================================================================
+
+ class Summarizer {
+   summarize(text: string, maxLength: number = 200): string {
+     const sentences = this.splitIntoSentences(text);
+
+     if (sentences.length <= 3) {
+       return text;
+     }
+
+     // Score sentences based on importance
+     const sentenceScores = sentences.map((sentence, index) => ({
+       sentence,
+       index,
+       score: this.scoreSentence(sentence, text, index, sentences.length)
+     }));
+
+     // Select top sentences
+     const topSentences = sentenceScores
+       .sort((a, b) => b.score - a.score)
+       .slice(0, Math.ceil(sentences.length * 0.3))
+       .sort((a, b) => a.index - b.index);
+
+     const summary = topSentences.map(s => s.sentence).join(' ');
+
+     return summary.length > maxLength
+       ? summary.substring(0, maxLength - 3) + '...'
+       : summary;
+   }
+
+   private splitIntoSentences(text: string): string[] {
+     return text
+       .replace(/([.!?])\s+/g, "$1|")
+       .split("|")
+       .filter(s => s.trim().length > 10);
+   }
+
+   private scoreSentence(sentence: string, fullText: string, index: number, total: number): number {
+     let score = 0;
+
+     // Position score (first and last sentences are often important)
+     if (index === 0 || index === total - 1) score += 2;
+     if (index === 1 || index === total - 2) score += 1;
+
+     // Length score (avoid very short or very long sentences)
+     const wordCount = sentence.split(/\s+/).length;
+     if (wordCount >= 8 && wordCount <= 25) score += 1;
+
+     // Keyword density score
+     const words = sentence.toLowerCase().match(/\b\w+\b/g) || [];
+     const importantWords = words.filter(w => w.length > 5);
+     score += importantWords.length / words.length;
+
+     // Presence of numerical data
+     if (/\d+/.test(sentence)) score += 0.5;
+
+     return score;
+   }
+ }
+
+ // ============================================================================
+ // FACT CHECKER
+ // ============================================================================
+
+ class FactChecker {
+   private knownFacts: Map<string, { value: boolean; confidence: number }> = new Map();
+
+   async checkClaim(claim: string, sources: SearchResult[]): Promise<FactCheckResult> {
+     // Normalize the claim
+     const normalizedClaim = claim.toLowerCase().trim();
+
+     // Check against known facts
+     if (this.knownFacts.has(normalizedClaim)) {
+       const fact = this.knownFacts.get(normalizedClaim)!;
+       return {
+         claim,
+         verdict: fact.value ? 'true' : 'false',
+         confidence: fact.confidence,
+         sources: [],
+         explanation: 'Based on verified knowledge base'
+       };
+     }
+
+     // Analyze sources for verification
+     const sourceAnalysis = this.analyzeSources(sources, claim);
+
+     // Determine verdict based on source analysis
+     let verdict: FactCheckResult['verdict'];
+     let confidence = sourceAnalysis.agreement;
+
+     if (sourceAnalysis.supporting > sourceAnalysis.contradicting * 2) {
+       verdict = 'true';
+     } else if (sourceAnalysis.contradicting > sourceAnalysis.supporting * 2) {
+       verdict = 'false';
+     } else if (sourceAnalysis.supporting > 0 || sourceAnalysis.contradicting > 0) {
+       verdict = 'partially_true';
+       confidence *= 0.7;
+     } else {
+       verdict = 'unverifiable';
+       confidence = 0.3;
+     }
+
+     return {
+       claim,
+       verdict,
+       confidence,
+       sources: sourceAnalysis.relevantSources,
+       explanation: this.generateExplanation(verdict, sourceAnalysis),
+       corrections: sourceAnalysis.corrections
+     };
+   }
+
+   private analyzeSources(sources: SearchResult[], claim: string) {
+     const claimWords = claim.toLowerCase().split(/\s+/);
+     let supporting = 0;
+     let contradicting = 0;
+     let neutral = 0;
+     const relevantSources: SearchResult[] = [];
+     const corrections: string[] = [];
+
+     sources.forEach(source => {
+       const content = (source.title + ' ' + source.snippet).toLowerCase();
+       const relevance = claimWords.filter(w => content.includes(w)).length / claimWords.length;
+
+       if (relevance > 0.5) {
+         relevantSources.push(source);
+
+         // Check for supporting/contradicting indicators
+         if (this.isSupporting(content, claim)) {
+           supporting++;
+         } else if (this.isContradicting(content, claim)) {
+           contradicting++;
+           const correction = this.extractCorrection(content, claim);
+           if (correction) corrections.push(correction);
+         } else {
+           neutral++;
+         }
+       }
+     });
+
+     const total = supporting + contradicting + neutral || 1;
+     const agreement = Math.max(supporting, contradicting) / total;
+
+     return { supporting, contradicting, neutral, agreement, relevantSources, corrections };
+   }
+
+   private isSupporting(content: string, _claim: string): boolean {
+     const positiveIndicators = ['confirmed', 'true', 'correct', 'yes', 'indeed', 'verified'];
+     return positiveIndicators.some(ind => content.includes(ind));
+   }
+
+   private isContradicting(content: string, _claim: string): boolean {
+     const negativeIndicators = ['false', 'incorrect', 'not true', 'myth', 'misleading', 'wrong'];
+     return negativeIndicators.some(ind => content.includes(ind));
+   }
+
+   private extractCorrection(content: string, _claim: string): string | undefined {
+     // Extract potential correction from content
+     const correctionMatch = content.match(/(?:actually|in fact|correctly|the truth is)[^.]+/i);
+     return correctionMatch ? correctionMatch[0].trim() : undefined;
+   }
+
+   private generateExplanation(verdict: string, analysis: any): string {
+     switch (verdict) {
+       case 'true':
+         return `Supported by ${analysis.supporting} reliable sources with high agreement.`;
+       case 'false':
+         return `Contradicted by ${analysis.contradicting} sources. ${analysis.corrections.length > 0 ? 'Corrections found.' : ''}`;
+       case 'partially_true':
+         return `Mixed evidence: ${analysis.supporting} supporting, ${analysis.contradicting} contradicting sources.`;
+       case 'unverifiable':
+         return 'Insufficient reliable sources to verify this claim.';
+       default:
+         return 'Unable to determine veracity.';
+     }
+   }
+
+   addFact(claim: string, value: boolean, confidence: number): void {
+     this.knownFacts.set(claim.toLowerCase().trim(), { value, confidence });
+   }
+ }
+
+ // ============================================================================
+ // COMPARISON ENGINE
+ // ============================================================================
+
+ class ComparisonEngine {
+   compare(subjects: string[], contents: string[]): ComparisonResult {
+     const similarities: ComparisonPoint[] = [];
+     const differences: ComparisonPoint[] = [];
+
+     // Extract features from each subject
+     const features = subjects.map((subject, index) => ({
+       subject,
+       content: contents[index],
+       entities: this.extractEntities(contents[index]),
+       topics: this.extractTopics(contents[index]),
+       keywords: this.extractKeywords(contents[index]),
+       sentiment: this.analyzeSentiment(contents[index])
+     }));
+
+     // Compare features
+     const aspects = ['entities', 'topics', 'keywords', 'sentiment'];
+
+     aspects.forEach(aspect => {
+       const values: Record<string, unknown> = {};
+       features.forEach(f => {
+         values[f.subject] = f[aspect as keyof typeof f];
+       });
+
+       const similarity = this.calculateSimilarity(values);
+
+       if (similarity > 0.6) {
+         similarities.push({
+           aspect,
+           values,
+           significance: similarity
+         });
+       } else {
+         differences.push({
+           aspect,
+           values,
+           significance: 1 - similarity
+         });
+       }
+     });
+
+     // Generate conclusion
+     const conclusion = this.generateComparisonConclusion(subjects, similarities, differences);
+     const confidence = similarities.length / (similarities.length + differences.length);
+
+     return {
+       id: `comparison_${Date.now()}`,
+       subjects,
+       similarities,
+       differences,
+       conclusion,
+       confidence
+     };
+   }
+
+   private extractEntities(text: string): string[] {
+     const words = text.match(/\b[A-Z][a-z]+\b/g) || [];
+     return [...new Set(words)];
+   }
+
+   private extractTopics(text: string): string[] {
+     const words = text.toLowerCase().match(/\b\w{5,}\b/g) || [];
+     const freq = new Map<string, number>();
+     words.forEach(w => freq.set(w, (freq.get(w) || 0) + 1));
+     return Array.from(freq.entries())
+       .sort((a, b) => b[1] - a[1])
+       .slice(0, 5)
+       .map(([w]) => w);
+   }
+
+   private extractKeywords(text: string): string[] {
+     return this.extractTopics(text).slice(0, 10);
+   }
+
+   private analyzeSentiment(text: string): string {
+     const positive = /\b(good|great|excellent|positive|benefit|advantage)\b/gi;
+     const negative = /\b(bad|poor|negative|problem|issue|disadvantage)\b/gi;
+
+     const posCount = (text.match(positive) || []).length;
+     const negCount = (text.match(negative) || []).length;
+
+     if (posCount > negCount) return 'positive';
+     if (negCount > posCount) return 'negative';
+     return 'neutral';
+   }
+
+   private calculateSimilarity(values: Record<string, unknown>): number {
+     const subjects = Object.keys(values);
+     if (subjects.length < 2) return 1;
+
+     let totalSimilarity = 0;
+     let comparisons = 0;
+
+     for (let i = 0; i < subjects.length; i++) {
+       for (let j = i + 1; j < subjects.length; j++) {
+         const val1 = values[subjects[i]];
+         const val2 = values[subjects[j]];
+
+         if (Array.isArray(val1) && Array.isArray(val2)) {
+           const intersection = val1.filter(v => val2.includes(v));
+           const union = [...new Set([...val1, ...val2])];
+           totalSimilarity += intersection.length / union.length;
+         } else if (typeof val1 === 'string' && typeof val2 === 'string') {
+           totalSimilarity += val1 === val2 ? 1 : 0;
+         }
+
+         comparisons++;
+       }
+     }
+
+     return comparisons > 0 ? totalSimilarity / comparisons : 0;
+   }
+
+   private generateComparisonConclusion(
+     subjects: string[],
+     similarities: ComparisonPoint[],
+     differences: ComparisonPoint[]
+   ): string {
+     const similarityRatio = similarities.length / (similarities.length + differences.length);
+
+     if (similarityRatio > 0.7) {
+       return `${subjects.join(' and ')} share significant similarities, particularly in ${similarities.map(s => s.aspect).join(', ')}.`;
+     } else if (similarityRatio < 0.3) {
+       return `${subjects.join(' and ')} are notably different across ${differences.map(d => d.aspect).join(', ')}.`;
+     } else {
+       return `${subjects.join(' and ')} show both similarities and differences, with overlap in ${similarities.map(s => s.aspect).join(', ')} but divergence in ${differences.map(d => d.aspect).join(', ')}.`;
+     }
+   }
+ }
+
+ // ============================================================================
+ // MAIN ANALYSIS ENGINE
+ // ============================================================================
+
+ export class AnalysisEngine extends EventEmitter {
+   private sentimentAnalyzer: SentimentAnalyzer;
+   private entityExtractor: EntityExtractor;
+   private topicExtractor: TopicExtractor;
+   private keywordExtractor: KeywordExtractor;
+   private summarizer: Summarizer;
+   private factChecker: FactChecker;
+   private comparisonEngine: ComparisonEngine;
+
+   constructor() {
+     super();
+     this.sentimentAnalyzer = new SentimentAnalyzer();
+     this.entityExtractor = new EntityExtractor();
+     this.topicExtractor = new TopicExtractor();
+     this.keywordExtractor = new KeywordExtractor();
+     this.summarizer = new Summarizer();
+     this.factChecker = new FactChecker();
+     this.comparisonEngine = new ComparisonEngine();
+   }
+
+   /**
+    * Perform comprehensive analysis on content
+    */
+   async analyze(
+     content: string,
+     options: {
+       types?: AnalysisType[];
+       depth?: 'surface' | 'moderate' | 'deep';
+       context?: string;
+     } = {}
+   ): Promise<AnalysisResult[]> {
+     const {
+       types = ['sentiment', 'entity', 'topic', 'keyword', 'summary'],
+       depth = 'moderate',
+       context
+     } = options;
+
+     const requestId = this.generateId();
+     const startTime = Date.now();
+
+     this.emit('analysis_start', {
+       id: requestId,
+       stage: ThinkingStage.ANALYSIS,
+       timestamp: new Date(),
+       data: { content: content.substring(0, 100) + '...', types }
+     } as ThinkingEvent);
+
+     const results: AnalysisResult[] = [];
+
+     try {
+       for (const type of types) {
+         const result = await this.analyzeByType(type, content, context, depth);
+         results.push(result);
+       }
+
+       const processingTime = Date.now() - startTime;
+
+       this.emit('analysis_complete', {
+         id: requestId,
+         stage: ThinkingStage.ANALYSIS,
+         timestamp: new Date(),
+         data: { results, processingTime }
+       } as ThinkingEvent);
+
+       return results;
+
+     } catch (error) {
+       this.emit('analysis_error', {
+         id: requestId,
+         stage: ThinkingStage.ANALYSIS,
+         timestamp: new Date(),
+         data: { error }
+       } as ThinkingEvent);
+
+       throw new ThinkingError(
+         `Analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
+         ThinkingStage.ANALYSIS,
+         true,
+         error instanceof Error ? error : undefined
+       );
+     }
+   }
+
+   /**
+    * Analyze by specific type
+    */
+   private async analyzeByType(
+     type: AnalysisType,
+     content: string,
+     context?: string,
+     depth: 'surface' | 'moderate' | 'deep' = 'moderate'
+   ): Promise<AnalysisResult> {
+     const startTime = Date.now();
+     let findings: Finding[] = [];
+
+     switch (type) {
+       case 'sentiment':
+         findings = this.analyzeSentiment(content);
+         break;
+       case 'entity':
+         findings = this.analyzeEntities(content);
+         break;
+       case 'topic':
+         findings = this.analyzeTopics(content);
+         break;
+       case 'keyword':
+         findings = this.analyzeKeywords(content);
+         break;
+       case 'summary':
+         findings = this.generateSummary(content, depth);
+         break;
+       case 'fact_check':
+         findings = await this.factCheck(content);
+         break;
+       case 'comparison':
+         findings = this.compareContent(content, context);
+         break;
+       case 'trend':
+         findings = this.analyzeTrends(content);
+         break;
+     }
+
+     return {
+       id: this.generateId(),
+       requestId: this.generateId(),
+       type,
+       findings,
+       confidence: this.calculateConfidence(findings),
+       processingTime: Date.now() - startTime,
+       timestamp: new Date()
+     };
+   }
+
+   private analyzeSentiment(content: string): Finding[] {
+     const score = this.sentimentAnalyzer.analyze(content);
+     const label = this.sentimentAnalyzer.getSentimentLabel(score);
+
+     return [{
+       id: this.generateId(),
+       type: 'sentiment_analysis',
+       value: { score, label },
+       confidence: 0.85,
+       evidence: [{
+         source: 'sentiment_analyzer',
+         excerpt: content.substring(0, 100),
+         location: 'full_text',
+         strength: 0.8
+       }],
+       relatedFindings: []
+     }];
+   }
+
+   private analyzeEntities(content: string): Finding[] {
+     const entities = this.entityExtractor.extract(content);
+
+     return entities.slice(0, 10).map(entity => ({
+       id: this.generateId(),
+       type: 'entity',
+       value: entity,
+       confidence: entity.confidence,
+       evidence: [{
+         source: 'entity_extractor',
+         excerpt: `Found ${entity.name} (${entity.type}) mentioned ${entity.mentions} times`,
+         location: 'content',
+         strength: entity.confidence
+       }],
+       relatedFindings: []
+     }));
+   }
+
+   private analyzeTopics(content: string): Finding[] {
+     const topics = this.topicExtractor.extract(content);
+
+     return topics.map(topic => ({
+       id: this.generateId(),
+       type: 'topic',
+       value: topic,
+       confidence: topic.relevance,
+       evidence: [{
+         source: 'topic_extractor',
+         excerpt: `Topic: ${topic.name} with keywords: ${topic.keywords.join(', ')}`,
+         location: 'content',
+         strength: topic.relevance
+       }],
+       relatedFindings: []
+     }));
+   }
+
+   private analyzeKeywords(content: string): Finding[] {
+     const keywords = this.keywordExtractor.extract(content);
+
+     return keywords.map(kw => ({
+       id: this.generateId(),
+       type: 'keyword',
+       value: kw,
+       confidence: kw.importance,
+       evidence: [{
+         source: 'keyword_extractor',
+         excerpt: `Keyword "${kw.term}" appears ${kw.frequency} times`,
+         location: 'content',
+         strength: kw.importance
+       }],
+       relatedFindings: []
+     }));
+   }
+
+   private generateSummary(content: string, depth: string): Finding[] {
+     const maxLength = depth === 'surface' ? 100 : depth === 'deep' ? 500 : 250;
+     const summary = this.summarizer.summarize(content, maxLength);
+
+     return [{
+       id: this.generateId(),
+       type: 'summary',
+       value: { summary, originalLength: content.length, compressionRatio: summary.length / content.length },
+       confidence: 0.8,
+       evidence: [{
+         source: 'summarizer',
+         excerpt: summary.substring(0, 100),
+         location: 'generated_summary',
+         strength: 0.75
+       }],
+       relatedFindings: []
+     }];
+   }
+
+   private async factCheck(content: string): Promise<Finding[]> {
+     // Extract claims from content (simplified)
+     const sentences = content.match(/[^.!?]+[.!?]+/g) || [];
+     const claims = sentences.filter(s =>
+       s.length > 20 &&
+       !s.toLowerCase().includes('i think') &&
+       !s.toLowerCase().includes('maybe')
+     ).slice(0, 3);
+
+     const findings: Finding[] = [];
+
+     for (const claim of claims) {
+       const factCheck = await this.factChecker.checkClaim(claim, []);
+       findings.push({
+         id: this.generateId(),
+         type: 'fact_check',
+         value: factCheck,
+         confidence: factCheck.confidence,
+         evidence: factCheck.sources.map(s => ({
+           source: s.url,
+           excerpt: s.snippet,
+           location: s.url,
+           strength: s.credibility
+         })),
+         relatedFindings: []
+       });
+     }
+
+     return findings;
+   }
+
+   private compareContent(content: string, context?: string): Finding[] {
+     if (!context) {
+       return [{
+         id: this.generateId(),
+         type: 'comparison',
+         value: { error: 'No comparison context provided' },
+         confidence: 0,
+         evidence: [],
+         relatedFindings: []
+       }];
+     }
+
+     const comparison = this.comparisonEngine.compare(
+       ['Current Content', 'Context'],
+       [content, context]
+     );
+
+     return [{
+       id: this.generateId(),
+       type: 'comparison',
+       value: comparison,
+       confidence: comparison.confidence,
+       evidence: [{
+         source: 'comparison_engine',
+         excerpt: comparison.conclusion,
+         location: 'comparison_result',
+         strength: comparison.confidence
+       }],
+       relatedFindings: []
+     }];
+   }
+
+   private analyzeTrends(content: string): Finding[] {
+     // Simple trend analysis based on temporal references
+     const temporalPatterns = [
+       { pattern: /\b(increasing|growing|rising|upward)\b/gi, trend: 'upward' },
+       { pattern: /\b(decreasing|declining|falling|downward)\b/gi, trend: 'downward' },
+       { pattern: /\b(stable|constant|steady|unchanged)\b/gi, trend: 'stable' }
+     ];
+
+     const trends: Array<{ trend: string; count: number }> = [];
+
+     temporalPatterns.forEach(({ pattern, trend }) => {
+       const matches = content.match(pattern) || [];
+       if (matches.length > 0) {
+         trends.push({ trend, count: matches.length });
+       }
+     });
+
+     return [{
+       id: this.generateId(),
+       type: 'trend',
+       value: trends,
+       confidence: trends.length > 0 ? 0.7 : 0.3,
+       evidence: [{
+         source: 'trend_analyzer',
+         excerpt: `Detected trends: ${trends.map(t => t.trend).join(', ')}`,
+         location: 'content',
+         strength: 0.65
+       }],
+       relatedFindings: []
+     }];
+   }
+
+   private calculateConfidence(findings: Finding[]): number {
+     if (findings.length === 0) return 0;
+     const avgConfidence = findings.reduce((sum, f) => sum + f.confidence, 0) / findings.length;
+     return Math.min(0.95, avgConfidence);
+   }
+
+   /**
+    * Compare multiple contents
+    */
+   compare(subjects: string[], contents: string[]): ComparisonResult {
+     return this.comparisonEngine.compare(subjects, contents);
+   }
+
+   /**
+    * Fact check a claim
+    */
+   async factCheckClaim(claim: string, sources: SearchResult[]): Promise<FactCheckResult> {
+     return this.factChecker.checkClaim(claim, sources);
+   }
+
+   /**
+    * Add known fact for fact checking
+    */
+   addKnownFact(claim: string, value: boolean, confidence: number): void {
+     this.factChecker.addFact(claim, value, confidence);
+   }
+
+   private generateId(): string {
+     return `analysis_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+   }
+ }
+
+ // ============================================================================
+ // EXPORT SINGLETON INSTANCE
+ // ============================================================================
+
+ export const analysisEngine = new AnalysisEngine();
+
+ // ============================================================================
+ // EXAMPLE USAGE
+ // ============================================================================
+
+ /*
+ // Comprehensive analysis
+ const results = await analysisEngine.analyze(
+   "Artificial Intelligence is revolutionizing healthcare. Machine learning algorithms can now detect diseases with 95% accuracy. " +
+   "However, there are concerns about privacy and data security. The future looks promising but challenges remain.",
+   {
+     types: ['sentiment', 'entity', 'topic', 'keyword', 'summary'],
+     depth: 'deep'
+   }
+ );
+
+ // Compare multiple texts
+ const comparison = analysisEngine.compare(
+   ['Article A', 'Article B', 'Article C'],
+   [
+     "AI is transforming industries with automation and efficiency gains.",
+     "Machine learning brings both opportunities and challenges to various sectors.",
+     "Automation through AI technology is reshaping the workplace landscape."
+   ]
+ );
+
+ // Fact check
+ const factCheck = await analysisEngine.factCheckClaim(
+   "AI can detect diseases with 95% accuracy",
+   searchResults
+ );
+ */
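
For orientation, the sketch below shows how a sibling module inside the package might drive the exported analysisEngine. It is a minimal, hypothetical example and is not part of the published file: the file name, the relative import from './analysis', and the sample strings are assumptions, and only the analyze and compare signatures visible in the diff above are used.

// consumer-example.ts (hypothetical sketch, not part of the published package)
import { analysisEngine } from './analysis';

async function main(): Promise<void> {
  // Run a multi-type analysis; each AnalysisResult carries its findings and confidence
  const results = await analysisEngine.analyze(
    'Artificial Intelligence is revolutionizing healthcare, but privacy concerns remain.',
    { types: ['sentiment', 'topic', 'summary'], depth: 'moderate' }
  );
  for (const result of results) {
    console.log(result.type, result.confidence, result.findings[0]?.value);
  }

  // Pairwise comparison through the same engine
  const comparison = analysisEngine.compare(
    ['Text A', 'Text B'],
    ['AI automation improves efficiency.', 'AI automation raises labor concerns.']
  );
  console.log(comparison.conclusion);
}

main().catch(console.error);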