thelapyae 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/config.js +41 -0
- package/bin/thelapyae.js +169 -6
- package/data/mental-models.json +982 -0
- package/package.json +6 -2
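
Taken together, the 0.1.1 changes turn the CLI from what was essentially a static banner into a small consulting tool backed by a local model database and an optional Gemini call. Inferred from the diffs below, the command surface now looks roughly like this (invocations taken from the package's own help text; behavior summaries are my reading of the code):

    npx thelapyae config            prompt for a Gemini API key, saved to ~/.thelapyaerc
    npx thelapyae random            print one mental model at random
    npx thelapyae list              list every model id and title
    npx thelapyae "I feel anxious"  local keyword search, then Gemini consultation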
package/bin/config.js
ADDED

@@ -0,0 +1,41 @@
+ const fs = require('fs');
+ const path = require('path');
+ const os = require('os');
+ const readline = require('readline');
+
+ const CONFIG_PATH = path.join(os.homedir(), '.thelapyaerc');
+
+ function loadConfig() {
+   if (fs.existsSync(CONFIG_PATH)) {
+     try {
+       return JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf8'));
+     } catch (e) {
+       return {};
+     }
+   }
+   return {};
+ }
+
+ function saveConfig(config) {
+   fs.writeFileSync(CONFIG_PATH, JSON.stringify(config, null, 2));
+ }
+
+ async function configure() {
+   const rl = readline.createInterface({
+     input: process.stdin,
+     output: process.stdout
+   });
+
+   return new Promise((resolve) => {
+     rl.question('🔑 Enter your Gemini API Key: ', (key) => {
+       const config = loadConfig();
+       config.GEMINI_API_KEY = key.trim();
+       saveConfig(config);
+       console.log('✅ API Key saved successfully!');
+       rl.close();
+       resolve();
+     });
+   });
+ }
+
+ module.exports = { loadConfig, configure };
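
For reference, saveConfig pretty-prints the whole config object to ~/.thelapyaerc, so after running the prompt the file holds plain JSON of roughly this shape (the key value below is a placeholder, not a real credential):

    {
      "GEMINI_API_KEY": "<your-gemini-api-key>"
    }

Two properties of this design are worth noting: the key is stored unencrypted in the user's home directory, and a malformed or unreadable file is silently treated as an empty config by loadConfig's try/catch rather than raising an error.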
package/bin/thelapyae.js
CHANGED

@@ -1,13 +1,176 @@
  #!/usr/bin/env node
  
- 
- 
+ const { GoogleGenerativeAI } = require("@google/generative-ai");
+ const { loadConfig, configure } = require("./config");
+ const models = require("../data/mental-models.json");
+ 
+ const args = process.argv.slice(2);
+ const input = args.join(" ");
+ 
+ function randomModel() {
+   const model = models[Math.floor(Math.random() * models.length)];
+   printModel(model);
+ }
+ 
+ function searchModels(query, limit = 1) {
+   const terms = query.toLowerCase().split(/\s+/).filter(w => w.length > 2);
+ 
+   if (terms.length === 0) return [];
+ 
+   const scored = models.map(m => {
+     let score = 0;
+     const textTitle = m.title.toLowerCase();
+     const textProblem = (m.problem || "").toLowerCase();
+     const textExplanation = (m.explanation || "").toLowerCase();
+ 
+     terms.forEach(term => {
+       if (textTitle.includes(term)) score += 10;
+       if (textProblem.includes(term)) score += 5;
+       if (textExplanation.includes(term)) score += 1;
+     });
+ 
+     return { model: m, score };
+   });
+ 
+   scored.sort((a, b) => b.score - a.score);
+   return scored.filter(s => s.score > 0).slice(0, limit).map(s => s.model);
+ }
+ 
+ function cleanText(text) {
+   if (!text) return "";
+   return text.replace(/\*\*/g, "").replace(/\*/g, "").replace(/__/g, "").replace(/_/g, "");
+ }
+ 
+ function printModel(m) {
+   console.log(`
+ 🧠 ${cleanText(m.title)}
+ Category: ${cleanText(m.category)}
+ 
+ Problem:
+ ${cleanText(m.problem)}
+ 
+ Question:
+ ${cleanText(m.question)}
+ 
+ Explanation:
+ ${cleanText(m.explanation)}
+ 
+ Example:
+ ${cleanText(m.example)}
+ 
+ Action:
+ 👉 ${cleanText(m.action)}
+ `);
+ }
+ 
+ async function consultGemini(query) {
+   let config = loadConfig();
+ 
+   if (!config.GEMINI_API_KEY) {
+     console.log("ℹ️ No Gemini API Key found.");
+     const readline = require('readline');
+     const rl = readline.createInterface({
+       input: process.stdin,
+       output: process.stdout
+     });
+ 
+     const answer = await new Promise(resolve => {
+       rl.question('🔑 Do you want to set it up now to get AI consulting? (y/N) ', resolve);
+     });
+     rl.close();
+ 
+     if (answer.toLowerCase().startsWith('y')) {
+       await configure();
+       config = loadConfig(); // Reload config
+     } else {
+       console.log("🔄 Falling back to local search (offline mode).\n");
+       const results = searchModels(query);
+       if (results.length > 0) {
+         console.log(`🔍 Finding suitable mental model for: "${query}"...\n`);
+         printModel(results[0]);
+       } else {
+         console.log(`❌ No mental model found for: "${query}"`);
+       }
+       return;
+     }
+   }
+ 
+   const matches = searchModels(query, 5); // Get top 5 candidates
+ 
+   if (matches.length === 0) {
+     console.log(`❌ No relevant mental models found for: "${query}" locally to send to AI.`);
+     return;
+   }
+ 
+   console.log(`🤖 Thinking...`);
+ 
+   const genAI = new GoogleGenerativeAI(config.GEMINI_API_KEY);
+   const model = genAI.getGenerativeModel({ model: "gemini-2.5-flash" });
+ 
+   const prompt = `
+ You are an expert consultant using specific mental models to help people.
+ User Problem: "${query}"
  
- 
- 
- 
+ Here are the most relevant mental models from your database:
+ ${JSON.stringify(matches.map(m => ({ title: m.title, explanation: m.explanation, action: m.action })))}
+ 
+ Task:
+ 1. Analyze the user's problem.
+ 2. Select the top 3 most relevant mental models from the list to offer a multi-perspective solution.
+ 3. Synthesize a comprehensive answer. Reference the models by name (e.g., "Perspective 1: [Model Name]").
+ 4. Provide a "Consolidated Action Plan" with 3-5 concise, actionable bullet points.
+ 
+ Format:
+ - Start with a direct, empathetic opening.
+ - Use emojis for structure.
+ - Do NOT use markdown formatting (no bolding **, no headers #). Use uppercase for emphasis or bullet points like "•".
+ - Keep the action steps short and punchy.
+ `;
+ 
+   try {
+     const result = await model.generateContent(prompt);
+     const response = await result.response;
+     console.log(response.text());
+   } catch (error) {
+     console.error("❌ AI Error:", error.message);
+     if (matches.length > 0) {
+       console.log("\n🔄 Falling back to top local match:");
+       printModel(matches[0]);
+     }
+   }
+ }
+ 
+ async function main() {
+   if (!input) {
+     const figlet = require("figlet");
+     console.log(figlet.textSync("thelapyae", { horizontalLayout: "full" }));
+     console.log(`
+ 👋 Hi, I'm La Pyae (@thelapyae)
  
  Try:
  npx thelapyae random
- npx thelapyae
+ npx thelapyae list
+ npx thelapyae "I feel anxious"
+ npx thelapyae config (Setup AI Key)
  `);
+     process.exit(0);
+   }
+ 
+   if (input === "config") {
+     await configure();
+   } else if (input === "random") {
+     randomModel();
+   } else if (input === "list") {
+     models.forEach(m => console.log(`- ${m.id} (${m.title})`));
+   } else {
+     // Try exact ID match first
+     const exactMatch = models.find(m => m.id === input);
+     if (exactMatch) {
+       printModel(exactMatch);
+     } else {
+       await consultGemini(input);
+     }
+   }
+ }
+ 
+ main();
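
To make the local ranking in searchModels concrete, here is a minimal standalone sketch of the same weighted keyword scoring. The toy models and ids below are illustrative only, not entries from the package's mental-models.json:

    // Sketch of the searchModels() scoring above, run on toy data.
    // Weights mirror the diff: title hit = 10, problem hit = 5, explanation hit = 1.
    const toyModels = [
      { id: "inversion", title: "Inversion", problem: "stuck on a hard decision", explanation: "think backwards" },
      { id: "circle", title: "Circle of Control", problem: "feeling anxious about outcomes", explanation: "focus on what you control" }
    ];

    function scoreModel(m, query) {
      // Same term extraction as the CLI: lowercase, split on whitespace, drop short words.
      const terms = query.toLowerCase().split(/\s+/).filter(w => w.length > 2);
      let score = 0;
      for (const term of terms) {
        if (m.title.toLowerCase().includes(term)) score += 10;
        if ((m.problem || "").toLowerCase().includes(term)) score += 5;
        if ((m.explanation || "").toLowerCase().includes(term)) score += 1;
      }
      return score;
    }

    // "I feel anxious" -> terms ["feel", "anxious"]. Both are substrings of the
    // second model's problem text ("feeling anxious..."), so it scores 10;
    // the first model matches nothing and scores 0, so it is filtered out.
    console.log(toyModels.map(m => ({ id: m.id, score: scoreModel(m, "I feel anxious") })));

Note that includes() does substring matching, so "feel" matches "feeling". That makes the search forgiving of word endings but also means short terms can match inside unrelated words, which is presumably why terms of three characters or fewer are dropped.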