hasina-gemini-cli 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +4 -0
- package/README.md +334 -0
- package/bin/gemini-cli.js +3 -0
- package/data/sessions.json +3 -0
- package/package.json +51 -0
- package/src/app.js +285 -0
- package/src/config/env.js +93 -0
- package/src/config/gemini.js +294 -0
- package/src/index.js +23 -0
- package/src/services/chat.service.js +51 -0
- package/src/services/command.service.js +298 -0
- package/src/services/history.service.js +83 -0
- package/src/services/session.service.js +165 -0
- package/src/utils/banner.js +314 -0
- package/src/utils/file.js +57 -0
- package/src/utils/printer.js +147 -0
- package/src/utils/validators.js +67 -0
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
const fs = require('fs');
|
|
2
|
+
const os = require('os');
|
|
3
|
+
const path = require('path');
|
|
4
|
+
const dotenv = require('dotenv');
|
|
5
|
+
const {
|
|
6
|
+
assertNonEmptyString,
|
|
7
|
+
normalizeModelName,
|
|
8
|
+
parsePositiveInteger,
|
|
9
|
+
} = require('../utils/validators');
|
|
10
|
+
|
|
11
|
+
const rootDir = path.resolve(__dirname, '../..');
|
|
12
|
+
const isInstalledPackage = rootDir.includes(`${path.sep}node_modules${path.sep}`);
|
|
13
|
+
|
|
14
|
+
/**
 * Determine the directory that holds the CLI's persistent configuration.
 *
 * Resolution order:
 *   1. GEMINI_CLI_HOME environment variable (explicit user override),
 *   2. %APPDATA%\GeminiCli on Windows,
 *   3. ~/Library/Application Support/GeminiCli on macOS,
 *   4. ~/.gemini-cli everywhere else.
 *
 * @returns {string} absolute path of the configuration directory
 */
function resolveConfigDir() {
  const override = process.env.GEMINI_CLI_HOME?.trim();

  if (override) {
    return path.resolve(override);
  }

  const { platform } = process;

  if (platform === 'win32' && process.env.APPDATA) {
    return path.join(process.env.APPDATA, 'GeminiCli');
  }

  if (platform === 'darwin') {
    return path.join(os.homedir(), 'Library', 'Application Support', 'GeminiCli');
  }

  return path.join(os.homedir(), '.gemini-cli');
}
|
|
31
|
+
|
|
32
|
+
/**
 * Merge the key/value pairs from a dotenv file into process.env.
 *
 * Keys listed in `lockedKeys` are never overwritten, so values that were
 * already present in the real environment keep precedence over file values.
 * A missing file is silently ignored.
 *
 * @param {string} filePath - path of the .env file to load
 * @param {Set<string>} lockedKeys - env var names that must not be replaced
 */
function loadEnvFile(filePath, lockedKeys) {
  if (!fs.existsSync(filePath)) {
    return;
  }

  const entries = dotenv.parse(fs.readFileSync(filePath));

  for (const [name, value] of Object.entries(entries)) {
    if (!lockedKeys.has(name)) {
      process.env[name] = value;
    }
  }
}
|
|
47
|
+
|
|
48
|
+
// Candidate .env locations, loaded in ascending precedence below.
const configDir = resolveConfigDir();
const configEnvPath = path.join(configDir, '.env');
const rootEnvPath = path.join(rootDir, '.env');
const cwdEnvPath = path.join(process.cwd(), '.env');
// Snapshot of variables already present in the real environment: these are
// "locked" so no .env file can ever override a value the user exported.
const lockedKeys = new Set(Object.keys(process.env));

// Keys loaded from a file are NOT added to lockedKeys, so later files win:
// effective precedence is cwd .env > package-root .env > config-dir .env,
// with real environment variables beating all of them.
loadEnvFile(configEnvPath, lockedKeys);
loadEnvFile(rootEnvPath, lockedKeys);

// Skip the cwd file when it is the same path as the root file (running from
// the package root). NOTE(review): this is a plain string comparison — it
// will not detect the same file reached via a symlink or different casing.
if (cwdEnvPath !== rootEnvPath) {
  loadEnvFile(cwdEnvPath, lockedKeys);
}
|
|
60
|
+
|
|
61
|
+
/**
 * Build the validated, immutable configuration object for the CLI.
 *
 * Reads process.env (after the .env files above have been merged in),
 * validates the required settings, and freezes the result so consumers
 * cannot mutate shared configuration.
 *
 * @returns {Readonly<object>} frozen config with paths, API key, model,
 *   history limit, system prompt and sessions-file location
 * @throws {Error} a single wrapped error (original kept as `cause`) telling
 *   the user to fix their .env file when any validation fails
 */
function loadEnvConfig() {
  try {
    // Sessions-file precedence: explicit GEMINI_CLI_SESSIONS_FILE override,
    // else the per-user config dir when installed from npm, else the
    // repo-local data/ directory during development.
    const sessionsFilePath = process.env.GEMINI_CLI_SESSIONS_FILE
      ? path.resolve(process.env.GEMINI_CLI_SESSIONS_FILE)
      : isInstalledPackage
        ? path.join(configDir, 'sessions.json')
        : path.join(rootDir, 'data', 'sessions.json');

    return Object.freeze({
      rootDir,
      configDir,
      configEnvPath,
      isInstalledPackage,
      geminiApiKey: assertNonEmptyString(process.env.GEMINI_API_KEY, 'GEMINI_API_KEY'),
      defaultModel: normalizeModelName(
        assertNonEmptyString(process.env.DEFAULT_MODEL, 'DEFAULT_MODEL')
      ),
      maxHistoryMessages: parsePositiveInteger(
        process.env.MAX_HISTORY_MESSAGES,
        'MAX_HISTORY_MESSAGES'
      ),
      systemPrompt: assertNonEmptyString(process.env.SYSTEM_PROMPT, 'SYSTEM_PROMPT'),
      sessionsFilePath,
    });
  } catch (error) {
    // Re-wrap every validation failure with an actionable hint; the original
    // error is preserved via `cause` for debugging.
    throw new Error(
      `Environment configuration error: ${error.message} Update your .env file before starting the app.`,
      { cause: error }
    );
  }
}

// Evaluated once at require time — all importers share the same frozen
// config, and a bad environment fails fast on first import.
module.exports = loadEnvConfig();
|
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
const { GoogleGenAI } = require('@google/genai');
|
|
2
|
+
const { normalizeModelName } = require('../utils/validators');
|
|
3
|
+
|
|
4
|
+
// Substrings that mark a model id as NOT usable for text chat (embedding,
// media-generation and other special-purpose models). Used by
// isChatCapableModel() to filter the listModels() output.
const NON_CHAT_MODEL_KEYWORDS = [
  'embedding',
  'image',
  'audio',
  'tts',
  'native-audio',
  'robotics',
  'computer-use',
];
|
|
13
|
+
|
|
14
|
+
/**
 * Assemble the parameter object passed to client.chats.create().
 *
 * Normalizes the model name, converts the app-side history into Gemini's
 * content format, and attaches the system prompt as a systemInstruction
 * only when it is a non-blank string.
 *
 * @param {{model: string, systemPrompt?: string, history?: Array}} options
 * @returns {object} chat-creation params for the Gemini SDK
 */
function buildGeminiChatParams({ model, systemPrompt, history }) {
  const instruction = systemPrompt?.trim();

  const params = {
    model: normalizeModelName(model),
    history: mapHistoryToGemini(history),
  };

  if (instruction) {
    params.config = {
      systemInstruction: instruction,
    };
  }

  return params;
}
|
|
28
|
+
|
|
29
|
+
/**
 * Convert app-side chat messages into Gemini "content" objects.
 *
 * Gemini only knows the roles 'user' and 'model', so 'assistant' maps to
 * 'model' and every other role is treated as 'user'.
 *
 * @param {Array<{role: string, content: string}>} [messages=[]]
 * @returns {Array<{role: string, parts: Array<{text: string}>}>}
 */
function mapHistoryToGemini(messages = []) {
  const contents = [];

  for (const { role, content } of messages) {
    contents.push({
      role: role === 'assistant' ? 'model' : 'user',
      parts: [{ text: content }],
    });
  }

  return contents;
}
|
|
35
|
+
|
|
36
|
+
/**
 * Pull the plain text out of a Gemini response or stream chunk.
 *
 * Prefers the SDK's convenience `text` property when present; otherwise
 * concatenates the string parts of the first candidate. Returns '' for
 * anything unreadable rather than throwing.
 *
 * @param {object|null|undefined} response - Gemini response or chunk
 * @returns {string} extracted text, possibly empty
 */
function extractResponseText(response) {
  if (!response) {
    return '';
  }

  if (typeof response.text === 'string') {
    return response.text;
  }

  const parts = response.candidates?.[0]?.content?.parts;

  if (!Array.isArray(parts)) {
    return '';
  }

  let text = '';

  for (const part of parts) {
    if (typeof part?.text === 'string') {
      text += part.text;
    }
  }

  return text;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Normalize one raw entry from the Gemini model listing into the record
 * shape used throughout the CLI.
 *
 * Strips the "models/" prefix from the id, fills safe defaults for missing
 * fields, and derives the isPreview / isLatest flags from the id itself.
 *
 * @param {object|null|undefined} model - raw SDK model entry
 * @returns {object|null} normalized record, or null when no usable id
 */
function normalizeListedModel(model) {
  const id = String(model?.name || '').replace(/^models\//, '').trim();

  if (!id) {
    return null;
  }

  const finiteOrNull = (value) => (Number.isFinite(value) ? value : null);

  return {
    id,
    displayName: model.displayName || id,
    description: model.description || '',
    inputTokenLimit: finiteOrNull(model.inputTokenLimit),
    outputTokenLimit: finiteOrNull(model.outputTokenLimit),
    supportedActions: Array.isArray(model.supportedActions) ? model.supportedActions : [],
    // Experimental/preview builds and "-latest" aliases are flagged so the
    // sorter can rank stable, pinned models first.
    isPreview: /preview|exp|experimental/i.test(id),
    isLatest: /-latest$/i.test(id),
  };
}
|
|
74
|
+
|
|
75
|
+
/**
 * Decide whether a normalized model record can be used for text chat.
 *
 * A chat-capable model has a "gemini*" id, supports generateContent, and
 * contains none of the NON_CHAT_MODEL_KEYWORDS substrings in its id.
 *
 * @param {object|null} model - record from normalizeListedModel(), or null
 * @returns {boolean|null} truthy when chat-capable (falsy input passes through)
 */
function isChatCapableModel(model) {
  if (!model) {
    // Preserve the original short-circuit result (null/undefined stay falsy).
    return model;
  }

  const { id } = model;

  return (
    id.startsWith('gemini') &&
    model.supportedActions.includes('generateContent') &&
    NON_CHAT_MODEL_KEYWORDS.every((keyword) => !id.includes(keyword))
  );
}
|
|
83
|
+
|
|
84
|
+
/**
 * Return a copy of `models` ordered for display: the currently selected
 * model first, then stable (non-preview) models, then non "-latest"
 * aliases, with ties broken alphabetically by id. The input array is not
 * mutated.
 *
 * Fix: the previous implementation joined each priority tuple into a single
 * string with '|' and compared with localeCompare, which is locale-dependent
 * (ordering could differ between environments) and would misorder ids that
 * ever contained the separator. The tuple is now compared element-wise with
 * locale-independent comparisons.
 *
 * @param {Array} models - records from normalizeListedModel()
 * @param {string} [currentModel] - id of the active model, ranked first
 * @returns {Array} a new, sorted array
 */
function sortModels(models, currentModel) {
  // Lower rank sorts earlier; id is the final tie-breaker.
  const rank = (model) => [
    model.id === currentModel ? 0 : 1,
    model.isPreview ? 1 : 0,
    model.isLatest ? 1 : 0,
  ];

  return models.slice().sort((left, right) => {
    const leftRank = rank(left);
    const rightRank = rank(right);

    for (let i = 0; i < leftRank.length; i += 1) {
      if (leftRank[i] !== rightRank[i]) {
        return leftRank[i] - rightRank[i];
      }
    }

    // Deterministic, locale-independent alphabetical ordering by id.
    if (left.id < right.id) {
      return -1;
    }
    return left.id > right.id ? 1 : 0;
  });
}
|
|
102
|
+
|
|
103
|
+
/**
 * Translate a raw Gemini SDK/network error into an Error with a message a
 * CLI user can act on. The original error is always preserved as `cause`.
 *
 * Mapping: 400 -> bad request (with a model-name hint when applicable),
 * 401 -> bad API key, 403 -> quota/billing or access denied, 404 -> unknown
 * model, 429 -> rate limit, 5xx -> transient outage, common network phrases
 * -> connectivity, anything else -> `fallbackMessage` (plus the original
 * message when available).
 *
 * @param {unknown} error - error thrown by the SDK or fetch layer
 * @param {string} fallbackMessage - message used when nothing matches
 * @returns {Error} user-friendly error with `cause` set to `error`
 */
function createFriendlyGeminiError(error, fallbackMessage) {
  const status = typeof error?.status === 'number' ? error.status : undefined;
  const message = typeof error?.message === 'string' ? error.message.trim() : '';
  const lowerMessage = message.toLowerCase();
  const wrap = (text) => new Error(text, { cause: error });

  switch (status) {
    case 400:
      if (lowerMessage.includes('model')) {
        return wrap(
          'Gemini rejected the selected model. Use a valid model name such as "gemini-2.5-flash".'
        );
      }
      return wrap(`Gemini rejected the request: ${message || 'bad request.'}`);
    case 401:
      return wrap('Authentication failed. Check GEMINI_API_KEY in your .env file.');
    case 403:
      if (lowerMessage.includes('quota') || lowerMessage.includes('billing')) {
        return wrap(
          'Gemini quota or billing limits were reached. Check your Google AI Studio quota and billing settings.'
        );
      }
      return wrap(
        'Access to the Gemini API was denied. Verify that your API key has access to the selected model.'
      );
    case 404:
      return wrap(
        'The requested Gemini model was not found. Check the model name and try again.'
      );
    case 429:
      return wrap('Gemini rate limits were reached. Wait a moment and try again.');
    default:
      break;
  }

  if (status >= 500) {
    return wrap('Gemini is temporarily unavailable. Try again in a few moments.');
  }

  const networkHints = [
    'network',
    'fetch failed',
    'timed out',
    'timeout',
    'enotfound',
    'econnreset',
  ];

  if (networkHints.some((hint) => lowerMessage.includes(hint))) {
    return wrap('Network request to Gemini failed. Check your connection and try again.');
  }

  return wrap(message ? `${fallbackMessage} ${message}` : fallbackMessage);
}
|
|
177
|
+
|
|
178
|
+
/**
 * Create the provider object the app uses to talk to Gemini.
 *
 * All four methods funnel SDK failures through createFriendlyGeminiError so
 * callers always see actionable messages with the raw error as `cause`.
 *
 * @param {{apiKey: string}} options - Gemini API key
 * @returns {object} provider with validateModel, listModels, generateReply
 *   and streamReply methods
 */
function createGeminiProvider({ apiKey }) {
  const client = new GoogleGenAI({ apiKey });

  return {
    name: 'gemini',

    /**
     * Verify a model exists/is reachable by fetching its metadata.
     * @param {string} model - model name; "models/" prefix handling is done
     *   by normalizeModelName
     * @returns {Promise<object>} the SDK's model metadata
     * @throws {Error} friendly error when lookup fails
     */
    async validateModel(model) {
      const normalizedModel = normalizeModelName(model);

      try {
        return await client.models.get({ model: normalizedModel });
      } catch (error) {
        throw createFriendlyGeminiError(
          error,
          `Unable to validate model "${normalizedModel}".`
        );
      }
    },

    /**
     * List chat-capable Gemini models, sorted for display.
     * @param {{currentModel?: string, pageSize?: number}} [options]
     * @returns {Promise<Array>} normalized, filtered, sorted model records
     * @throws {Error} friendly error when the listing fails
     */
    async listModels({ currentModel, pageSize = 100 } = {}) {
      try {
        const pager = await client.models.list({
          config: {
            pageSize,
          },
        });
        const models = [];

        // The SDK returns an async pager; iterating it fetches all pages.
        for await (const model of pager) {
          const normalized = normalizeListedModel(model);

          // Drop embeddings/media/etc. — only chat models are useful here.
          if (!isChatCapableModel(normalized)) {
            continue;
          }

          models.push(normalized);
        }

        return sortModels(models, currentModel);
      } catch (error) {
        throw createFriendlyGeminiError(error, 'Unable to load the Gemini model list.');
      }
    },

    /**
     * Send one message and wait for the complete reply (no streaming).
     * @returns {Promise<{model: string, streamed: false, text: string}>}
     * @throws {Error} friendly error when the request fails
     */
    async generateReply({ model, systemPrompt, history, message }) {
      try {
        const chat = client.chats.create(
          buildGeminiChatParams({
            model,
            systemPrompt,
            history,
          })
        );

        const response = await chat.sendMessage({ message });

        return {
          model,
          streamed: false,
          text: extractResponseText(response),
        };
      } catch (error) {
        throw createFriendlyGeminiError(error, 'Gemini request failed.');
      }
    },

    /**
     * Send one message and stream the reply, invoking `onTextChunk` with
     * each incremental text delta.
     * @returns {Promise<{model: string, streamed: true, text: string}>}
     *   the accumulated full reply text
     * @throws {Error} friendly error when the streaming request fails
     */
    async streamReply({ model, systemPrompt, history, message, onTextChunk }) {
      try {
        const chat = client.chats.create(
          buildGeminiChatParams({
            model,
            systemPrompt,
            history,
          })
        );

        const stream = await chat.sendMessageStream({ message });
        let fullText = '';

        for await (const chunk of stream) {
          const chunkText = extractResponseText(chunk);

          if (!chunkText) {
            continue;
          }

          // NOTE(review): this tolerates chunks that are either cumulative
          // (full text so far) or incremental (just the delta): when the
          // chunk starts with everything accumulated so far, only the new
          // suffix is emitted. Verify against the installed @google/genai
          // version's actual chunk semantics.
          const delta = chunkText.startsWith(fullText)
            ? chunkText.slice(fullText.length)
            : chunkText;

          if (!delta) {
            continue;
          }

          fullText += delta;

          if (typeof onTextChunk === 'function') {
            onTextChunk(delta);
          }
        }

        return {
          model,
          streamed: true,
          text: fullText,
        };
      } catch (error) {
        throw createFriendlyGeminiError(error, 'Gemini streaming request failed.');
      }
    },
  };
}

module.exports = {
  createFriendlyGeminiError,
  createGeminiProvider,
};
|
package/src/index.js
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
const { createGeminiProvider } = require('./config/gemini');
|
|
2
|
+
const printer = require('./utils/printer');
|
|
3
|
+
const { App } = require('./app');
|
|
4
|
+
|
|
5
|
+
/**
 * CLI entry point: load config, build the Gemini provider, start the app.
 * Any error thrown here (including env validation) is handled by the
 * top-level catch below.
 */
async function main() {
  // Lazy require: ./config/env validates the environment at require time,
  // so loading it inside main() routes config errors into main's catch
  // handler instead of crashing with an unhandled module-load exception.
  const config = require('./config/env');
  const provider = createGeminiProvider({
    apiKey: config.geminiApiKey,
  });

  const app = new App({
    config,
    provider,
    printer,
  });

  await app.start();
}

// Top-level failure path: print a friendly message and exit non-zero.
main().catch((error) => {
  printer.printError(error.message || 'Unexpected startup error.');
  process.exit(1);
});
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
const { assertNonEmptyString, normalizeModelName } = require('../utils/validators');
|
|
2
|
+
|
|
3
|
+
/**
 * Orchestrates a single chat turn: bounds the history, validates the
 * request, and dispatches to the provider (streaming when available).
 */
class ChatService {
  /**
   * @param {{provider: object, maxHistoryMessages: number}} deps -
   *   the model provider and the maximum history length to send
   */
  constructor({ provider, maxHistoryMessages }) {
    this.provider = provider;
    this.maxHistoryMessages = maxHistoryMessages;
  }

  /**
   * Trim history to the most recent `maxHistoryMessages` entries, then drop
   * a leading assistant message so the history always opens with a user turn.
   *
   * @param {Array<{role: string, content: string}>} [messages=[]]
   * @returns {Array} bounded history (a new array; input is untouched)
   */
  getBoundedHistory(messages = []) {
    const recent = messages.slice(-this.maxHistoryMessages);

    if (recent[0]?.role === 'assistant') {
      return recent.slice(1);
    }

    return recent;
  }

  /**
   * Validate and assemble a provider request.
   *
   * @returns {{model: string, systemPrompt: string|undefined, history: Array, message: string}}
   * @throws {Error} when the model name or user message fails validation
   */
  buildRequest({ model, systemPrompt, historyMessages, userMessage }) {
    // Validation order matters for which error surfaces first:
    // model, then history bounding, then the user message.
    const normalizedModel = normalizeModelName(model);
    const history = this.getBoundedHistory(historyMessages);
    const message = assertNonEmptyString(userMessage, 'User message');

    return {
      model: normalizedModel,
      systemPrompt,
      history,
      message,
    };
  }

  /**
   * Send one user message. Streams via provider.streamReply when requested
   * and supported; otherwise falls back to a single blocking reply.
   *
   * @returns {Promise<object>} the provider's reply result
   */
  async sendMessage({
    model,
    systemPrompt,
    historyMessages,
    userMessage,
    preferStreaming = true,
    onTextChunk,
  }) {
    const request = this.buildRequest({
      model,
      systemPrompt,
      historyMessages,
      userMessage,
    });

    const canStream =
      preferStreaming && typeof this.provider.streamReply === 'function';

    if (canStream) {
      return this.provider.streamReply({
        ...request,
        onTextChunk,
      });
    }

    return this.provider.generateReply(request);
  }
}
|
|
48
|
+
|
|
49
|
+
// Named export only — consumers destructure: const { ChatService } = require(...).
module.exports = {
  ChatService,
};
|