hasina-gemini-cli 1.0.1 → 1.0.2

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "hasina-gemini-cli",
-  "version": "1.0.1",
+  "version": "1.0.2",
   "description": "Production-ready terminal AI chat application powered by the official Gemini API SDK.",
   "main": "src/index.js",
   "bin": {
package/src/app.js CHANGED
@@ -75,6 +75,10 @@ class App {
       sessionId: await this.sessionService.generateSessionId(),
       sessionCreatedAt: new Date().toISOString(),
       model: this.config.defaultModel,
+      activeModelInfo: {
+        id: this.config.defaultModel,
+        version: null,
+      },
       systemPrompt: this.config.systemPrompt,
       historyService: this.historyService,
     };
@@ -209,6 +213,7 @@ class App {
     }

     this.state.model = selectedModel.id;
+    this.state.activeModelInfo = selectedModel;
     const versionSuffix = selectedModel.version ? ` (version ${selectedModel.version})` : '';
     this.printer.printSuccess(`Active model changed to "${selectedModel.id}"${versionSuffix}.`);
     return false;
@@ -296,20 +301,11 @@ class App {
       return false;
     }

-    let message = `Identifiant du modele (local): ${this.state.model}.`;
+    const modelInfo = this.state.activeModelInfo || { id: this.state.model, version: null };
+    let message = `Identifiant du modele (local): ${modelInfo.id || this.state.model}.`;

-    if (typeof this.provider.getModelInfo === 'function') {
-      try {
-        const modelInfo = await this.provider.getModelInfo(this.state.model);
-
-        if (modelInfo?.version) {
-          message = `Identifiant du modele (local): ${modelInfo.id} (version ${modelInfo.version}).`;
-        } else if (modelInfo?.id) {
-          message = `Identifiant du modele (local): ${modelInfo.id}.`;
-        }
-      } catch (_error) {
-        // Ignore lookup failures and use the active model from local state.
-      }
+    if (modelInfo?.version) {
+      message = `Identifiant du modele (local): ${modelInfo.id} (version ${modelInfo.version}).`;
     }

     this.printer.printAssistant(message);
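
Note: the model-info command no longer calls provider.getModelInfo over the network; it reads the activeModelInfo cached in local state, seeded in the App constructor above and kept current by the CommandService hunks below. The French status string translates to "Model identifier (local): <id> (version <version>).". A minimal sketch of the new lookup path, using invented state values for illustration:

// Sketch only; the state values here are invented for illustration.
const state = {
  model: 'gemini-2.5-flash',
  activeModelInfo: { id: 'gemini-2.5-flash', version: '001' },
};

const modelInfo = state.activeModelInfo || { id: state.model, version: null };
let message = `Identifiant du modele (local): ${modelInfo.id || state.model}.`;

if (modelInfo.version) {
  message = `Identifiant du modele (local): ${modelInfo.id} (version ${modelInfo.version}).`;
}

console.log(message); // Identifiant du modele (local): gemini-2.5-flash (version 001).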
@@ -10,6 +10,8 @@ const NON_CHAT_MODEL_KEYWORDS = [
   'robotics',
   'computer-use',
 ];
+const TRANSIENT_STATUS_CODES = new Set([429, 500, 502, 503, 504]);
+const RETRY_DELAYS_MS = [1200, 2500];

 function buildGeminiChatParams({ model, systemPrompt, history }) {
   const params = {
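
Note: these two constants configure the retryOperation helper introduced further down: one initial attempt plus one retry per delay entry. A small sketch of the arithmetic they imply, assuming RETRY_DELAYS_MS from the hunk above is in scope:

// At most 1 + 2 = 3 attempts, with at most 1200 + 2500 = 3700 ms of added backoff.
const maxAttempts = 1 + RETRY_DELAYS_MS.length;
const worstCaseBackoffMs = RETRY_DELAYS_MS.reduce((sum, ms) => sum + ms, 0);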
@@ -53,6 +55,71 @@ function extractResponseText(response) {
     .join('');
 }

+function sleep(durationMs) {
+  return new Promise((resolve) => {
+    setTimeout(resolve, durationMs);
+  });
+}
+
+function getErrorStatus(error) {
+  if (typeof error?.status === 'number') {
+    return error.status;
+  }
+
+  if (typeof error?.cause?.status === 'number') {
+    return error.cause.status;
+  }
+
+  return undefined;
+}
+
+function getErrorMessage(error) {
+  if (typeof error?.message === 'string' && error.message.trim()) {
+    return error.message.trim();
+  }
+
+  if (typeof error?.cause?.message === 'string' && error.cause.message.trim()) {
+    return error.cause.message.trim();
+  }
+
+  return '';
+}
+
+function isPreviewModel(model) {
+  return typeof model === 'string' && /preview|exp|experimental/i.test(model);
+}
+
+function isTransientGeminiError(error) {
+  const status = getErrorStatus(error);
+  const lowerMessage = getErrorMessage(error).toLowerCase();
+
+  return (
+    TRANSIENT_STATUS_CODES.has(status) ||
+    lowerMessage.includes('high demand') ||
+    lowerMessage.includes('service unavailable') ||
+    lowerMessage.includes('temporarily unavailable') ||
+    lowerMessage.includes('unavailable') ||
+    lowerMessage.includes('rate limit') ||
+    lowerMessage.includes('fetch failed') ||
+    lowerMessage.includes('timeout') ||
+    lowerMessage.includes('timed out') ||
+    lowerMessage.includes('econnreset') ||
+    lowerMessage.includes('enotfound')
+  );
+}
+
+function buildTemporaryUnavailableMessage(model) {
+  const baseMessage = model
+    ? `Gemini is temporarily unavailable for "${model}".`
+    : 'Gemini is temporarily unavailable.';
+
+  if (isPreviewModel(model)) {
+    return `${baseMessage} Preview models can be under heavy demand. Retry in a few moments or switch with /use-model gemini-2.5-flash.`;
+  }
+
+  return `${baseMessage} Try again in a few moments.`;
+}
+
 function normalizeModelInfo(model) {
   const id = String(model?.name || '').replace(/^models\//, '').trim();

@@ -102,9 +169,10 @@ function sortModels(models, currentModel) {
   });
 }

-function createFriendlyGeminiError(error, fallbackMessage) {
-  const status = typeof error?.status === 'number' ? error.status : undefined;
-  const message = typeof error?.message === 'string' ? error.message.trim() : '';
+function createFriendlyGeminiError(error, fallbackMessage, options = {}) {
+  const model = options.model;
+  const status = getErrorStatus(error);
+  const message = getErrorMessage(error);
   const lowerMessage = message.toLowerCase();

   if (status === 400) {
@@ -154,7 +222,7 @@ function createFriendlyGeminiError(error, fallbackMessage) {
   }

   if (status >= 500) {
-    return new Error('Gemini is temporarily unavailable. Try again in a few moments.', {
+    return new Error(buildTemporaryUnavailableMessage(model), {
       cause: error,
     });
   }
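
Note: getErrorStatus and getErrorMessage also look at error.cause, so createFriendlyGeminiError now classifies errors that arrive wrapped with a cause, and the new options.model argument lets the 5xx branch name the affected model. A usage sketch with synthetic errors, assuming the module-private helpers above are in scope:

// Synthetic errors, for illustration only.
const rateLimited = Object.assign(new Error('Rate limit exceeded'), { status: 429 });
const wrapped = new Error('request failed', {
  cause: Object.assign(new Error('fetch failed'), { status: 503 }),
});

isTransientGeminiError(rateLimited); // true: 429 is in TRANSIENT_STATUS_CODES
isTransientGeminiError(wrapped);     // true: status 503 is read from error.cause

buildTemporaryUnavailableMessage('gemini-2.5-pro-preview');
// -> 'Gemini is temporarily unavailable for "gemini-2.5-pro-preview". Preview models
//    can be under heavy demand. Retry in a few moments or switch with /use-model
//    gemini-2.5-flash.'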
@@ -180,6 +248,26 @@ function createFriendlyGeminiError(error, fallbackMessage) {
 function createGeminiProvider({ apiKey }) {
   const client = new GoogleGenAI({ apiKey });

+  async function retryOperation(operation, { model, fallbackMessage }) {
+    let lastError = null;
+
+    for (let attempt = 0; attempt <= RETRY_DELAYS_MS.length; attempt += 1) {
+      try {
+        return await operation(attempt);
+      } catch (error) {
+        lastError = error;
+
+        if (!isTransientGeminiError(error) || attempt >= RETRY_DELAYS_MS.length) {
+          throw createFriendlyGeminiError(error, fallbackMessage, { model });
+        }
+
+        await sleep(RETRY_DELAYS_MS[attempt]);
+      }
+    }
+
+    throw createFriendlyGeminiError(lastError, fallbackMessage, { model });
+  }
+
   return {
     name: 'gemini',

@@ -191,7 +279,8 @@ function createGeminiProvider({ apiKey }) {
     } catch (error) {
       throw createFriendlyGeminiError(
         error,
-        `Unable to validate model "${normalizedModel}".`
+        `Unable to validate model "${normalizedModel}".`,
+        { model: normalizedModel }
       );
     }
   },
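
Note: a hedged usage sketch of the retryOperation helper added above; callGemini is a hypothetical stand-in for the wrapped SDK call, not part of the package. Transient failures are retried after 1200 ms and then 2500 ms; non-transient failures, and the final failed attempt, surface as a friendly error built by createFriendlyGeminiError:

// Hypothetical usage; callGemini is a placeholder for the real SDK call.
const result = await retryOperation(
  async (attempt) => callGemini({ attempt }),
  { model: 'gemini-2.5-flash', fallbackMessage: 'Gemini request failed.' }
);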
@@ -205,7 +294,8 @@ function createGeminiProvider({ apiKey }) {
     } catch (error) {
       throw createFriendlyGeminiError(
         error,
-        `Unable to load details for model "${normalizedModel}".`
+        `Unable to load details for model "${normalizedModel}".`,
+        { model: normalizedModel }
       );
     }
   },
@@ -236,70 +326,90 @@ function createGeminiProvider({ apiKey }) {
     },

     async generateReply({ model, systemPrompt, history, message }) {
-      try {
-        const chat = client.chats.create(
-          buildGeminiChatParams({
+      return retryOperation(
+        async () => {
+          const chat = client.chats.create(
+            buildGeminiChatParams({
+              model,
+              systemPrompt,
+              history,
+            })
+          );
+
+          const response = await chat.sendMessage({ message });
+
+          return {
             model,
-            systemPrompt,
-            history,
-          })
-        );
-
-        const response = await chat.sendMessage({ message });
-
-        return {
+            streamed: false,
+            text: extractResponseText(response),
+          };
+        },
+        {
           model,
-          streamed: false,
-          text: extractResponseText(response),
-        };
-      } catch (error) {
-        throw createFriendlyGeminiError(error, 'Gemini request failed.');
-      }
+          fallbackMessage: 'Gemini request failed.',
+        }
+      );
     },

     async streamReply({ model, systemPrompt, history, message, onTextChunk }) {
-      try {
-        const chat = client.chats.create(
-          buildGeminiChatParams({
-            model,
-            systemPrompt,
-            history,
-          })
-        );
-
-        const stream = await chat.sendMessageStream({ message });
-        let fullText = '';
-
-        for await (const chunk of stream) {
-          const chunkText = extractResponseText(chunk);
-
-          if (!chunkText) {
-            continue;
+      return retryOperation(
+        async () => {
+          const chat = client.chats.create(
+            buildGeminiChatParams({
+              model,
+              systemPrompt,
+              history,
+            })
+          );
+
+          const stream = await chat.sendMessageStream({ message });
+          let fullText = '';
+          let emittedAnyChunk = false;
+
+          try {
+            for await (const chunk of stream) {
+              const chunkText = extractResponseText(chunk);
+
+              if (!chunkText) {
+                continue;
+              }
+
+              const delta = chunkText.startsWith(fullText)
+                ? chunkText.slice(fullText.length)
+                : chunkText;
+
+              if (!delta) {
+                continue;
+              }
+
+              fullText += delta;
+              emittedAnyChunk = true;
+
+              if (typeof onTextChunk === 'function') {
+                onTextChunk(delta);
+              }
+            }
+          } catch (error) {
+            if (emittedAnyChunk) {
+              throw createFriendlyGeminiError(error, 'Gemini streaming request failed.', {
+                model,
+              });
+            }
+
+            throw error;
           }

-          const delta = chunkText.startsWith(fullText)
-            ? chunkText.slice(fullText.length)
-            : chunkText;
-
-          if (!delta) {
-            continue;
-          }
-
-          fullText += delta;
-
-          if (typeof onTextChunk === 'function') {
-            onTextChunk(delta);
-          }
-        }
-
-        return {
+          return {
+            model,
+            streamed: true,
+            text: fullText,
+          };
+        },
+        {
           model,
-          streamed: true,
-          text: fullText,
-        };
-      } catch (error) {
-        throw createFriendlyGeminiError(error, 'Gemini streaming request failed.');
-      }
+          fallbackMessage: 'Gemini streaming request failed.',
+        }
+      );
     },
   };
 }
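
Note: two behaviors in the reworked streamReply are worth calling out. First, the delta logic tolerates SDKs that emit either cumulative or incremental chunk text: when a chunk starts with everything accumulated so far, only the new suffix is forwarded to onTextChunk. Second, a failure that occurs after chunks were already emitted is wrapped in a friendly error immediately rather than rethrown raw, so retryOperation will not retry and duplicate output the user has already seen; a failure before any output is rethrown raw and stays retryable. A self-contained sketch of the delta normalization, with synthetic chunk texts:

// Self-contained sketch of the delta normalization used in streamReply.
function toDeltas(chunkTexts) {
  let fullText = '';
  const deltas = [];

  for (const chunkText of chunkTexts) {
    const delta = chunkText.startsWith(fullText)
      ? chunkText.slice(fullText.length) // cumulative chunk: keep only the new suffix
      : chunkText;                       // incremental chunk: use it verbatim

    if (delta) {
      fullText += delta;
      deltas.push(delta);
    }
  }

  return deltas;
}

toDeltas(['Hello', 'Hello, world']); // cumulative chunks  -> ['Hello', ', world']
toDeltas(['Hello', ', world']);      // incremental chunks -> ['Hello', ', world']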
@@ -226,6 +226,7 @@ class CommandService {
     }

     const model = await this.provider.getModelInfo(state.model);
+    state.activeModelInfo = model;
     const tags = [];

     if (model.isPreview) {
@@ -284,6 +285,7 @@ class CommandService {
       ? await this.provider.getModelInfo(modelName)
       : await this.provider.validateModel(modelName);
     state.model = modelName;
+    state.activeModelInfo = model?.id ? model : { id: modelName, version: null };

     return messageResult(
       'success',
@@ -325,6 +327,7 @@ class CommandService {
     state.sessionId = session.id;
     state.sessionCreatedAt = session.createdAt;
     state.model = session.model;
+    state.activeModelInfo = { id: session.model, version: null };
     state.systemPrompt = session.systemPrompt;

     return messageResult(
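
Note: together, these three CommandService hunks keep state.activeModelInfo synchronized at every site that mutates state.model. Where no provider metadata is available, such as when resuming a saved session, the cache falls back to the same { id, version: null } shape seeded in App. A hypothetical helper capturing the shared pattern (the package inlines it at each site):

// Hypothetical helper, not in the package: normalize to the cached shape.
function toActiveModelInfo(model, fallbackId) {
  return model?.id ? model : { id: fallbackId, version: null };
}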