omnikey-cli 1.0.37 → 1.0.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,9 +16,9 @@ const config_1 = require("./config");
16
16
  // Default model mapping
17
17
  // ---------------------------------------------------------------------------
18
18
  const DEFAULT_MODELS = {
19
- openai: { fast: 'gpt-4o-mini', smart: 'gpt-5.1' },
19
+ openai: { fast: 'gpt-4o-mini', smart: 'gpt-5.5' },
20
20
  gemini: { fast: 'gemini-2.5-flash', smart: 'gemini-2.5-pro' },
21
- anthropic: { fast: 'claude-haiku-4-5-20251001', smart: 'claude-sonnet-4-6' },
21
+ anthropic: { fast: 'claude-haiku-4-5-20251001', smart: 'claude-opus-4-7' },
22
22
  };
23
23
  function getDefaultModel(provider, tier) {
24
24
  return DEFAULT_MODELS[provider][tier];
@@ -29,7 +29,7 @@ function getDefaultModel(provider, tier) {
29
29
  * - anthropic: hard API-enforced string limit of 10,485,760 chars; we stay
30
30
  * just below it with a small safety buffer.
31
31
  * - openai: no documented per-string limit; bounded by the context window
32
- * (~272K tokens for GPT-5.1 ≈ ~1M chars). Use the history cap.
32
+ * (~272K tokens for GPT-5.5 ≈ ~1M chars). Use the history cap.
33
33
  * - gemini: no documented per-string limit; bounded by the 1M-token
34
34
  * context window (~4M chars). Use the history cap.
35
35
  */
@@ -43,9 +43,9 @@ const MAX_MESSAGE_CONTENT_LENGTH_BY_PROVIDER = {
43
43
  * history, derived from each provider's context-window size minus headroom
44
44
  * for the system prompt and max output tokens.
45
45
  *
46
- * - anthropic: Claude Sonnet 4.6 — 1M token ctx, 64K max output
46
+ * - anthropic: Claude Opus 4.7 — 1M token ctx, 64K max output
47
47
  * ≈ (1,000,000 - 64,000 - 10,000) tokens × 4 chars ≈ 3.7M chars
48
- * - openai: GPT-5.1 — ~272K token ctx, ~32K max output
48
+ * - openai: GPT-5.5 — ~272K token ctx, ~32K max output
49
49
  * ≈ (272,000 - 32,000 - 5,000) tokens × 4 chars ≈ 940K chars
50
50
  * - gemini: Gemini 2.5 Pro — 1M token ctx, ~32K max output
51
51
  * ≈ (1,000,000 - 32,000 - 10,000) tokens × 4 chars ≈ 3.8M chars
@@ -75,7 +75,7 @@ class OpenAIAdapter {
75
75
  model,
76
76
  messages: oaiMessages,
77
77
  tools: tools?.length ? tools : undefined,
78
- temperature: options.temperature ?? 0.2,
78
+ temperature: model === 'gpt-5.5' ? 1 : (options.temperature ?? 0.2),
79
79
  max_tokens: options.maxTokens,
80
80
  });
81
81
  const choice = completion.choices[0];
@@ -175,7 +175,7 @@ class AnthropicAdapter {
175
175
  ...(system ? { system } : {}),
176
176
  messages: anthropicMessages,
177
177
  ...(tools?.length ? { tools } : {}),
178
- temperature: options.temperature ?? 0.2,
178
+ ...(model === 'claude-opus-4-7' ? {} : { temperature: options.temperature ?? 0.2 }),
179
179
  });
180
180
  const textContent = response.content
181
181
  .filter((b) => b.type === 'text')
@@ -58,9 +58,9 @@ async function getPromptForCommand(logger, cmd, subscription) {
58
58
  function getModelForCommand(cmd) {
59
59
  const tier = cmd === 'task' ? 'smart' : 'fast';
60
60
  const models = {
61
- openai: { fast: 'gpt-4o-mini', smart: 'gpt-5.1' },
61
+ openai: { fast: 'gpt-4o-mini', smart: 'gpt-5.5' },
62
62
  gemini: { fast: 'gemini-2.5-flash', smart: 'gemini-2.5-pro' },
63
- anthropic: { fast: 'claude-haiku-4-5-20251001', smart: 'claude-sonnet-4-6' },
63
+ anthropic: { fast: 'claude-haiku-4-5-20251001', smart: 'claude-opus-4-7' },
64
64
  };
65
65
  return models[config_1.config.aiProvider]?.[tier] ?? 'gpt-4o-mini';
66
66
  }
@@ -74,8 +74,8 @@ app.get('/macos/appcast', (req, res) => {
74
74
  const appcastUrl = `${baseUrl}/macos/appcast`;
75
75
  // These should match the values embedded into the macOS app
76
76
  // Info.plist in macOS/build_release_dmg.sh.
77
- const bundleVersion = '25';
78
- const shortVersion = '1.0.24';
77
+ const bundleVersion = '26';
78
+ const shortVersion = '1.0.25';
79
79
  const xml = `<?xml version="1.0" encoding="utf-8"?>
80
80
  <rss version="2.0"
81
81
  xmlns:sparkle="http://www.andymatuschak.org/xml-namespaces/sparkle"
@@ -23,9 +23,9 @@ const SHELL_SCRIPT_RE = /<shell_script>([\s\S]*?)<\/shell_script>/;
23
23
  const FINAL_ANSWER_RE = /<final_answer>/;
24
24
  // Maximum time a single job may run before it is forcibly cancelled.
25
25
  const JOB_TIMEOUT_MS = 10 * 60 * 1000;
26
- // Cron jobs get more turns than interactive sessions so multi-step tasks
27
- // (web research → shell commands → final answer) can complete unattended.
28
- const MAX_CRON_TURNS = 30;
26
+ const MAX_AGENT_ERROR_RECOVERY_ATTEMPTS = 4;
27
+ // Single-process guard to avoid running the same job concurrently.
28
+ const RUNNING_JOB_IDS = new Set();
29
29
  function computeNextRunAt(cronExpression, runAt) {
30
30
  if (cronExpression) {
31
31
  try {
@@ -104,6 +104,7 @@ async function runScript(script) {
104
104
  }
105
105
  function runCronJob(job, subscription, sessionId) {
106
106
  return new Promise((resolve, reject) => {
107
+ let agentErrorRecoveryAttempts = 0;
107
108
  let settled = false;
108
109
  const settle = (err) => {
109
110
  if (settled)
@@ -119,11 +120,27 @@ function runCronJob(job, subscription, sessionId) {
119
120
  void (async () => {
120
121
  const content = msg.content ?? '';
121
122
  if (msg.is_error) {
122
- logger_1.logger.error('Cron job: agent returned error.', {
123
+ agentErrorRecoveryAttempts += 1;
124
+ logger_1.logger.warn('Cron job: agent returned error; attempting recovery.', {
123
125
  jobId: job.id,
126
+ attempt: agentErrorRecoveryAttempts,
124
127
  content: content.slice(0, 300),
125
128
  });
126
- settle(new Error(`Agent error: ${content.slice(0, 200)}`));
129
+ const shouldFailNow = FINAL_ANSWER_RE.test(content) ||
130
+ agentErrorRecoveryAttempts > MAX_AGENT_ERROR_RECOVERY_ATTEMPTS;
131
+ if (shouldFailNow) {
132
+ settle(new Error(`Agent error: ${content.slice(0, 200)}`));
133
+ return;
134
+ }
135
+ (0, agentServer_1.runAgentTurn)(sessionId, subscription, {
136
+ session_id: sessionId,
137
+ sender: 'user',
138
+ content: `Agent turn failed while processing this cron job. ` +
139
+ `Recover from the latest state and return the next ` +
140
+ `<shell_script> or a <final_answer>.\n\n` +
141
+ `Error details:\n${content}`,
142
+ is_error: true,
143
+ }, send, logger_1.logger, { isCronJob: true }).catch((err) => settle(err instanceof Error ? err : new Error(String(err))));
127
144
  return;
128
145
  }
129
146
  const scriptMatch = SHELL_SCRIPT_RE.exec(content);
@@ -147,10 +164,28 @@ function runCronJob(job, subscription, sessionId) {
147
164
  }, send, logger_1.logger, { isCronJob: true }).catch((err) => settle(err instanceof Error ? err : new Error(String(err))));
148
165
  return;
149
166
  }
167
+ if (msg.is_web_call || msg.is_image_rendering) {
168
+ logger_1.logger.debug('Cron job: received progress notification; waiting for next message.', {
169
+ jobId: job.id,
170
+ isWebCall: !!msg.is_web_call,
171
+ isImageRendering: !!msg.is_image_rendering,
172
+ });
173
+ return;
174
+ }
150
175
  if (FINAL_ANSWER_RE.test(content)) {
151
176
  logger_1.logger.info('Cron job: received final answer.', { jobId: job.id });
152
177
  settle();
178
+ return;
153
179
  }
180
+ if (content.trim()) {
181
+ logger_1.logger.warn('Cron job: received untagged agent content; treating as final answer.', {
182
+ jobId: job.id,
183
+ content: content.slice(0, 300),
184
+ });
185
+ settle();
186
+ return;
187
+ }
188
+ settle(new Error('Agent returned empty response with no shell script or final answer.'));
154
189
  })();
155
190
  };
156
191
  (0, agentServer_1.runAgentTurn)(sessionId, subscription, {
@@ -162,38 +197,51 @@ function runCronJob(job, subscription, sessionId) {
162
197
  });
163
198
  }
164
199
  async function executeJob(job) {
165
- logger_1.logger.info('Executing scheduled job.', { jobId: job.id, label: job.label });
166
- const subscription = await subscription_1.Subscription.findByPk(job.subscriptionId);
167
- if (!subscription) {
168
- logger_1.logger.error('Subscription not found for scheduled job; skipping.', {
200
+ if (RUNNING_JOB_IDS.has(job.id)) {
201
+ logger_1.logger.warn('Scheduled job is already running; skipping duplicate execution.', {
169
202
  jobId: job.id,
170
- subscriptionId: job.subscriptionId,
203
+ label: job.label,
171
204
  });
172
205
  return;
173
206
  }
174
- const sessionId = (0, cuid_1.default)();
207
+ RUNNING_JOB_IDS.add(job.id);
208
+ logger_1.logger.info('Executing scheduled job.', { jobId: job.id, label: job.label });
175
209
  try {
176
- await runCronJob(job, subscription, sessionId);
177
- logger_1.logger.info('Scheduled job completed.', { jobId: job.id, label: job.label });
178
- }
179
- catch (err) {
180
- logger_1.logger.error('Scheduled job failed.', { jobId: job.id, label: job.label, error: err });
181
- // Fall through — always update lastRunAt so the next poll does not re-run immediately.
182
- }
183
- const now = new Date();
184
- if (job.cronExpression) {
185
- await job.update({
186
- lastRunAt: now,
187
- nextRunAt: computeNextRunAt(job.cronExpression, null),
188
- lastRunSessionId: sessionId,
189
- });
210
+ const subscription = await subscription_1.Subscription.findByPk(job.subscriptionId);
211
+ if (!subscription) {
212
+ logger_1.logger.error('Subscription not found for scheduled job; skipping.', {
213
+ jobId: job.id,
214
+ subscriptionId: job.subscriptionId,
215
+ });
216
+ return;
217
+ }
218
+ const sessionId = (0, cuid_1.default)();
219
+ try {
220
+ await runCronJob(job, subscription, sessionId);
221
+ logger_1.logger.info('Scheduled job completed.', { jobId: job.id, label: job.label });
222
+ }
223
+ catch (err) {
224
+ logger_1.logger.error('Scheduled job failed.', { jobId: job.id, label: job.label, error: err });
225
+ // Fall through — always update lastRunAt so the next poll does not re-run immediately.
226
+ }
227
+ const now = new Date();
228
+ if (job.cronExpression) {
229
+ await job.update({
230
+ lastRunAt: now,
231
+ nextRunAt: computeNextRunAt(job.cronExpression, null),
232
+ lastRunSessionId: sessionId,
233
+ });
234
+ }
235
+ else {
236
+ await job.update({
237
+ lastRunAt: now,
238
+ isActive: false,
239
+ nextRunAt: null,
240
+ lastRunSessionId: sessionId,
241
+ });
242
+ }
190
243
  }
191
- else {
192
- await job.update({
193
- lastRunAt: now,
194
- isActive: false,
195
- nextRunAt: null,
196
- lastRunSessionId: sessionId,
197
- });
244
+ finally {
245
+ RUNNING_JOB_IDS.delete(job.id);
198
246
  }
199
247
  }
package/dist/onboard.js CHANGED
@@ -9,8 +9,8 @@ const fs_1 = __importDefault(require("fs"));
9
9
  const path_1 = __importDefault(require("path"));
10
10
  const utils_1 = require("./utils");
11
11
  const AI_PROVIDERS = [
12
- { name: 'OpenAI (gpt-4o-mini / gpt-5.1)', value: 'openai' },
13
- { name: 'Anthropic — Claude (claude-haiku / claude-sonnet)', value: 'anthropic' },
12
+ { name: 'OpenAI (gpt-4o-mini / gpt-5.5)', value: 'openai' },
13
+ { name: 'Anthropic — Claude (claude-haiku / claude-opus)', value: 'anthropic' },
14
14
  { name: 'Google Gemini (gemini-2.5-flash / gemini-2.5-pro)', value: 'gemini' },
15
15
  ];
16
16
  const SEARCH_PROVIDERS = [
package/package.json CHANGED
@@ -4,7 +4,7 @@
4
4
  "access": "public",
5
5
  "registry": "https://registry.npmjs.org/"
6
6
  },
7
- "version": "1.0.37",
7
+ "version": "1.0.39",
8
8
  "description": "CLI for onboarding users to Omnikey AI and configuring OPENAI_API_KEY. Use Yarn for install/build.",
9
9
  "engines": {
10
10
  "node": ">=14.0.0",
@@ -27,7 +27,7 @@
27
27
  "author": "Gurinder Rawala",
28
28
  "license": "MIT",
29
29
  "dependencies": {
30
- "@anthropic-ai/sdk": "^0.80.0",
30
+ "@anthropic-ai/sdk": "^0.96.0",
31
31
  "@google-cloud/storage": "^7.19.0",
32
32
  "@google/genai": "^1.46.0",
33
33
  "axios": "^1.13.5",
@@ -38,7 +38,7 @@
38
38
  "express": "^4.21.2",
39
39
  "inquirer": "^9.0.0",
40
40
  "jsonwebtoken": "^9.0.3",
41
- "openai": "^6.16.0",
41
+ "openai": "^6.37.0",
42
42
  "pg": "^8.18.0",
43
43
  "pg-hstore": "^2.3.4",
44
44
  "playwright-core": "^1.50.0",
package/src/onboard.ts CHANGED
@@ -4,8 +4,8 @@ import path from 'path';
4
4
  import { getConfigDir, getConfigPath } from './utils';
5
5
 
6
6
  const AI_PROVIDERS = [
7
- { name: 'OpenAI (gpt-4o-mini / gpt-5.1)', value: 'openai' },
8
- { name: 'Anthropic — Claude (claude-haiku / claude-sonnet)', value: 'anthropic' },
7
+ { name: 'OpenAI (gpt-4o-mini / gpt-5.5)', value: 'openai' },
8
+ { name: 'Anthropic — Claude (claude-haiku / claude-opus)', value: 'anthropic' },
9
9
  { name: 'Google Gemini (gemini-2.5-flash / gemini-2.5-pro)', value: 'gemini' },
10
10
  ];
11
11