rentabots-sdk 1.7.1 → 1.7.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,12 +1,6 @@
1
1
  import { z } from 'zod';
2
2
  import { EventEmitter } from 'events';
3
3
  import { ChildProcess } from 'child_process';
4
- export interface LLMConfig {
5
- provider: 'openclaw' | 'openai' | 'anthropic' | 'google' | 'groq' | 'mistral' | 'custom';
6
- apiKey?: string;
7
- model?: string;
8
- baseUrl?: string;
9
- }
10
4
  export declare const JobStatusSchema: z.ZodEnum<{
11
5
  open: "open";
12
6
  in_progress: "in_progress";
@@ -59,8 +53,6 @@ export interface AgentOptions {
59
53
  workspaceRoot?: string;
60
54
  workerScriptPath?: string;
61
55
  commandWhitelist?: string[];
62
- llm?: LLMConfig;
63
- capabilities?: string[];
64
56
  }
65
57
  export interface ExecuteOptions {
66
58
  timeout?: number;
@@ -74,7 +66,7 @@ export interface AutopilotOptions {
74
66
  bidTemplate?: string;
75
67
  }
76
68
  /**
77
- * 🦀 RENTABOTS MASTER SDK (v1.7.1)
69
+ * 🦀 RENTABOTS MASTER SDK (v1.5.7)
78
70
  * Robust, production-grade autonomous agent runtime.
79
71
  * UPDATES:
80
72
  * - Agent Isolation Protocol (Private workspaces).
@@ -86,7 +78,7 @@ export declare class Agent extends EventEmitter {
86
78
  private apiKey;
87
79
  readonly baseUrl: string;
88
80
  readonly socketUrl: string;
89
- agentId: string | null;
81
+ private agentId;
90
82
  private api;
91
83
  private socket;
92
84
  private debug;
@@ -98,11 +90,9 @@ export declare class Agent extends EventEmitter {
98
90
  completedMissions: Map<string, Job>;
99
91
  private bidCache;
100
92
  private seenMessages;
101
- workers: Map<string, ChildProcess>;
93
+ private workers;
102
94
  private autopilotTimer;
103
- autopilotPaused: boolean;
104
- private llmConfig;
105
- private capabilities;
95
+ private autopilotPaused;
106
96
  constructor(options?: AgentOptions);
107
97
  /**
108
98
  * Establish grid connection and initialize sockets.
@@ -135,8 +125,40 @@ export declare class Agent extends EventEmitter {
135
125
  error: any;
136
126
  jobId?: undefined;
137
127
  }>;
128
+ setProgress(jobId: string, percent: number): Promise<{
129
+ success: boolean;
130
+ error?: string;
131
+ }>;
132
+ createRepo(jobId: string, name?: string): Promise<{
133
+ success: boolean;
134
+ repo?: any;
135
+ error?: string;
136
+ }>;
137
+ getRepo(jobId: string): Promise<{
138
+ success: boolean;
139
+ exists: boolean;
140
+ repo?: any;
141
+ error?: string;
142
+ }>;
143
+ uploadRepoFile(jobId: string, filePath: string, content: string | Buffer, isBlob?: boolean): Promise<{
144
+ success: boolean;
145
+ repoId?: string;
146
+ error?: string;
147
+ }>;
148
+ downloadRepoFile(jobId: string, filePath: string): Promise<{
149
+ success: boolean;
150
+ content?: string;
151
+ isBlob?: boolean;
152
+ error?: string;
153
+ }>;
138
154
  private isBinaryFile;
139
155
  deliver(jobId: string, files: string[]): Promise<void>;
156
+ runTests(jobId: string, command?: string): Promise<{
157
+ success: boolean;
158
+ passed: boolean;
159
+ output?: string;
160
+ error?: string;
161
+ }>;
140
162
  verifyDeliverables(jobId: string): Promise<{
141
163
  success: boolean;
142
164
  verified: boolean;
@@ -147,16 +169,26 @@ export declare class Agent extends EventEmitter {
147
169
  preDeliveryCheck(jobId: string): Promise<{
148
170
  canDeliver: boolean;
149
171
  checks: any[];
172
+ recommendations?: string[];
150
173
  }>;
151
174
  markComplete(jobId: string): Promise<any>;
152
- initLLM(config?: Partial<LLMConfig>): Promise<void>;
175
+ private llmClient?;
176
+ initLLM(config?: {
177
+ provider?: 'groq' | 'openai' | 'anthropic' | 'google' | 'nvidia';
178
+ apiKey?: string;
179
+ model?: string;
180
+ }): Promise<void>;
153
181
  generate(prompt: string, options?: {
182
+ model?: string;
183
+ temperature?: number;
184
+ maxTokens?: number;
154
185
  system?: string;
155
186
  }): Promise<{
156
187
  success: boolean;
157
188
  text?: string;
158
189
  error?: string;
159
190
  }>;
191
+ askLLM(systemPrompt: string, userPrompt: string): Promise<string | null>;
160
192
  analyzeRequirements(job: Job): Promise<{
161
193
  success: boolean;
162
194
  requirements?: any;
@@ -176,106 +208,6 @@ export declare class Agent extends EventEmitter {
176
208
  bid(jobId: string, amount: number, message: string): Promise<any>;
177
209
  sendMessage(jobId: string, content: string): Promise<import("axios").AxiosResponse<any, any, {}>>;
178
210
  setTyping(jobId: string, isTyping?: boolean): Promise<void>;
179
- /**
180
- * Update mission progress (0-100%)
181
- */
182
- setProgress(jobId: string, progress: number): Promise<{
183
- success: boolean;
184
- error?: string;
185
- }>;
186
- /**
187
- * Initialize isolated workspace for a mission, returns the path
188
- */
189
- initializeMission(jobId: string): Promise<string>;
190
- /**
191
- * One-shot bid search: find open jobs matching skills and bid on them
192
- */
193
- findAndBid(options?: {
194
- skills?: string[];
195
- minBudget?: number;
196
- bidMessage?: string;
197
- }): Promise<{
198
- success: boolean;
199
- bidsPlaced: number;
200
- error?: undefined;
201
- } | {
202
- success: boolean;
203
- error: any;
204
- bidsPlaced?: undefined;
205
- }>;
206
- /**
207
- * Notify the agent's human owner (via API)
208
- */
209
- notifyOwner(message: string): Promise<import("axios").AxiosResponse<any, any, {}> | undefined>;
210
- /**
211
- * Log a message to the agent's server-side log
212
- */
213
- log(message: string, level?: string): Promise<import("axios").AxiosResponse<any, any, {}> | undefined>;
214
- /**
215
- * Alias: triggered when human hires this agent
216
- */
217
- onHired(callback: (job: Job) => void): void;
218
- /**
219
- * Alias: triggered on incoming chat message
220
- */
221
- onMessage(callback: (msg: Message) => void): void;
222
- /**
223
- * Upload a deliverable file to a mission
224
- */
225
- uploadDeliverable(jobId: string, url: string, name: string): Promise<import("axios").AxiosResponse<any, any, {}> | undefined>;
226
- /**
227
- * Create a private repository for a mission
228
- */
229
- createRepo(jobId: string, name?: string): Promise<{
230
- success: boolean;
231
- repo?: any;
232
- error?: string;
233
- }>;
234
- /**
235
- * Upload files to a mission's repository
236
- */
237
- uploadToRepo(jobId: string, files: {
238
- path: string;
239
- content: string;
240
- }[]): Promise<({
241
- path: string;
242
- success: boolean;
243
- error?: undefined;
244
- } | {
245
- path: string;
246
- success: boolean;
247
- error: any;
248
- })[]>;
249
- uploadRepoFile(jobId: string, filePath: string, content: string | Buffer, isBlob?: boolean): Promise<{
250
- success: boolean;
251
- repoId?: string;
252
- error?: string;
253
- }>;
254
- /**
255
- * Check if a repository exists for a mission and get its info
256
- */
257
- getRepo(jobId: string): Promise<{
258
- success: boolean;
259
- exists: boolean;
260
- repo?: any;
261
- error?: string;
262
- }>;
263
- /**
264
- * Ask the LLM brain a question. Works with OpenClaw or any custom API provider.
265
- * @param systemPrompt System-level instructions
266
- * @param userMessage The user/human message to respond to
267
- * @returns The LLM response text, or null on failure
268
- */
269
- askLLM(systemPrompt: string, userMessage: string): Promise<string | null>;
270
- /**
271
- * OpenClaw bridge: sends query to local OpenClaw instance
272
- */
273
- private askOpenClaw;
274
- /**
275
- * Custom LLM API: supports OpenAI-compatible endpoints (OpenAI, Groq, Mistral, etc.)
276
- * Also supports Anthropic and Google with adapter logic.
277
- */
278
- private askCustomLLM;
279
211
  private enrichJob;
280
212
  private syncFromCloud;
281
213
  private loadState;
package/dist/index.js CHANGED
@@ -38,7 +38,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
39
  exports.Agent = exports.MessageSchema = exports.JobSchema = exports.JobStatusSchema = void 0;
40
40
  const axios_1 = __importDefault(require("axios"));
41
- // import { LLMClient } from "./llm"; // Removed missing module
41
+ const llm_1 = require("./llm");
42
42
  const socket_io_client_1 = require("socket.io-client");
43
43
  const zod_1 = require("zod");
44
44
  const events_1 = require("events");
@@ -46,22 +46,6 @@ const fs = __importStar(require("fs"));
46
46
  const path = __importStar(require("path"));
47
47
  const child_process_1 = require("child_process");
48
48
  const sanitizer_1 = require("./utils/sanitizer");
49
- const DEFAULT_MODELS = {
50
- openclaw: 'default',
51
- openai: 'gpt-4o-mini',
52
- anthropic: 'claude-sonnet-4-20250514',
53
- google: 'gemini-2.0-flash',
54
- groq: 'llama-3.3-70b-versatile',
55
- mistral: 'mistral-large-latest',
56
- custom: 'gpt-4o-mini',
57
- };
58
- const PROVIDER_URLS = {
59
- openai: 'https://api.openai.com/v1',
60
- anthropic: 'https://api.anthropic.com/v1',
61
- google: 'https://generativelanguage.googleapis.com/v1beta',
62
- groq: 'https://api.groq.com/openai/v1',
63
- mistral: 'https://api.mistral.ai/v1',
64
- };
65
49
  // --- SCHEMAS & TYPES ---
66
50
  exports.JobStatusSchema = zod_1.z.enum(['open', 'in_progress', 'completed', 'archived', 'disputed']);
67
51
  exports.JobSchema = zod_1.z.object({
@@ -87,14 +71,14 @@ exports.MessageSchema = zod_1.z.object({
87
71
  })
88
72
  });
89
73
  // --- CORE SDK ENGINE ---
90
- let SDK_VERSION = '1.7.1'; // BUMP to v1.7.1
74
+ let SDK_VERSION = '1.7.7'; // fallback when package.json is unavailable
91
75
  try {
92
76
  const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '..', 'package.json'), 'utf8'));
93
77
  SDK_VERSION = pkg.version;
94
78
  }
95
79
  catch (e) { }
96
80
  /**
97
- * 🦀 RENTABOTS MASTER SDK (v1.7.1)
81
+ * 🦀 RENTABOTS MASTER SDK (v1.5.7)
98
82
  * Robust, production-grade autonomous agent runtime.
99
83
  * UPDATES:
100
84
  * - Agent Isolation Protocol (Private workspaces).
@@ -116,32 +100,11 @@ class Agent extends events_1.EventEmitter {
116
100
  // Autopilot state
117
101
  this.autopilotTimer = null;
118
102
  this.autopilotPaused = false;
119
- this.apiKey = options.apiKey || process.env.RENTABOTS_API_KEY || process.env.RENTABOTS_SECRET_KEY || '';
120
- this.baseUrl = (options.baseUrl || process.env.RENTABOTS_API_URL || 'https://rentabots.com/api').replace(/\/$/, '');
121
- // --- 🛡️ FIX: Robust Socket URL Construction ---
122
- // Don't rely on brittle .replace('/api', ''). Use proper URL parsing.
123
- if (process.env.RENTABOTS_SOCKET_URL) {
124
- this.socketUrl = process.env.RENTABOTS_SOCKET_URL.replace(/\/$/, '');
125
- }
126
- else {
127
- try {
128
- const parsed = new URL(this.baseUrl);
129
- this.socketUrl = `${parsed.protocol}//${parsed.host}`;
130
- }
131
- catch {
132
- this.socketUrl = this.baseUrl.replace(/\/api.*$/, '');
133
- }
134
- }
103
+ this.apiKey = options.apiKey || process.env.RENTABOTS_API_KEY || '';
104
+ this.baseUrl = (options.baseUrl || 'https://rentabots.com/api').replace(/\/$/, '');
105
+ this.socketUrl = (process.env.RENTABOTS_SOCKET_URL || this.baseUrl.replace('/api', '')).replace(/\/$/, '');
135
106
  this.debug = options.debug || false;
136
107
  this.commandWhitelist = options.commandWhitelist || null;
137
- this.capabilities = options.capabilities || [];
138
- // --- 🧠 LLM CONFIGURATION ---
139
- this.llmConfig = options.llm || {
140
- provider: process.env.LLM_PROVIDER || 'openclaw',
141
- apiKey: process.env.LLM_API_KEY || '',
142
- model: process.env.LLM_MODEL || '',
143
- baseUrl: process.env.LLM_BASE_URL || '',
144
- };
145
108
  // --- 🛡️ AGENT ISOLATION ---
146
109
  // By default, create a unique workspace root if not provided.
147
110
  // We will finalize this in connect() once we have the agentId.
@@ -372,26 +335,11 @@ class Agent extends events_1.EventEmitter {
372
335
  async spawnWorker(job) {
373
336
  if (!fs.existsSync(this.workerScriptPath))
374
337
  throw new Error("Worker script not found.");
375
- // --- 🛡️ FIX: Write job data to temp file instead of CLI args ---
376
- // CLI args have OS length limits (~8KB on Windows). Long job descriptions
377
- // would cause silent spawn failures or data truncation.
378
- const jobDataPath = path.join(this.workspaceRoot, `_job_${job.id}.json`);
379
- fs.writeFileSync(jobDataPath, JSON.stringify(job));
380
- const worker = (0, child_process_1.fork)(this.workerScriptPath, [jobDataPath], {
381
- // --- 🛡️ FIX: stdin set to 'ignore' for background/daemon safety ---
382
- // 'inherit' causes crashes when no TTY is available (nohup, systemd, PM2).
383
- stdio: ['ignore', 'pipe', 'pipe', 'ipc'],
338
+ const worker = (0, child_process_1.fork)(this.workerScriptPath, [JSON.stringify(job)], {
384
339
  env: { ...process.env, RENTABOTS_API_KEY: this.apiKey }
385
340
  });
386
341
  this.workers.set(job.id, worker);
387
- worker.on('exit', () => {
388
- this.workers.delete(job.id);
389
- // Cleanup temp file
390
- try {
391
- fs.unlinkSync(jobDataPath);
392
- }
393
- catch { }
394
- });
342
+ worker.on('exit', () => this.workers.delete(job.id));
395
343
  return worker;
396
344
  }
397
345
  // --- ⚡ ACTIONS ---
@@ -464,6 +412,71 @@ class Agent extends events_1.EventEmitter {
464
412
  return { success: false, error: e.response?.data?.error || e.message };
465
413
  }
466
414
  }
415
+ // --- 📊 PROGRESS TRACKING ---
416
+ async setProgress(jobId, percent) {
417
+ try {
418
+ const progress = Math.min(100, Math.max(0, Math.round(percent)));
419
+ await this.api.post(`jobs/${jobId}/progress`, { progress });
420
+ const job = this.activeMissions.get(jobId);
421
+ if (job) {
422
+ job.progress = progress;
423
+ this.activeMissions.set(jobId, job);
424
+ this.saveState();
425
+ }
426
+ return { success: true };
427
+ }
428
+ catch (e) {
429
+ return { success: false, error: e.response?.data?.error || e.message };
430
+ }
431
+ }
432
+ // --- 📦 REPO MANAGEMENT ---
433
+ async createRepo(jobId, name) {
434
+ try {
435
+ const res = await this.api.post(`jobs/${jobId}/repo`, { name });
436
+ this.logInternal(`Repo created: ${res.data.repo?.name}`);
437
+ return { success: true, repo: res.data.repo };
438
+ }
439
+ catch (e) {
440
+ return { success: false, error: e.response?.data?.error || e.message };
441
+ }
442
+ }
443
+ async getRepo(jobId) {
444
+ try {
445
+ const res = await this.api.get(`jobs/${jobId}/repo`);
446
+ return { success: true, exists: res.data.exists, repo: res.data.repo };
447
+ }
448
+ catch (e) {
449
+ if (e.response?.status === 404)
450
+ return { success: true, exists: false };
451
+ return { success: false, exists: false, error: e.response?.data?.error || e.message };
452
+ }
453
+ }
454
+ async uploadRepoFile(jobId, filePath, content, isBlob = false) {
455
+ try {
456
+ let finalContent;
457
+ if (Buffer.isBuffer(content)) {
458
+ finalContent = content.toString('base64');
459
+ isBlob = true;
460
+ }
461
+ else {
462
+ finalContent = String(content);
463
+ }
464
+ const res = await this.api.post(`jobs/${jobId}/repo/files`, { path: filePath, content: finalContent, isBlob });
465
+ return { success: true, repoId: res.data.repoId };
466
+ }
467
+ catch (e) {
468
+ return { success: false, error: e.response?.data?.error || e.message };
469
+ }
470
+ }
471
+ async downloadRepoFile(jobId, filePath) {
472
+ try {
473
+ const res = await this.api.get(`jobs/${jobId}/repo/files`, { params: { path: filePath } });
474
+ return { success: true, content: res.data.content, isBlob: res.data.isBlob };
475
+ }
476
+ catch (e) {
477
+ return { success: false, error: e.response?.data?.error || e.message };
478
+ }
479
+ }
467
480
  isBinaryFile(filePath) {
468
481
  const textExts = ['.js', '.ts', '.json', '.md', '.txt', '.html', '.css', '.yml', '.yaml', '.toml'];
469
482
  return !textExts.includes(path.extname(filePath).toLowerCase());
@@ -479,6 +492,29 @@ class Agent extends events_1.EventEmitter {
479
492
  }
480
493
  }
481
494
  // --- ✅ VERIFICATION FLOW ---
495
+ // --- 🧪 TEST RUNNER ---
496
+ async runTests(jobId, command = 'npm test') {
497
+ try {
498
+ const cwd = path.join(this.workspaceRoot, jobId);
499
+ if (!fs.existsSync(cwd)) {
500
+ return { success: false, passed: false, error: 'Workspace not found' };
501
+ }
502
+ // Check if test command exists in package.json
503
+ const pkgPath = path.join(cwd, 'package.json');
504
+ let testCmd = command;
505
+ if (fs.existsSync(pkgPath)) {
506
+ const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
507
+ if (!pkg.scripts?.test && command === 'npm test') {
508
+ return { success: true, passed: true, output: 'No tests configured' };
509
+ }
510
+ }
511
+ const result = await this.execute(jobId, testCmd.split(' ')[0], testCmd.split(' ').slice(1), { timeout: 120000 });
512
+ return { success: true, passed: result.exitCode === 0, output: result.output };
513
+ }
514
+ catch (e) {
515
+ return { success: false, passed: false, error: e.message };
516
+ }
517
+ }
482
518
  async verifyDeliverables(jobId) {
483
519
  try {
484
520
  const cwd = path.join(this.workspaceRoot, jobId);
@@ -510,10 +546,25 @@ class Agent extends events_1.EventEmitter {
510
546
  }
511
547
  async preDeliveryCheck(jobId) {
512
548
  const verify = await this.verifyDeliverables(jobId);
513
- const checks = [{ name: 'Deliverables', passed: verify.verified, details: verify.files?.length + ' files' }];
549
+ const testResult = await this.runTests(jobId);
550
+ const checks = [
551
+ { name: 'Deliverables', passed: verify.verified, details: (verify.files?.length || 0) + ' files' },
552
+ { name: 'Tests', passed: testResult.passed, details: testResult.passed ? 'Passed' : 'Failed/No tests' }
553
+ ];
514
554
  const job = this.activeMissions.get(jobId);
515
- checks.push({ name: 'Progress', passed: (job?.progress || 0) >= 80, details: (job?.progress || 0) + '%' });
516
- return { canDeliver: checks.every(c => c.passed), checks };
555
+ if (job?.progress && job.progress > 0) {
556
+ checks.push({ name: 'Progress', passed: job.progress >= 80, details: job.progress + '%' });
557
+ }
558
+ const recommendations = [];
559
+ if (!verify.verified && verify.issues)
560
+ recommendations.push(...verify.issues);
561
+ if (!testResult.passed && testResult.output)
562
+ recommendations.push('Tests failing: ' + testResult.output.slice(0, 100));
563
+ return {
564
+ canDeliver: checks.every(c => c.passed),
565
+ checks,
566
+ recommendations: recommendations.length > 0 ? recommendations : undefined
567
+ };
517
568
  }
518
569
  async markComplete(jobId) {
519
570
  const res = await this.api.post(`jobs/${jobId}/complete`, { userId: this.agentId, role: 'agent' });
@@ -526,21 +577,31 @@ class Agent extends events_1.EventEmitter {
526
577
  }
527
578
  return res.data;
528
579
  }
529
- // --- 🧠 LLM INTEGRATION ---
530
580
  async initLLM(config) {
531
- if (config) {
532
- this.llmConfig = { ...this.llmConfig, ...config };
533
- }
581
+ this.llmClient = new llm_1.LLMClient(config);
534
582
  }
535
583
  async generate(prompt, options = {}) {
536
- const res = await this.askLLM(options.system || 'You are a helpful assistant.', prompt);
537
- if (res)
538
- return { success: true, text: res };
539
- return { success: false, error: 'LLM request failed' };
584
+ if (!this.llmClient) {
585
+ try {
586
+ this.llmClient = new llm_1.LLMClient();
587
+ }
588
+ catch (e) {
589
+ return { success: false, error: e.message };
590
+ }
591
+ }
592
+ return this.llmClient.generate(prompt, options);
593
+ }
594
+ async askLLM(systemPrompt, userPrompt) {
595
+ const res = await this.generate(userPrompt, {
596
+ system: systemPrompt,
597
+ temperature: 0.4,
598
+ maxTokens: 1200,
599
+ });
600
+ return res.success ? (res.text || null) : null;
540
601
  }
541
602
  async analyzeRequirements(job) {
542
603
  const prompt = 'Analyze job, return JSON with techStack, features, deliverables, timeline, risks. Title: ' + job.title + ' Desc: ' + job.description.slice(0, 500);
543
- const res = await this.generate(prompt, { system: 'Extract requirements as JSON' });
604
+ const res = await this.generate(prompt, { system: 'Extract requirements as JSON', temperature: 0.3 });
544
605
  if (!res.success)
545
606
  return res;
546
607
  try {
@@ -552,14 +613,14 @@ class Agent extends events_1.EventEmitter {
552
613
  }
553
614
  }
554
615
  async codeGenerator(prompt, language = 'javascript') {
555
- const res = await this.generate(prompt, { system: 'Output only ' + language + ' code' });
616
+ const res = await this.generate(prompt, { system: 'Output only ' + language + ' code', temperature: 0.2, maxTokens: 4000 });
556
617
  if (!res.success)
557
618
  return res;
558
619
  const code = res.text?.match(/```(?:\w+)?\n?([\s\S]*?)```/)?.[1] || res.text;
559
620
  return { success: true, code: code?.trim() };
560
621
  }
561
622
  async reviewCode(code, requirements) {
562
- const res = await this.generate('Review code. Reqs: ' + requirements + ' Code: ' + code.slice(0, 2000), { system: 'Code reviewer' });
623
+ const res = await this.generate('Review code. Reqs: ' + requirements + ' Code: ' + code.slice(0, 2000), { system: 'Code reviewer', temperature: 0.3 });
563
624
  if (!res.success)
564
625
  return { success: false, passed: false, error: res.error };
565
626
  const passed = !!(res.text?.toUpperCase().includes('PASS') && !res.text?.toUpperCase().includes('FAIL'));
@@ -587,278 +648,6 @@ class Agent extends events_1.EventEmitter {
587
648
  async setTyping(jobId, isTyping = true) {
588
649
  this.socket?.emit('typing_state', { jobId, isTyping });
589
650
  }
590
- // --- 📡 MISSING METHODS (Fixed in v1.6.0) ---
591
- /**
592
- * Update mission progress (0-100%)
593
- */
594
- async setProgress(jobId, progress) {
595
- try {
596
- const res = await this.api.post(`jobs/${jobId}/progress`, { progress, agentId: this.agentId });
597
- const job = this.activeMissions.get(jobId);
598
- if (job) {
599
- job.progress = progress;
600
- this.activeMissions.set(jobId, job);
601
- this.saveState();
602
- }
603
- return { success: true };
604
- }
605
- catch (e) {
606
- this.logInternal(`Failed to update progress for ${jobId}: ${e.message}`);
607
- return { success: false, error: e.message };
608
- }
609
- }
610
- /**
611
- * Initialize isolated workspace for a mission, returns the path
612
- */
613
- async initializeMission(jobId) {
614
- const cwd = path.join(this.workspaceRoot, jobId);
615
- if (!fs.existsSync(cwd))
616
- fs.mkdirSync(cwd, { recursive: true });
617
- this.logInternal(`Mission workspace initialized: ${cwd}`);
618
- return cwd;
619
- }
620
- /**
621
- * One-shot bid search: find open jobs matching skills and bid on them
622
- */
623
- async findAndBid(options = {}) {
624
- try {
625
- const res = await this.api.get('jobs?status=open');
626
- const jobs = res.data.data.map((j) => this.enrichJob(j));
627
- const skills = options.skills || this.capabilities || [];
628
- let bidCount = 0;
629
- for (const job of jobs) {
630
- if (this.bidCache.has(job.id))
631
- continue;
632
- if (options.minBudget && job.budgetAmount < options.minBudget)
633
- continue;
634
- const matches = skills.length === 0 || skills.some(s => job.title.toLowerCase().includes(s.toLowerCase()) ||
635
- job.description.toLowerCase().includes(s.toLowerCase()));
636
- if (matches) {
637
- const msg = options.bidMessage || `I am an autonomous agent with ${this.completedMissions.size} completed missions. Skills: ${skills.join(', ')}. Ready to deliver.`;
638
- await this.bid(job.id, job.budgetAmount, msg);
639
- bidCount++;
640
- }
641
- }
642
- this.logInternal(`findAndBid: Placed ${bidCount} bids on ${jobs.length} open jobs.`);
643
- return { success: true, bidsPlaced: bidCount };
644
- }
645
- catch (e) {
646
- this.logInternal(`findAndBid failed: ${e.message}`);
647
- return { success: false, error: e.message };
648
- }
649
- }
650
- /**
651
- * Notify the agent's human owner (via API)
652
- */
653
- async notifyOwner(message) {
654
- try {
655
- return await this.api.post('agents/me/notify', { message });
656
- }
657
- catch (e) {
658
- this.logInternal(`notifyOwner failed: ${e.message}`);
659
- }
660
- }
661
- /**
662
- * Log a message to the agent's server-side log
663
- */
664
- async log(message, level = 'INFO') {
665
- this.logInternal(`[${level}] ${message}`);
666
- try {
667
- return await this.api.post('agents/me/logs', { message, level });
668
- }
669
- catch (e) {
670
- // Swallow — logging should never crash the agent
671
- }
672
- }
673
- /**
674
- * Alias: triggered when human hires this agent
675
- */
676
- onHired(callback) {
677
- this.on('assignment', callback);
678
- }
679
- /**
680
- * Alias: triggered on incoming chat message
681
- */
682
- onMessage(callback) {
683
- this.on('message', callback);
684
- }
685
- /**
686
- * Upload a deliverable file to a mission
687
- */
688
- async uploadDeliverable(jobId, url, name) {
689
- try {
690
- return await this.api.post(`jobs/${jobId}/files`, {
691
- url,
692
- name,
693
- type: 'deliverable',
694
- userId: this.agentId
695
- });
696
- }
697
- catch (e) {
698
- this.logInternal(`uploadDeliverable failed: ${e.message}`);
699
- }
700
- }
701
- /**
702
- * Create a private repository for a mission
703
- */
704
- async createRepo(jobId, name) {
705
- try {
706
- const res = await this.api.post(`jobs/${jobId}/repo`, { name });
707
- this.logInternal(`Repository created for job ${jobId}`);
708
- return { success: true, repo: res.data.repo };
709
- }
710
- catch (e) {
711
- this.logInternal(`createRepo failed: ${e.message}`);
712
- return { success: false, error: e.message };
713
- }
714
- }
715
- /**
716
- * Upload files to a mission's repository
717
- */
718
- async uploadToRepo(jobId, files) {
719
- const results = [];
720
- for (const file of files) {
721
- try {
722
- const res = await this.api.post(`jobs/${jobId}/repo/files`, file);
723
- results.push({ path: file.path, success: true });
724
- }
725
- catch (e) {
726
- results.push({ path: file.path, success: false, error: e.message });
727
- }
728
- }
729
- return results;
730
- }
731
- async uploadRepoFile(jobId, filePath, content, isBlob = false) {
732
- try {
733
- let finalContent;
734
- if (Buffer.isBuffer(content)) {
735
- finalContent = content.toString('base64');
736
- isBlob = true;
737
- }
738
- else {
739
- finalContent = String(content);
740
- }
741
- const res = await this.api.post(`jobs/${jobId}/repo/files`, { path: filePath, content: finalContent, isBlob });
742
- return { success: true, repoId: res.data.repoId };
743
- }
744
- catch (e) {
745
- return { success: false, error: e.response?.data?.error || e.message };
746
- }
747
- }
748
- /**
749
- * Check if a repository exists for a mission and get its info
750
- */
751
- async getRepo(jobId) {
752
- try {
753
- const res = await this.api.get(`jobs/${jobId}/repo`);
754
- return { success: true, exists: res.data.exists, repo: res.data.repo };
755
- }
756
- catch (e) {
757
- if (e.response?.status === 404)
758
- return { success: true, exists: false };
759
- return { success: false, exists: false, error: e.response?.data?.error || e.message };
760
- }
761
- }
762
- // --- 🧠 LLM ABSTRACTION LAYER ---
763
- /**
764
- * Ask the LLM brain a question. Works with OpenClaw or any custom API provider.
765
- * @param systemPrompt System-level instructions
766
- * @param userMessage The user/human message to respond to
767
- * @returns The LLM response text, or null on failure
768
- */
769
- async askLLM(systemPrompt, userMessage) {
770
- const provider = this.llmConfig.provider;
771
- if (provider === 'openclaw') {
772
- return this.askOpenClaw(systemPrompt, userMessage);
773
- }
774
- else {
775
- return this.askCustomLLM(systemPrompt, userMessage);
776
- }
777
- }
778
- /**
779
- * OpenClaw bridge: sends query to local OpenClaw instance
780
- */
781
- async askOpenClaw(systemPrompt, userMessage) {
782
- return new Promise((resolve) => {
783
- const fullQuery = `${systemPrompt}\n\nUser: ${userMessage}`;
784
- const child = (0, child_process_1.spawn)('openclaw', ['session:chat', fullQuery], {
785
- timeout: 60000,
786
- shell: false
787
- });
788
- let output = '';
789
- child.stdout?.on('data', (d) => output += d.toString());
790
- child.stderr?.on('data', (d) => output += d.toString());
791
- child.on('close', (code) => {
792
- if (code === 0 && output.trim())
793
- resolve(output.trim());
794
- else
795
- resolve(null);
796
- });
797
- child.on('error', () => resolve(null));
798
- });
799
- }
800
- /**
801
- * Custom LLM API: supports OpenAI-compatible endpoints (OpenAI, Groq, Mistral, etc.)
802
- * Also supports Anthropic and Google with adapter logic.
803
- */
804
- async askCustomLLM(systemPrompt, userMessage) {
805
- const provider = this.llmConfig.provider;
806
- const apiKey = this.llmConfig.apiKey;
807
- const model = this.llmConfig.model || DEFAULT_MODELS[provider] || 'gpt-4o-mini';
808
- const baseUrl = this.llmConfig.baseUrl || PROVIDER_URLS[provider];
809
- if (!apiKey) {
810
- this.logInternal('LLM API key not configured. Falling back to basic responses.');
811
- return null;
812
- }
813
- try {
814
- if (provider === 'anthropic') {
815
- // Anthropic uses a different API format
816
- const res = await axios_1.default.post(`${baseUrl}/messages`, {
817
- model,
818
- max_tokens: 1024,
819
- system: systemPrompt,
820
- messages: [{ role: 'user', content: userMessage }]
821
- }, {
822
- headers: {
823
- 'x-api-key': apiKey,
824
- 'anthropic-version': '2023-06-01',
825
- 'Content-Type': 'application/json'
826
- },
827
- timeout: 30000
828
- });
829
- return res.data.content?.[0]?.text || null;
830
- }
831
- else if (provider === 'google') {
832
- // Google Gemini API
833
- const res = await axios_1.default.post(`${baseUrl}/models/${model}:generateContent?key=${apiKey}`, {
834
- contents: [{ parts: [{ text: `${systemPrompt}\n\n${userMessage}` }] }]
835
- }, { timeout: 30000 });
836
- return res.data.candidates?.[0]?.content?.parts?.[0]?.text || null;
837
- }
838
- else {
839
- // OpenAI-compatible (OpenAI, Groq, Mistral, Custom)
840
- const res = await axios_1.default.post(`${baseUrl}/chat/completions`, {
841
- model,
842
- messages: [
843
- { role: 'system', content: systemPrompt },
844
- { role: 'user', content: userMessage }
845
- ],
846
- max_tokens: 1024
847
- }, {
848
- headers: {
849
- 'Authorization': `Bearer ${apiKey}`,
850
- 'Content-Type': 'application/json'
851
- },
852
- timeout: 30000
853
- });
854
- return res.data.choices?.[0]?.message?.content || null;
855
- }
856
- }
857
- catch (e) {
858
- this.logInternal(`LLM call failed (${provider}): ${e.message}`);
859
- return null;
860
- }
861
- }
862
651
  // --- UTILS ---
863
652
  enrichJob(raw) {
864
653
  return {
package/dist/llm.d.ts CHANGED
@@ -1,34 +1,36 @@
1
- /**
2
- * LLM Provider Configuration
3
- * Agents use their own API keys - no server proxy needed
4
- */
5
- export interface LLMConfig {
6
- provider: 'groq' | 'openai' | 'anthropic' | 'google' | 'nvidia';
7
- apiKey: string;
8
- baseUrl?: string;
9
- model?: string;
10
- }
11
- export interface LLMResponse {
12
- success: boolean;
13
- text?: string;
14
- error?: string;
15
- usage?: {
16
- promptTokens: number;
17
- completionTokens: number;
18
- };
19
- }
20
- export declare class LLMClient {
21
- private config;
22
- private client;
23
- constructor(config?: Partial<LLMConfig>);
24
- private detectProvider;
25
- private getApiKey;
26
- generate(prompt: string, options?: {
27
- system?: string;
28
- temperature?: number;
29
- maxTokens?: number;
30
- model?: string;
31
- }): Promise<LLMResponse>;
32
- private callAnthropic;
33
- }
34
- export default LLMClient;
1
+ export interface LLMConfig {
2
+ provider: 'groq' | 'openai' | 'anthropic' | 'google' | 'nvidia' | 'custom';
3
+ apiKey: string;
4
+ baseUrl?: string;
5
+ model?: string;
6
+ }
7
+ export interface LLMResponse {
8
+ success: boolean;
9
+ text?: string;
10
+ error?: string;
11
+ }
12
+ export declare const CODEX_MODELS: {
13
+ readonly codex: "codex-2025-01-21";
14
+ readonly codexMini: "codex-mini";
15
+ readonly codexLatest: "codex-latest";
16
+ };
17
+ export declare const GPT_MODELS: {
18
+ readonly gpt4o: "gpt-4o";
19
+ readonly gpt4oMini: "gpt-4o-mini";
20
+ readonly gpt4Turbo: "gpt-4-turbo";
21
+ readonly o1: "o1";
22
+ readonly o3Mini: "o3-mini";
23
+ };
24
+ export declare class LLMClient {
25
+ private config;
26
+ constructor(config?: Partial<LLMConfig>);
27
+ private detectProvider;
28
+ private getApiKey;
29
+ generate(prompt: string, options?: {
30
+ system?: string;
31
+ temperature?: number;
32
+ maxTokens?: number;
33
+ model?: string;
34
+ }): Promise<LLMResponse>;
35
+ }
36
+ export default LLMClient;
package/dist/llm.js CHANGED
@@ -1,129 +1,99 @@
1
- "use strict";
2
- var __importDefault = (this && this.__importDefault) || function (mod) {
3
- return (mod && mod.__esModule) ? mod : { "default": mod };
4
- };
5
- Object.defineProperty(exports, "__esModule", { value: true });
6
- exports.LLMClient = void 0;
7
- const axios_1 = __importDefault(require("axios"));
8
- const PROVIDER_CONFIGS = {
9
- groq: {
10
- baseUrl: 'https://api.groq.com/openai/v1',
11
- defaultModel: 'llama-3.3-70b-versatile',
12
- apiFormat: 'openai'
13
- },
14
- openai: {
15
- baseUrl: 'https://api.openai.com/v1',
16
- defaultModel: 'gpt-4o-mini',
17
- apiFormat: 'openai'
18
- },
19
- anthropic: {
20
- baseUrl: 'https://api.anthropic.com',
21
- defaultModel: 'claude-3-sonnet-20240229',
22
- apiFormat: 'anthropic'
23
- },
24
- google: {
25
- baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
26
- defaultModel: 'gemini-1.5-flash',
27
- apiFormat: 'openai'
28
- },
29
- nvidia: {
30
- baseUrl: 'https://integrate.api.nvidia.com/v1',
31
- defaultModel: 'meta/llama-3.1-405b-instruct',
32
- apiFormat: 'openai'
33
- }
34
- };
35
- class LLMClient {
36
- constructor(config) {
37
- // Auto-detect from environment
38
- const provider = config?.provider || this.detectProvider();
39
- const apiKey = config?.apiKey || this.getApiKey(provider);
40
- if (!apiKey) {
41
- throw new Error(`No API key found for LLM provider '${provider}'. Set ${provider.toUpperCase()}_API_KEY or GROQ_API_KEY`);
42
- }
43
- const preset = PROVIDER_CONFIGS[provider];
44
- this.config = {
45
- provider,
46
- apiKey,
47
- baseUrl: config?.baseUrl || preset?.baseUrl,
48
- model: config?.model || preset?.defaultModel
49
- };
50
- this.client = axios_1.default.create({
51
- baseURL: this.config.baseUrl,
52
- headers: provider === 'anthropic'
53
- ? { 'x-api-key': apiKey, 'anthropic-version': '2023-06-01' }
54
- : { 'Authorization': `Bearer ${apiKey}` }
55
- });
56
- }
57
- detectProvider() {
58
- if (process.env.ANTHROPIC_API_KEY)
59
- return 'anthropic';
60
- if (process.env.OPENAI_API_KEY)
61
- return 'openai';
62
- if (process.env.GOOGLE_API_KEY)
63
- return 'google';
64
- if (process.env.NVIDIA_API_KEY)
65
- return 'nvidia';
66
- return 'groq'; // Default
67
- }
68
- getApiKey(provider) {
69
- const envMap = {
70
- groq: 'GROQ_API_KEY',
71
- openai: 'OPENAI_API_KEY',
72
- anthropic: 'ANTHROPIC_API_KEY',
73
- google: 'GOOGLE_API_KEY',
74
- nvidia: 'NVIDIA_API_KEY'
75
- };
76
- return process.env[envMap[provider]] || process.env.GROQ_API_KEY || process.env.RENTABOTS_LLM_KEY;
77
- }
78
- async generate(prompt, options = {}) {
79
- const model = options.model || this.config.model;
80
- try {
81
- if (this.config.provider === 'anthropic') {
82
- return await this.callAnthropic(prompt, options, model);
83
- }
84
- // OpenAI-compatible format
85
- const res = await this.client.post('/chat/completions', {
86
- model,
87
- messages: [
88
- ...(options.system ? [{ role: 'system', content: options.system }] : []),
89
- { role: 'user', content: prompt }
90
- ],
91
- temperature: options.temperature ?? 0.7,
92
- max_tokens: options.maxTokens ?? 2048
93
- });
94
- return {
95
- success: true,
96
- text: res.data.choices[0]?.message?.content,
97
- usage: res.data.usage ? {
98
- promptTokens: res.data.usage.prompt_tokens,
99
- completionTokens: res.data.usage.completion_tokens
100
- } : undefined
101
- };
102
- }
103
- catch (error) {
104
- return {
105
- success: false,
106
- error: error.response?.data?.error?.message || error.message
107
- };
108
- }
109
- }
110
- async callAnthropic(prompt, options, model) {
111
- const res = await this.client.post('/v1/messages', {
112
- model,
113
- messages: [{ role: 'user', content: prompt }],
114
- system: options.system,
115
- temperature: options.temperature ?? 0.7,
116
- max_tokens: options.maxTokens ?? 2048
117
- });
118
- return {
119
- success: true,
120
- text: res.data.content[0]?.text,
121
- usage: {
122
- promptTokens: res.data.usage?.input_tokens || 0,
123
- completionTokens: res.data.usage?.output_tokens || 0
124
- }
125
- };
126
- }
127
- }
128
- exports.LLMClient = LLMClient;
129
- exports.default = LLMClient;
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.LLMClient = exports.GPT_MODELS = exports.CODEX_MODELS = void 0;
7
+ const axios_1 = __importDefault(require("axios"));
8
+ const CONFIGS = {
9
+ groq: { baseUrl: 'https://api.groq.com/openai/v1', defaultModel: 'llama-3.3-70b-versatile' },
10
+ openai: { baseUrl: 'https://api.openai.com/v1', defaultModel: 'gpt-4o' },
11
+ anthropic: { baseUrl: 'https://api.anthropic.com', defaultModel: 'claude-3-sonnet-20240229' },
12
+ google: { baseUrl: 'https://generativelanguage.googleapis.com/v1beta', defaultModel: 'gemini-1.5-flash' },
13
+ nvidia: { baseUrl: 'https://integrate.api.nvidia.com/v1', defaultModel: 'meta/llama-3.1-405b-instruct' },
14
+ custom: { baseUrl: '', defaultModel: '' }
15
+ };
16
+ // Codex models available via OpenAI API
17
+ exports.CODEX_MODELS = {
18
+ codex: 'codex-2025-01-21', // Latest Codex
19
+ codexMini: 'codex-mini', // Mini version
20
+ codexLatest: 'codex-latest', // Alias to latest
21
+ };
22
+ // OpenAI GPT models
23
+ exports.GPT_MODELS = {
24
+ gpt4o: 'gpt-4o',
25
+ gpt4oMini: 'gpt-4o-mini',
26
+ gpt4Turbo: 'gpt-4-turbo',
27
+ o1: 'o1',
28
+ o3Mini: 'o3-mini',
29
+ };
30
/**
 * Minimal multi-provider LLM client.
 * Resolves provider, API key, base URL and model from explicit config,
 * environment variables, and the CONFIGS presets — in that order.
 */
class LLMClient {
    /**
     * @param config Optional overrides; anything omitted is auto-detected
     *   (provider/apiKey via environment) or taken from the provider preset
     *   (baseUrl/defaultModel).
     * @throws Error when no API key can be resolved, or when the resolved base
     *   URL is empty. The 'custom' preset ships an empty baseUrl, so without
     *   this check generate() would post to the relative URL '/chat/completions'
     *   and fail much later with an opaque network error.
     */
    constructor(config) {
        const provider = config?.provider || this.detectProvider();
        const apiKey = config?.apiKey || this.getApiKey(provider);
        if (!apiKey)
            throw new Error(`No API key for ${provider}`);
        const preset = CONFIGS[provider];
        const baseUrl = config?.baseUrl || preset.baseUrl;
        // Fail fast here rather than deep inside generate().
        if (!baseUrl)
            throw new Error(`Provider '${provider}' requires an explicit baseUrl`);
        this.config = {
            provider,
            apiKey,
            baseUrl,
            model: config?.model || preset.defaultModel
        };
    }
    /** Pick the provider whose well-known API-key env var is set; groq is the fallback. */
    detectProvider() {
        if (process.env.ANTHROPIC_API_KEY)
            return 'anthropic';
        if (process.env.OPENAI_API_KEY)
            return 'openai';
        if (process.env.GOOGLE_API_KEY)
            return 'google';
        if (process.env.NVIDIA_API_KEY)
            return 'nvidia';
        return 'groq';
    }
    /** Look up the provider's API key from its conventional environment variable. */
    getApiKey(provider) {
        const map = {
            groq: 'GROQ_API_KEY',
            openai: 'OPENAI_API_KEY',
            anthropic: 'ANTHROPIC_API_KEY',
            google: 'GOOGLE_API_KEY',
            nvidia: 'NVIDIA_API_KEY',
            custom: 'LLM_API_KEY'
        };
        return process.env[map[provider]];
    }
    /**
     * Generate a completion for `prompt`.
     * Never throws: transport/provider failures are captured and returned as
     * `{ success: false, error }`.
     * NOTE(review): the google preset reuses the OpenAI-style '/chat/completions'
     * path against the generativelanguage base URL — confirm that endpoint shape.
     */
    async generate(prompt, options = {}) {
        const model = options.model || this.config.model;
        // Anthropic authenticates via x-api-key; all other providers use a Bearer token.
        const headers = this.config.provider === 'anthropic'
            ? { 'x-api-key': this.config.apiKey, 'anthropic-version': '2023-06-01' }
            : { 'Authorization': `Bearer ${this.config.apiKey}` };
        try {
            if (this.config.provider === 'anthropic') {
                const res = await axios_1.default.post(`${this.config.baseUrl}/v1/messages`, {
                    model,
                    messages: [{ role: 'user', content: prompt }],
                    system: options.system,
                    temperature: options.temperature ?? 0.7,
                    max_tokens: options.maxTokens ?? 2048,
                }, { headers });
                return { success: true, text: res.data.content[0]?.text };
            }
            // OpenAI-compatible chat completion for every other provider.
            const res = await axios_1.default.post(`${this.config.baseUrl}/chat/completions`, {
                model,
                messages: [
                    ...(options.system ? [{ role: 'system', content: options.system }] : []),
                    { role: 'user', content: prompt }
                ],
                temperature: options.temperature ?? 0.7,
                max_tokens: options.maxTokens ?? 2048,
            }, { headers });
            return { success: true, text: res.data.choices[0]?.message?.content };
        }
        catch (e) {
            return { success: false, error: e.response?.data?.error?.message || e.message };
        }
    }
}
exports.LLMClient = LLMClient;
exports.default = LLMClient;
@@ -11,6 +11,17 @@
11
11
  */
12
12
  import { Agent, Job, Message } from './index';
13
13
  export type QueenPhase = 'REQUIREMENTS_GATHERING' | 'REQUIREMENTS_CONFIRMED' | 'WORK_IN_PROGRESS' | 'QA_REVIEW' | 'COMPLETED';
14
/**
 * Gate that must be fully satisfied before a job is handed over.
 * Tracks which worker phases finished and which verifications have passed;
 * see QueenBrain.canHandover for how the fields are evaluated.
 */
export interface VerificationGateState {
    /** Builder worker phase reported complete. */
    builderCompleted: boolean;
    /** QA worker phase reported complete. */
    qaCompleted: boolean;
    /** Repo-backed deliverables were verified. */
    deliverablesVerified: boolean;
    /** Test/check gate passed (detail in `testsStatus`). */
    testsVerified: boolean;
    /** Outcome of the test run; presumably 'no-tests-policy' marks a policy waiver — confirm. */
    testsStatus: 'passed' | 'failed' | 'no-tests-policy' | 'not-run';
    /** Count of verified repo deliverables; handover requires it to be > 0. */
    repoDeliverableCount: number;
    /** Optional human-readable summary of the verification checklist. */
    checklistSummary?: string;
    /** Timestamp of the last verification, if any. */
    verifiedAt?: string;
    /** Most recent verification error, if any. */
    lastError?: string;
}
14
25
  export interface QueenState {
15
26
  phase: QueenPhase;
16
27
  requirements: string[];
@@ -18,6 +29,7 @@ export interface QueenState {
18
29
  brainLog: string[];
19
30
  questionCount: number;
20
31
  lastHumanMessage: string;
32
+ verificationGate?: VerificationGateState;
21
33
  }
22
34
  export declare class QueenBrain {
23
35
  private agent;
@@ -59,6 +71,15 @@ export declare class QueenBrain {
59
71
  * Get the internal brain log for debugging
60
72
  */
61
73
  getBrainLog(jobId: string): string[];
74
    private createDefaultGateState;
    private ensureGate;
    /** Record that a worker phase ('BUILDER' or 'QA') finished for the job. */
    markWorkerCompleted(jobId: string, phase: 'BUILDER' | 'QA'): void;
    /** Merge partial verification results into the job's gate state. */
    updateVerificationGate(jobId: string, update: Partial<VerificationGateState>): void;
    /** Evaluate the verification gate; `allowed` is true only when `reasons` is empty. */
    canHandover(jobId: string): {
        allowed: boolean;
        reasons: string[];
        gate: VerificationGateState;
    };
62
83
  private saveStates;
63
84
  private loadStates;
64
85
  }
@@ -100,6 +100,7 @@ class QueenBrain {
100
100
  brainLog: [`[${new Date().toISOString()}] Mission assigned: ${job.title}`],
101
101
  questionCount: 0,
102
102
  lastHumanMessage: '',
103
+ verificationGate: this.createDefaultGateState(),
103
104
  };
104
105
  this.states.set(job.id, state);
105
106
  this.saveStates();
@@ -131,6 +132,7 @@ class QueenBrain {
131
132
  brainLog: [],
132
133
  questionCount: 0,
133
134
  lastHumanMessage: '',
135
+ verificationGate: this.createDefaultGateState(),
134
136
  };
135
137
  this.states.set(msg.jobId, state);
136
138
  }
@@ -254,6 +256,61 @@ class QueenBrain {
254
256
  getBrainLog(jobId) {
255
257
  return this.states.get(jobId)?.brainLog || [];
256
258
  }
259
+ createDefaultGateState() {
260
+ return {
261
+ builderCompleted: false,
262
+ qaCompleted: false,
263
+ deliverablesVerified: false,
264
+ testsVerified: false,
265
+ testsStatus: 'not-run',
266
+ repoDeliverableCount: 0,
267
+ };
268
+ }
269
+ ensureGate(jobId) {
270
+ const state = this.states.get(jobId);
271
+ if (!state)
272
+ return this.createDefaultGateState();
273
+ if (!state.verificationGate) {
274
+ state.verificationGate = this.createDefaultGateState();
275
+ }
276
+ return state.verificationGate;
277
+ }
278
+ markWorkerCompleted(jobId, phase) {
279
+ const state = this.states.get(jobId);
280
+ if (!state)
281
+ return;
282
+ const gate = this.ensureGate(jobId);
283
+ if (phase === 'BUILDER')
284
+ gate.builderCompleted = true;
285
+ if (phase === 'QA')
286
+ gate.qaCompleted = true;
287
+ state.brainLog.push(`[${new Date().toISOString()}] QUEEN THOUGHT: Worker phase completed -> ${phase}`);
288
+ this.saveStates();
289
+ }
290
+ updateVerificationGate(jobId, update) {
291
+ const state = this.states.get(jobId);
292
+ if (!state)
293
+ return;
294
+ const gate = this.ensureGate(jobId);
295
+ state.verificationGate = { ...gate, ...update };
296
+ if (update.checklistSummary) {
297
+ state.brainLog.push(`[${new Date().toISOString()}] QUEEN CHECKLIST: ${update.checklistSummary}`);
298
+ }
299
+ this.saveStates();
300
+ }
301
+ canHandover(jobId) {
302
+ const gate = this.ensureGate(jobId);
303
+ const reasons = [];
304
+ if (!gate.builderCompleted)
305
+ reasons.push('Builder phase not completed');
306
+ if (!gate.qaCompleted)
307
+ reasons.push('QA phase not completed');
308
+ if (!gate.deliverablesVerified || gate.repoDeliverableCount <= 0)
309
+ reasons.push('Repo-backed deliverables not verified');
310
+ if (!gate.testsVerified)
311
+ reasons.push(`Test/check gate not passed (status: ${gate.testsStatus})`);
312
+ return { allowed: reasons.length === 0, reasons, gate };
313
+ }
257
314
  // --- PERSISTENCE ---
258
315
  saveStates() {
259
316
  try {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "rentabots-sdk",
3
- "version": "1.7.1",
3
+ "version": "1.7.7",
4
4
  "description": "Official SDK for RentaBots AI Agent Marketplace",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -40,4 +40,4 @@
40
40
  },
41
41
  "author": "",
42
42
  "license": "ISC"
43
- }
43
+ }