thepopebot 1.2.65 → 1.2.67
This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- package/lib/ai/model.js +4 -1
- package/package.json +1 -1
- package/setup/lib/prompts.mjs +4 -5
- package/setup/setup.mjs +38 -108
package/lib/ai/model.js
CHANGED
|
@@ -38,9 +38,12 @@ export async function createModel(options = {}) {
|
|
|
38
38
|
anthropicApiKey: apiKey,
|
|
39
39
|
});
|
|
40
40
|
}
|
|
41
|
+
case 'custom':
|
|
41
42
|
case 'openai': {
|
|
42
43
|
const { ChatOpenAI } = await import('@langchain/openai');
|
|
43
|
-
const apiKey =
|
|
44
|
+
const apiKey = provider === 'custom'
|
|
45
|
+
? (process.env.CUSTOM_API_KEY || 'not-needed')
|
|
46
|
+
: process.env.OPENAI_API_KEY;
|
|
44
47
|
const baseURL = process.env.OPENAI_BASE_URL;
|
|
45
48
|
if (!apiKey && !baseURL) {
|
|
46
49
|
throw new Error('OPENAI_API_KEY environment variable is required (or set OPENAI_BASE_URL for local models)');
|
package/package.json
CHANGED
package/setup/lib/prompts.mjs
CHANGED
|
@@ -40,7 +40,7 @@ export async function promptForProvider() {
|
|
|
40
40
|
name: p.label,
|
|
41
41
|
value: key,
|
|
42
42
|
}));
|
|
43
|
-
choices.push({ name: '
|
|
43
|
+
choices.push({ name: 'Local (OpenAI Compatible API)', value: 'custom' });
|
|
44
44
|
|
|
45
45
|
const { provider } = await inquirer.prompt([
|
|
46
46
|
{
|
|
@@ -179,7 +179,7 @@ export async function promptForCustomProvider() {
|
|
|
179
179
|
{
|
|
180
180
|
type: 'input',
|
|
181
181
|
name: 'baseUrl',
|
|
182
|
-
message: 'API base URL (e.g., http://
|
|
182
|
+
message: 'API base URL (e.g., http://host.docker.internal:11434/v1):',
|
|
183
183
|
validate: (input) => {
|
|
184
184
|
if (!input) return 'Base URL is required';
|
|
185
185
|
if (!input.startsWith('http://') && !input.startsWith('https://')) {
|
|
@@ -194,7 +194,7 @@ export async function promptForCustomProvider() {
|
|
|
194
194
|
{
|
|
195
195
|
type: 'input',
|
|
196
196
|
name: 'model',
|
|
197
|
-
message: 'Model ID (e.g.,
|
|
197
|
+
message: 'Model ID (e.g., qwen3:8b):',
|
|
198
198
|
validate: (input) => input ? true : 'Model ID is required',
|
|
199
199
|
},
|
|
200
200
|
]);
|
|
@@ -203,9 +203,8 @@ export async function promptForCustomProvider() {
|
|
|
203
203
|
{
|
|
204
204
|
type: 'password',
|
|
205
205
|
name: 'apiKey',
|
|
206
|
-
message: 'API key:',
|
|
206
|
+
message: 'API key (leave blank if not needed):',
|
|
207
207
|
mask: '*',
|
|
208
|
-
validate: (input) => input ? true : 'API key is required',
|
|
209
208
|
},
|
|
210
209
|
]);
|
|
211
210
|
|
package/setup/setup.mjs
CHANGED
|
@@ -418,19 +418,22 @@ async function main() {
|
|
|
418
418
|
existingKey = env[existingEnvKey];
|
|
419
419
|
}
|
|
420
420
|
|
|
421
|
-
|
|
421
|
+
// For custom providers the API key is optional, so check existingEnvKey instead
|
|
422
|
+
if (existingKey || (env.LLM_PROVIDER === 'custom' && existingEnvKey)) {
|
|
422
423
|
const providerLabel = env.LLM_PROVIDER === 'custom'
|
|
423
|
-
? '
|
|
424
|
+
? 'Local (OpenAI Compatible API)'
|
|
424
425
|
: (PROVIDERS[env.LLM_PROVIDER]?.label || env.LLM_PROVIDER);
|
|
425
|
-
let llmDisplay =
|
|
426
|
-
|
|
426
|
+
let llmDisplay = existingKey
|
|
427
|
+
? `LLM: ${providerLabel} / ${env.LLM_MODEL} (${maskSecret(existingKey)})`
|
|
428
|
+
: `LLM: ${providerLabel} / ${env.LLM_MODEL}`;
|
|
429
|
+
if ((env.LLM_PROVIDER === 'openai' || env.LLM_PROVIDER === 'custom') && env.OPENAI_BASE_URL) {
|
|
427
430
|
llmDisplay += ` @ ${env.OPENAI_BASE_URL}`;
|
|
428
431
|
}
|
|
429
432
|
printSuccess(llmDisplay);
|
|
430
433
|
if (!await confirm('Reconfigure?', false)) {
|
|
431
434
|
agentProvider = env.LLM_PROVIDER;
|
|
432
435
|
agentModel = env.LLM_MODEL;
|
|
433
|
-
collectedKeys[existingEnvKey] = existingKey;
|
|
436
|
+
collectedKeys[existingEnvKey] = existingKey || '';
|
|
434
437
|
}
|
|
435
438
|
}
|
|
436
439
|
}
|
|
@@ -441,98 +444,30 @@ async function main() {
|
|
|
441
444
|
agentProvider = await promptForProvider();
|
|
442
445
|
|
|
443
446
|
if (agentProvider === 'custom') {
|
|
447
|
+
printInfo('If the model runs on this machine, use http://host.docker.internal:<port>/v1');
|
|
448
|
+
printInfo('instead of localhost (localhost won\'t work from inside Docker)');
|
|
449
|
+
printInfo('Ollama example: http://host.docker.internal:11434/v1\n');
|
|
444
450
|
const custom = await promptForCustomProvider();
|
|
445
451
|
agentModel = custom.model;
|
|
452
|
+
openaiBaseUrl = custom.baseUrl;
|
|
446
453
|
writeModelsJson('custom', {
|
|
447
454
|
baseUrl: custom.baseUrl,
|
|
448
455
|
apiKey: 'CUSTOM_API_KEY',
|
|
449
456
|
api: 'openai-completions',
|
|
450
457
|
models: [custom.model],
|
|
451
458
|
});
|
|
452
|
-
collectedKeys['CUSTOM_API_KEY'] = custom.apiKey;
|
|
453
|
-
printSuccess(`Custom provider configured: ${custom.model}`);
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
const useCustomUrl = await confirm('Use your own OpenAI-compatible LLM? (e.g. Ollama, vLLM)', false);
|
|
457
|
-
|
|
458
|
-
if (useCustomUrl) {
|
|
459
|
-
// Custom URL flow: URL → model → key (optional)
|
|
460
|
-
printInfo('If the model runs on this machine, use http://host.docker.internal:<port>');
|
|
461
|
-
printInfo('instead of localhost (localhost won\'t work from inside Docker)\n');
|
|
462
|
-
const { baseUrl } = await inquirer.prompt([
|
|
463
|
-
{
|
|
464
|
-
type: 'input',
|
|
465
|
-
name: 'baseUrl',
|
|
466
|
-
message: 'API base URL:',
|
|
467
|
-
validate: (input) => {
|
|
468
|
-
if (!input) return 'URL is required';
|
|
469
|
-
if (!input.startsWith('http://') && !input.startsWith('https://')) {
|
|
470
|
-
return 'URL must start with http:// or https://';
|
|
471
|
-
}
|
|
472
|
-
return true;
|
|
473
|
-
},
|
|
474
|
-
},
|
|
475
|
-
]);
|
|
476
|
-
openaiBaseUrl = baseUrl;
|
|
477
|
-
printSuccess(`Custom base URL: ${openaiBaseUrl}`);
|
|
478
|
-
|
|
479
|
-
const { model } = await inquirer.prompt([
|
|
480
|
-
{
|
|
481
|
-
type: 'input',
|
|
482
|
-
name: 'model',
|
|
483
|
-
message: 'Model ID (e.g. qwen3:8b):',
|
|
484
|
-
validate: (input) => input ? true : 'Model ID is required',
|
|
485
|
-
},
|
|
486
|
-
]);
|
|
487
|
-
agentModel = model;
|
|
488
|
-
|
|
489
|
-
const { key } = await inquirer.prompt([
|
|
490
|
-
{
|
|
491
|
-
type: 'password',
|
|
492
|
-
name: 'key',
|
|
493
|
-
message: 'API key (leave blank if not needed):',
|
|
494
|
-
mask: '*',
|
|
495
|
-
},
|
|
496
|
-
]);
|
|
497
|
-
collectedKeys['OPENAI_API_KEY'] = key || '';
|
|
498
|
-
|
|
499
|
-
writeModelsJson('openai', {
|
|
500
|
-
baseUrl: openaiBaseUrl,
|
|
501
|
-
apiKey: 'OPENAI_API_KEY',
|
|
502
|
-
api: 'openai-completions',
|
|
503
|
-
models: [agentModel],
|
|
504
|
-
});
|
|
505
|
-
printSuccess(`Generated .pi/agent/models.json for custom OpenAI-compatible LLM`);
|
|
506
|
-
if (key) {
|
|
507
|
-
printSuccess(`OpenAI key added (${maskSecret(key)})`);
|
|
508
|
-
}
|
|
509
|
-
} else {
|
|
510
|
-
// Standard OpenAI flow — model list, browser page, sk- validation
|
|
511
|
-
agentModel = await promptForModel('openai');
|
|
512
|
-
const agentApiKey = await promptForApiKey('openai');
|
|
513
|
-
collectedKeys['OPENAI_API_KEY'] = agentApiKey;
|
|
514
|
-
|
|
515
|
-
writeModelsJson('openai', {
|
|
516
|
-
baseUrl: PROVIDERS.openai.baseUrl,
|
|
517
|
-
apiKey: PROVIDERS.openai.envKey,
|
|
518
|
-
api: PROVIDERS.openai.api,
|
|
519
|
-
models: PROVIDERS.openai.models.map((m) => m.id),
|
|
520
|
-
});
|
|
521
|
-
printSuccess(`Generated .pi/agent/models.json for OpenAI`);
|
|
522
|
-
printSuccess(`OpenAI key added (${maskSecret(agentApiKey)})`);
|
|
523
|
-
|
|
524
|
-
// Clear any previous custom base URL
|
|
525
|
-
if (isRerun && env?.OPENAI_BASE_URL) {
|
|
526
|
-
changedVars['OPENAI_BASE_URL'] = '';
|
|
527
|
-
}
|
|
459
|
+
collectedKeys['CUSTOM_API_KEY'] = custom.apiKey || '';
|
|
460
|
+
printSuccess(`Custom provider configured: ${custom.model} @ ${custom.baseUrl}`);
|
|
461
|
+
if (custom.apiKey) {
|
|
462
|
+
printSuccess(`API key added (${maskSecret(custom.apiKey)})`);
|
|
528
463
|
}
|
|
529
464
|
} else {
|
|
530
|
-
// Non-OpenAI providers (Anthropic, Google)
|
|
531
465
|
const providerConfig = PROVIDERS[agentProvider];
|
|
532
466
|
agentModel = await promptForModel(agentProvider);
|
|
533
467
|
const agentApiKey = await promptForApiKey(agentProvider);
|
|
534
468
|
collectedKeys[providerConfig.envKey] = agentApiKey;
|
|
535
469
|
|
|
470
|
+
// Non-builtin providers need models.json (e.g., OpenAI)
|
|
536
471
|
if (!providerConfig.builtin) {
|
|
537
472
|
writeModelsJson(agentProvider, {
|
|
538
473
|
baseUrl: providerConfig.baseUrl,
|
|
@@ -561,34 +496,29 @@ async function main() {
|
|
|
561
496
|
}
|
|
562
497
|
|
|
563
498
|
// Re-run: reconfigure existing OPENAI_BASE_URL if provider was kept (not freshly configured)
|
|
564
|
-
if (agentProvider === 'openai' && isRerun && env?.OPENAI_BASE_URL && !openaiBaseUrl) {
|
|
499
|
+
if ((agentProvider === 'openai' || agentProvider === 'custom') && isRerun && env?.OPENAI_BASE_URL && !openaiBaseUrl) {
|
|
565
500
|
printSuccess(`Custom LLM URL: ${env.OPENAI_BASE_URL}`);
|
|
566
501
|
if (await confirm('Reconfigure?', false)) {
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
return true;
|
|
582
|
-
},
|
|
502
|
+
printInfo('If the model runs on this machine, use http://host.docker.internal:<port>/v1');
|
|
503
|
+
printInfo('instead of localhost (localhost won\'t work from inside Docker)');
|
|
504
|
+
printInfo('Ollama example: http://host.docker.internal:11434/v1\n');
|
|
505
|
+
const { baseUrl } = await inquirer.prompt([
|
|
506
|
+
{
|
|
507
|
+
type: 'input',
|
|
508
|
+
name: 'baseUrl',
|
|
509
|
+
message: 'API base URL:',
|
|
510
|
+
validate: (input) => {
|
|
511
|
+
if (!input) return 'URL is required';
|
|
512
|
+
if (!input.startsWith('http://') && !input.startsWith('https://')) {
|
|
513
|
+
return 'URL must start with http:// or https://';
|
|
514
|
+
}
|
|
515
|
+
return true;
|
|
583
516
|
},
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
// Clear existing base URL
|
|
590
|
-
changedVars['OPENAI_BASE_URL'] = '';
|
|
591
|
-
}
|
|
517
|
+
},
|
|
518
|
+
]);
|
|
519
|
+
openaiBaseUrl = baseUrl;
|
|
520
|
+
changedVars['OPENAI_BASE_URL'] = openaiBaseUrl;
|
|
521
|
+
printSuccess(`Custom base URL: ${openaiBaseUrl}`);
|
|
592
522
|
} else {
|
|
593
523
|
openaiBaseUrl = env.OPENAI_BASE_URL;
|
|
594
524
|
}
|
|
@@ -881,7 +811,7 @@ async function main() {
|
|
|
881
811
|
|
|
882
812
|
console.log(chalk.bold.green('\n Configuration Summary:\n'));
|
|
883
813
|
|
|
884
|
-
const providerLabel = agentProvider === 'custom' ? '
|
|
814
|
+
const providerLabel = agentProvider === 'custom' ? 'Local (OpenAI Compatible API)' : PROVIDERS[agentProvider].label;
|
|
885
815
|
console.log(` ${chalk.dim('Repository:')} ${owner}/${repo}`);
|
|
886
816
|
console.log(` ${chalk.dim('App URL:')} ${appUrl}`);
|
|
887
817
|
console.log(` ${chalk.dim('Agent LLM:')} ${providerLabel} (${agentModel})`);
|