morpheus-cli 0.2.3 → 0.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +147 -0
- package/dist/channels/telegram.js +1 -1
- package/dist/cli/commands/doctor.js +62 -0
- package/dist/cli/commands/init.js +66 -11
- package/dist/config/manager.js +84 -5
- package/dist/config/precedence.js +138 -0
- package/dist/config/schemas.js +3 -1
- package/dist/http/middleware/auth.js +7 -5
- package/dist/http/server.js +21 -0
- package/dist/runtime/__tests__/manual_start_verify.js +1 -0
- package/dist/runtime/memory/sati/repository.js +62 -62
- package/dist/runtime/oracle.js +3 -3
- package/dist/runtime/providers/factory.js +14 -4
- package/dist/runtime/tools/config-tools.js +25 -31
- package/dist/types/config.js +1 -0
- package/dist/ui/assets/index-3USYAgWN.css +1 -0
- package/dist/ui/assets/index-DKCPYzx2.js +58 -0
- package/dist/ui/index.html +16 -14
- package/package.json +2 -2
- package/dist/ui/assets/index-BLLLlr0w.css +0 -1
- package/dist/ui/assets/index-Ccml5qIL.js +0 -50
package/README.md
CHANGED
@@ -93,17 +93,80 @@ Local React-based UI to manage recordings, chat history, and system status acros
 #### 🔒 UI Authentication
 To protect your Web UI, use the `THE_ARCHITECT_PASS` environment variable. This ensures only authorized users can access the dashboard and API.

+Additionally, you can use environment variables for API keys instead of storing them in the configuration file:
+
+| Variable | Description | Required |
+|----------|-------------|----------|
+| `OPENAI_API_KEY` | OpenAI API key (if using GPT) | No |
+| `ANTHROPIC_API_KEY` | Anthropic API key (if using Claude) | No |
+| `GOOGLE_API_KEY` | Google AI key (for Gemini and Audio) | Yes (for audio) |
+| `OPENROUTER_API_KEY` | OpenRouter API key (if using OpenRouter) | No |
+| `THE_ARCHITECT_PASS` | Web Dashboard access password | Recommended |
+| `TELEGRAM_BOT_TOKEN` | Telegram BotFather token | No |
+
+If these environment variables are set, they will take precedence over values stored in the configuration file.
+
+The system also supports generic environment variables that apply to all providers:
+
+| Variable | Description | Applies To |
+|----------|-------------|------------|
+| `MORPHEUS_AGENT_NAME` | Name of the agent | agent.name |
+| `MORPHEUS_AGENT_PERSONALITY` | Personality of the agent | agent.personality |
+| `MORPHEUS_LLM_PROVIDER` | LLM provider to use | llm.provider |
+| `MORPHEUS_LLM_MODEL` | Model name for LLM | llm.model |
+| `MORPHEUS_LLM_TEMPERATURE` | Temperature setting for LLM | llm.temperature |
+| `MORPHEUS_LLM_MAX_TOKENS` | Maximum tokens for LLM | llm.max_tokens |
+| `MORPHEUS_LLM_CONTEXT_WINDOW` | Context window size for LLM | llm.context_window |
+| `MORPHEUS_LLM_API_KEY` | Generic API key for LLM (lower precedence than provider-specific keys) | llm.api_key |
+| `MORPHEUS_SANTI_PROVIDER` | Sati provider to use | santi.provider |
+| `MORPHEUS_SANTI_MODEL` | Model name for Sati | santi.model |
+| `MORPHEUS_SANTI_TEMPERATURE` | Temperature setting for Sati | santi.temperature |
+| `MORPHEUS_SANTI_MAX_TOKENS` | Maximum tokens for Sati | santi.max_tokens |
+| `MORPHEUS_SANTI_CONTEXT_WINDOW` | Context window size for Sati | santi.context_window |
+| `MORPHEUS_SANTI_API_KEY` | Generic API key for Sati (lower precedence than provider-specific keys) | santi.api_key |
+| `MORPHEUS_SANTI_MEMORY_LIMIT` | Memory retrieval limit for Sati | santi.memory_limit |
+| `MORPHEUS_AUDIO_MODEL` | Model name for audio processing | audio.model |
+| `MORPHEUS_AUDIO_ENABLED` | Enable/disable audio processing | audio.enabled |
+| `MORPHEUS_AUDIO_API_KEY` | Generic API key for audio (lower precedence than provider-specific keys) | audio.apiKey |
+| `MORPHEUS_AUDIO_MAX_DURATION` | Max duration for audio processing | audio.maxDurationSeconds |
+| `MORPHEUS_TELEGRAM_ENABLED` | Enable/disable Telegram channel | channels.telegram.enabled |
+| `MORPHEUS_TELEGRAM_TOKEN` | Telegram bot token | channels.telegram.token |
+| `MORPHEUS_TELEGRAM_ALLOWED_USERS` | Comma-separated list of allowed Telegram user IDs | channels.telegram.allowedUsers |
+| `MORPHEUS_UI_ENABLED` | Enable/disable Web UI | ui.enabled |
+| `MORPHEUS_UI_PORT` | Port for Web UI | ui.port |
+| `MORPHEUS_LOGGING_ENABLED` | Enable/disable logging | logging.enabled |
+| `MORPHEUS_LOGGING_LEVEL` | Logging level | logging.level |
+| `MORPHEUS_LOGGING_RETENTION` | Log retention period | logging.retention |
+
+**Precedence Order**: The system follows this order of precedence when resolving configuration values:
+1. Provider-specific environment variable (e.g., `OPENAI_API_KEY`) - Highest priority
+2. Generic environment variable (e.g., `MORPHEUS_LLM_API_KEY`) - Medium priority
+3. Configuration file value (e.g., `config.llm.api_key`) - Lower priority
+4. Default value - Lowest priority
+
+> **Note**: If `THE_ARCHITECT_PASS` is not set, the system will use the default password `iamthearchitect`. This is less secure and it's recommended to set your own password in production environments.
+
 **Option 1: Using a `.env` file**
 Create a `.env` file in the root of your project:

 ```env
+OPENAI_API_KEY="your-openai-api-key"
+ANTHROPIC_API_KEY="your-anthropic-api-key"
+GOOGLE_API_KEY="your-google-api-key"
 THE_ARCHITECT_PASS="your-secure-password"
+TELEGRAM_BOT_TOKEN="your-telegram-bot-token"
+OPENROUTER_API_KEY="your-openrouter-api-key"
 ```

 **Option 2: Using Shell export**

 ```bash
+export OPENAI_API_KEY="your-openai-api-key"
+export ANTHROPIC_API_KEY="your-anthropic-api-key"
+export GOOGLE_API_KEY="your-google-api-key"
+export OPENROUTER_API_KEY="your-openrouter-api-key"
 export THE_ARCHITECT_PASS="your-secure-password"
+export TELEGRAM_BOT_TOKEN="your-telegram-bot-token"
 morpheus start
 ```

@@ -333,6 +396,90 @@ npm run test:watch
 - [ ] **Discord Adapter**: Support for Discord interactions.
 - [ ] **Plugin System**: Extend functionality via external modules.

+## 🕵️ Privacy Protection
+
+The Web UI includes privacy protection headers to prevent indexing by search engines:
+- HTML meta tags: `<meta name="robots" content="noindex, nofollow">`
+- HTTP header: `X-Robots-Tag: noindex, nofollow`
+
+This ensures that your private agent dashboard remains private and is not discoverable by search engines.
+
+## 🐳 Running with Docker
+
+Morpheus can be easily deployed using Docker and Docker Compose. The container supports all environment variables for configuration.
+
+### Prerequisites
+
+- Docker Engine
+- Docker Compose
+
+### Quick Start
+
+1. Create a `.env` file with your configuration:
+
+```bash
+cp .env.example .env
+# Edit .env with your actual API keys and settings
+```
+
+2. Build and start the container:
+
+```bash
+docker-compose up -d
+```
+
+3. Access the Web UI at `http://localhost:3333`
+
+### Using Docker Directly
+
+```bash
+# Build the image
+docker build -t morpheus .
+
+# Run with environment variables
+docker run -d \
+  --name morpheus-agent \
+  -p 3333:3333 \
+  -v morpheus_data:/root/.morpheus \
+  -e MORPHEUS_LLM_PROVIDER=openai \
+  -e OPENAI_API_KEY=your-api-key-here \
+  -e THE_ARCHITECT_PASS=your-password \
+  morpheus
+```
+
+### Environment Variables in Docker
+
+All environment variables described above work in Docker. The precedence order remains the same:
+1. Container environment variables
+2. Configuration file values
+3. Default values
+
+### Persistent Data
+
+The container stores configuration and data in `/root/.morpheus`. Mount a volume to persist data between container restarts:
+
+```yaml
+volumes:
+  - morpheus_data:/root/.morpheus # Recommended for persistence
+```
+
+### Health Check
+
+The container includes a health check that verifies the health endpoint is accessible. The application exposes a public `/health` endpoint that doesn't require authentication:
+
+```bash
+curl http://localhost:3333/health
+```
+
+Response:
+```json
+{
+  "status": "healthy",
+  "timestamp": "2026-02-05T21:30:00.000Z",
+  "uptime": 123.45
+}
+```
+
 ## Contributing

 1. Fork the repository.
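
The README above documents the `/health` endpoint with curl; the sketch below probes it from Node instead. It is not part of the package: it assumes Node 18+ (global `fetch`), the default Web UI port 3333 from the README, and should be saved as an `.mjs` file (or run with `"type": "module"`) because of the top-level `await`.

```js
// Sketch only (not shipped with morpheus-cli): probe the public /health endpoint.
const baseUrl = 'http://localhost:3333'; // default MORPHEUS_UI_PORT per the README

const res = await fetch(`${baseUrl}/health`);
if (!res.ok) {
  throw new Error(`Health check failed: HTTP ${res.status}`);
}
const body = await res.json();
// The README documents the response shape as { status, timestamp, uptime }.
console.log(`status=${body.status}, uptime=${body.uptime}s`);
```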

package/dist/channels/telegram.js
CHANGED
@@ -105,7 +105,7 @@ export class TelegramAdapter
 await ctx.reply(`🎤 *Transcription*: _"${text}"_`, { parse_mode: 'Markdown' });
 await ctx.sendChatAction('typing');
 // Process with Agent
-const response = await this.oracle.chat(text, usage);
+const response = await this.oracle.chat(text, usage, true);
 // if (listeningMsg) {
 // try {
 // await ctx.telegram.deleteMessage(ctx.chat.id, listeningMsg.message_id);

package/dist/cli/commands/doctor.js
CHANGED
@@ -47,6 +47,68 @@ export const doctorCommand = new Command('doctor')
 else if (deprecatedLimit !== undefined && contextWindow !== undefined) {
 console.log(chalk.yellow('⚠') + ' Found both \'memory.limit\' and \'llm.context_window\'. Remove \'memory.limit\' from config.');
 }
+// Check API keys availability for active providers
+const llmProvider = config.llm?.provider;
+const santiProvider = config.santi?.provider;
+// Check LLM provider API key
+if (llmProvider && llmProvider !== 'ollama') {
+const hasLlmApiKey = config.llm?.api_key ||
+(llmProvider === 'openai' && process.env.OPENAI_API_KEY) ||
+(llmProvider === 'anthropic' && process.env.ANTHROPIC_API_KEY) ||
+(llmProvider === 'gemini' && process.env.GOOGLE_API_KEY) ||
+(llmProvider === 'openrouter' && process.env.OPENROUTER_API_KEY);
+if (hasLlmApiKey) {
+console.log(chalk.green('✓') + ` LLM API key available for ${llmProvider}`);
+}
+else {
+console.log(chalk.red('✗') + ` LLM API key missing for ${llmProvider}. Either set in config or define environment variable.`);
+allPassed = false;
+}
+}
+// Check Santi provider API key
+if (santiProvider && santiProvider !== 'ollama') {
+const hasSantiApiKey = config.santi?.api_key ||
+(santiProvider === 'openai' && process.env.OPENAI_API_KEY) ||
+(santiProvider === 'anthropic' && process.env.ANTHROPIC_API_KEY) ||
+(santiProvider === 'gemini' && process.env.GOOGLE_API_KEY) ||
+(santiProvider === 'openrouter' && process.env.OPENROUTER_API_KEY);
+if (hasSantiApiKey) {
+console.log(chalk.green('✓') + ` Santi API key available for ${santiProvider}`);
+}
+else {
+console.log(chalk.red('✗') + ` Santi API key missing for ${santiProvider}. Either set in config or define environment variable.`);
+allPassed = false;
+}
+}
+// Check audio API key if enabled
+if (config.audio?.enabled && config.llm?.provider !== 'gemini') {
+const hasAudioApiKey = config.audio?.apiKey || process.env.GOOGLE_API_KEY;
+if (hasAudioApiKey) {
+console.log(chalk.green('✓') + ' Audio API key available for transcription');
+}
+else {
+console.log(chalk.red('✗') + ' Audio API key missing. Either set in config or define GOOGLE_API_KEY environment variable.');
+allPassed = false;
+}
+}
+// Check Telegram token if enabled
+if (config.channels?.telegram?.enabled) {
+const hasTelegramToken = config.channels.telegram?.token || process.env.TELEGRAM_BOT_TOKEN;
+if (hasTelegramToken) {
+console.log(chalk.green('✓') + ' Telegram bot token available');
+}
+else {
+console.log(chalk.red('✗') + ' Telegram bot token missing. Either set in config or define TELEGRAM_BOT_TOKEN environment variable.');
+allPassed = false;
+}
+}
+// Check if default password is being used for dashboard
+if (!process.env.THE_ARCHITECT_PASS) {
+console.log(chalk.yellow('⚠') + ' Using default password for dashboard (iamthearchitect). For security, set THE_ARCHITECT_PASS environment variable.');
+}
+else {
+console.log(chalk.green('✓') + ' Custom dashboard password set');
+}
 }
 else {
 console.log(chalk.yellow('!') + ' Configuration: Missing (will be created on start)');
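
The `doctor` checks above treat a key as present when it is either stored in the config file or exposed through the provider's own environment variable. Below is a standalone restatement of that rule for illustration only; it is not imported from the package, and the real command additionally prints chalk-colored output and flips `allPassed`.

```js
// Standalone restatement of the key-presence rule used by `morpheus doctor` above.
const PROVIDER_ENV_VARS = {
  openai: 'OPENAI_API_KEY',
  anthropic: 'ANTHROPIC_API_KEY',
  gemini: 'GOOGLE_API_KEY',
  openrouter: 'OPENROUTER_API_KEY',
};

function hasApiKey(provider, configApiKey) {
  if (provider === 'ollama') return true; // Ollama needs no key
  const envVar = PROVIDER_ENV_VARS[provider];
  return Boolean(configApiKey || (envVar && process.env[envVar]));
}

// Example: env-var-only setup, nothing stored in the config file.
process.env.OPENAI_API_KEY = 'sk-example';
console.log(hasApiKey('openai', undefined));    // true
console.log(hasApiKey('anthropic', undefined)); // false unless ANTHROPIC_API_KEY is set
```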

package/dist/cli/commands/init.js
CHANGED
@@ -30,6 +30,7 @@ export const initCommand = new Command('init')
 choices: [
 { name: 'OpenAI', value: 'openai' },
 { name: 'Anthropic', value: 'anthropic' },
+{ name: 'OpenRouter', value: 'openrouter' },
 { name: 'Ollama', value: 'ollama' },
 { name: 'Google Gemini', value: 'gemini' },
 ],
@@ -43,6 +44,9 @@ export const initCommand = new Command('init')
 case 'anthropic':
 defaultModel = 'claude-3-5-sonnet-20240620';
 break;
+case 'openrouter':
+defaultModel = 'openrouter/auto';
+break;
 case 'ollama':
 defaultModel = 'llama3';
 break;
@@ -59,10 +63,23 @@ export const initCommand = new Command('init')
 });
 let apiKey;
 const hasExistingKey = !!currentConfig.llm.api_key;
-const apiKeyMessage = hasExistingKey
+let apiKeyMessage = hasExistingKey
 ? 'Enter API Key (leave empty to preserve existing, or if using env vars):'
 : 'Enter API Key (leave empty if using env vars):';
-if (provider !== 'ollama') {
+// Add info about environment variables to the message
+if (provider === 'openai') {
+apiKeyMessage = `${apiKeyMessage} (Env var: OPENAI_API_KEY)`;
+}
+else if (provider === 'anthropic') {
+apiKeyMessage = `${apiKeyMessage} (Env var: ANTHROPIC_API_KEY)`;
+}
+else if (provider === 'gemini') {
+apiKeyMessage = `${apiKeyMessage} (Env var: GOOGLE_API_KEY)`;
+}
+else if (provider === 'openrouter') {
+apiKeyMessage = `${apiKeyMessage} (Env var: OPENROUTER_API_KEY)`;
+}
+if (provider !== 'ollama' && provider !== 'openrouter') {
 apiKey = await password({
 message: apiKeyMessage,
 });
@@ -75,6 +92,14 @@ export const initCommand = new Command('init')
 if (apiKey) {
 await configManager.set('llm.api_key', apiKey);
 }
+// Base URL Configuration for OpenRouter
+if (provider === 'openrouter') {
+const baseUrl = await input({
+message: 'Enter OpenRouter Base URL:',
+default: currentConfig.llm.base_url || 'https://openrouter.ai/api/v1',
+});
+await configManager.set('llm.base_url', baseUrl);
+}
 // Context Window Configuration
 const contextWindow = await input({
 message: 'Context Window Size (number of messages to send to LLM):',
@@ -105,6 +130,7 @@ export const initCommand = new Command('init')
 choices: [
 { name: 'OpenAI', value: 'openai' },
 { name: 'Anthropic', value: 'anthropic' },
+{ name: 'OpenRouter', value: 'openrouter' },
 { name: 'Ollama', value: 'ollama' },
 { name: 'Google Gemini', value: 'gemini' },
 ],
@@ -118,6 +144,9 @@ export const initCommand = new Command('init')
 case 'anthropic':
 defaultSatiModel = 'claude-3-5-sonnet-20240620';
 break;
+case 'openrouter':
+defaultSatiModel = 'openrouter/auto';
+break;
 case 'ollama':
 defaultSatiModel = 'llama3';
 break;
@@ -133,9 +162,22 @@ export const initCommand = new Command('init')
 default: defaultSatiModel,
 });
 const hasExistingSatiKey = !!currentConfig.santi?.api_key;
-const santiKeyMsg = hasExistingSatiKey
-? 'Enter Sati API Key (leave empty to preserve existing):'
-: 'Enter Sati API Key:';
+let santiKeyMsg = hasExistingSatiKey
+? 'Enter Sati API Key (leave empty to preserve existing, or if using env vars):'
+: 'Enter Sati API Key (leave empty if using env vars):';
+// Add info about environment variables to the message
+if (santiProvider === 'openai') {
+santiKeyMsg = `${santiKeyMsg} (Env var: OPENAI_API_KEY)`;
+}
+else if (santiProvider === 'anthropic') {
+santiKeyMsg = `${santiKeyMsg} (Env var: ANTHROPIC_API_KEY)`;
+}
+else if (santiProvider === 'gemini') {
+santiKeyMsg = `${santiKeyMsg} (Env var: GOOGLE_API_KEY)`;
+}
+else if (santiProvider === 'openrouter') {
+santiKeyMsg = `${santiKeyMsg} (Env var: OPENROUTER_API_KEY)`;
+}
 const keyInput = await password({ message: santiKeyMsg });
 if (keyInput) {
 santiApiKey = keyInput;
@@ -146,6 +188,14 @@ export const initCommand = new Command('init')
 else {
 santiApiKey = undefined; // Ensure we don't accidentally carry over invalid state
 }
+// Base URL Configuration for Sati OpenRouter
+if (santiProvider === 'openrouter') {
+const satiBaseUrl = await input({
+message: 'Enter Sati OpenRouter Base URL:',
+default: currentConfig.santi?.base_url || 'https://openrouter.ai/api/v1',
+});
+await configManager.set('santi.base_url', satiBaseUrl);
+}
 }
 const memoryLimit = await input({
 message: 'Sati Memory Retrieval Limit (messages):',
@@ -171,9 +221,11 @@ export const initCommand = new Command('init')
 }
 else {
 const hasExistingAudioKey = !!currentConfig.audio?.apiKey;
-const audioKeyMessage = hasExistingAudioKey
-? 'Enter Gemini API Key for Audio (leave empty to preserve existing):'
-: 'Enter Gemini API Key for Audio:';
+let audioKeyMessage = hasExistingAudioKey
+? 'Enter Gemini API Key for Audio (leave empty to preserve existing, or if using env vars):'
+: 'Enter Gemini API Key for Audio (leave empty if using env vars):';
+// Add info about environment variables to the message
+audioKeyMessage = `${audioKeyMessage} (Env var: GOOGLE_API_KEY)`;
 audioKey = await password({
 message: audioKeyMessage,
 });
@@ -210,10 +262,13 @@ export const initCommand = new Command('init')
 display.log(chalk.gray('1. Create a bot via @BotFather to get your token.'));
 display.log(chalk.gray('2. Get your User ID via @userinfobot.\n'));
 const hasExistingToken = !!currentConfig.channels.telegram?.token;
+let telegramTokenMessage = hasExistingToken
+? 'Enter Telegram Bot Token (leave empty to preserve existing, or if using env vars):'
+: 'Enter Telegram Bot Token (leave empty if using env vars):';
+// Add info about environment variables to the message
+telegramTokenMessage = `${telegramTokenMessage} (Env var: TELEGRAM_BOT_TOKEN)`;
 const token = await password({
-message: hasExistingToken
-? 'Enter Telegram Bot Token (leave empty to preserve existing):'
-: 'Enter Telegram Bot Token:',
+message: telegramTokenMessage,
 validate: (value) => {
 if (value.length > 0)
 return true;
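
Taken together, the wizard changes above mean an OpenRouter setup is prompted for a base URL but not for an API key (that comes from `OPENROUTER_API_KEY`). Roughly, the `llm` section it writes would look like the sketch below; the on-disk file is YAML, and this is only the equivalent object for illustration, with field names taken from dist/config/schemas.js further down.

```js
// Approximate `llm` section produced by `morpheus init` when OpenRouter is chosen
// and the defaults are accepted (sketch, not generated by the package itself).
const llm = {
  provider: 'openrouter',
  model: 'openrouter/auto',                 // default offered by the wizard
  base_url: 'https://openrouter.ai/api/v1', // default offered by the wizard
  // No api_key prompt for OpenRouter: set OPENROUTER_API_KEY (or MORPHEUS_LLM_API_KEY) instead.
};
console.log(llm);
```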
package/dist/config/manager.js
CHANGED
@@ -5,6 +5,7 @@ import { PATHS } from './paths.js';
 import { setByPath } from './utils.js';
 import { ConfigSchema } from './schemas.js';
 import { migrateConfigFile } from '../runtime/migration.js';
+import { resolveApiKey, resolveModel, resolveNumeric, resolveString, resolveBoolean, resolveProvider, resolveStringArray } from './precedence.js';
 export class ConfigManager {
 static instance;
 config = DEFAULT_CONFIG;
@@ -18,16 +19,15 @@ export class ConfigManager {
 async load() {
 try {
 await migrateConfigFile();
+let rawConfig = DEFAULT_CONFIG;
 if (await fs.pathExists(PATHS.config)) {
 const raw = await fs.readFile(PATHS.config, 'utf8');
 const parsed = yaml.load(raw);
 // Validate and merge with defaults via Zod
-this.config = ConfigSchema.parse(parsed);
-}
-else {
-// File doesn't exist, use defaults
-this.config = DEFAULT_CONFIG;
+rawConfig = ConfigSchema.parse(parsed);
 }
+// Apply environment variable precedence to the loaded config
+this.config = this.applyEnvironmentVariablePrecedence(rawConfig);
 }
 catch (error) {
 console.error('Failed to load configuration:', error);
@@ -36,6 +36,85 @@ export class ConfigManager {
 }
 return this.config;
 }
+applyEnvironmentVariablePrecedence(config) {
+// Apply precedence to agent config
+const agentConfig = {
+name: resolveString('MORPHEUS_AGENT_NAME', config.agent.name, DEFAULT_CONFIG.agent.name),
+personality: resolveString('MORPHEUS_AGENT_PERSONALITY', config.agent.personality, DEFAULT_CONFIG.agent.personality)
+};
+// Apply precedence to LLM config
+const llmProvider = resolveProvider('MORPHEUS_LLM_PROVIDER', config.llm.provider, DEFAULT_CONFIG.llm.provider);
+const llmConfig = {
+provider: llmProvider,
+model: resolveModel(llmProvider, 'MORPHEUS_LLM_MODEL', config.llm.model),
+temperature: resolveNumeric('MORPHEUS_LLM_TEMPERATURE', config.llm.temperature, DEFAULT_CONFIG.llm.temperature),
+max_tokens: config.llm.max_tokens !== undefined ? resolveNumeric('MORPHEUS_LLM_MAX_TOKENS', config.llm.max_tokens, config.llm.max_tokens) : undefined,
+api_key: resolveApiKey(llmProvider, 'MORPHEUS_LLM_API_KEY', config.llm.api_key),
+base_url: config.llm.base_url, // base_url doesn't have environment variable precedence for now
+context_window: config.llm.context_window !== undefined ? resolveNumeric('MORPHEUS_LLM_CONTEXT_WINDOW', config.llm.context_window, DEFAULT_CONFIG.llm.context_window) : undefined
+};
+// Apply precedence to Sati config
+let santiConfig;
+if (config.santi) {
+const santiProvider = resolveProvider('MORPHEUS_SANTI_PROVIDER', config.santi.provider, llmConfig.provider);
+santiConfig = {
+provider: santiProvider,
+model: resolveModel(santiProvider, 'MORPHEUS_SANTI_MODEL', config.santi.model || llmConfig.model),
+temperature: resolveNumeric('MORPHEUS_SANTI_TEMPERATURE', config.santi.temperature, llmConfig.temperature),
+max_tokens: config.santi.max_tokens !== undefined ? resolveNumeric('MORPHEUS_SANTI_MAX_TOKENS', config.santi.max_tokens, config.santi.max_tokens) : llmConfig.max_tokens,
+api_key: resolveApiKey(santiProvider, 'MORPHEUS_SANTI_API_KEY', config.santi.api_key || llmConfig.api_key),
+base_url: config.santi.base_url || config.llm.base_url,
+context_window: config.santi.context_window !== undefined ? resolveNumeric('MORPHEUS_SANTI_CONTEXT_WINDOW', config.santi.context_window, config.santi.context_window) : llmConfig.context_window,
+memory_limit: config.santi.memory_limit !== undefined ? resolveNumeric('MORPHEUS_SANTI_MEMORY_LIMIT', config.santi.memory_limit, config.santi.memory_limit) : undefined
+};
+}
+// Apply precedence to audio config
+const audioConfig = {
+provider: config.audio.provider, // Audio provider is fixed as 'google'
+model: resolveString('MORPHEUS_AUDIO_MODEL', config.audio.model, DEFAULT_CONFIG.audio.model),
+enabled: resolveBoolean('MORPHEUS_AUDIO_ENABLED', config.audio.enabled, DEFAULT_CONFIG.audio.enabled),
+apiKey: resolveApiKey('gemini', 'MORPHEUS_AUDIO_API_KEY', config.audio.apiKey),
+maxDurationSeconds: resolveNumeric('MORPHEUS_AUDIO_MAX_DURATION', config.audio.maxDurationSeconds, DEFAULT_CONFIG.audio.maxDurationSeconds),
+supportedMimeTypes: config.audio.supportedMimeTypes
+};
+// Apply precedence to channel configs
+const channelsConfig = {
+telegram: {
+enabled: resolveBoolean('MORPHEUS_TELEGRAM_ENABLED', config.channels.telegram.enabled, DEFAULT_CONFIG.channels.telegram.enabled),
+token: resolveString('MORPHEUS_TELEGRAM_TOKEN', config.channels.telegram.token, config.channels.telegram.token || ''),
+allowedUsers: resolveStringArray('MORPHEUS_TELEGRAM_ALLOWED_USERS', config.channels.telegram.allowedUsers, DEFAULT_CONFIG.channels.telegram.allowedUsers)
+},
+discord: {
+enabled: config.channels.discord.enabled, // Discord doesn't have env var precedence for now
+token: config.channels.discord.token
+}
+};
+// Apply precedence to UI config
+const uiConfig = {
+enabled: resolveBoolean('MORPHEUS_UI_ENABLED', config.ui.enabled, DEFAULT_CONFIG.ui.enabled),
+port: resolveNumeric('MORPHEUS_UI_PORT', config.ui.port, DEFAULT_CONFIG.ui.port)
+};
+// Apply precedence to logging config
+const loggingConfig = {
+enabled: resolveBoolean('MORPHEUS_LOGGING_ENABLED', config.logging.enabled, DEFAULT_CONFIG.logging.enabled),
+level: resolveString('MORPHEUS_LOGGING_LEVEL', config.logging.level, DEFAULT_CONFIG.logging.level),
+retention: resolveString('MORPHEUS_LOGGING_RETENTION', config.logging.retention, DEFAULT_CONFIG.logging.retention)
+};
+// Memory config (deprecated, but keeping for backward compatibility)
+const memoryConfig = {
+limit: config.memory.limit // Not applying env var precedence to deprecated field
+};
+return {
+agent: agentConfig,
+llm: llmConfig,
+santi: santiConfig,
+audio: audioConfig,
+channels: channelsConfig,
+ui: uiConfig,
+logging: loggingConfig,
+memory: memoryConfig
+};
+}
 get() {
 return this.config;
 }
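
Every `MORPHEUS_*` value read above arrives as a string, so `resolveBoolean` and `resolveNumeric` (defined in the new precedence.js below) have to coerce it. The sketch below is a standalone restatement of those coercion rules for illustration, collapsing the config-file and default fallbacks into a single `fallback` argument; it is not imported from the package.

```js
// Standalone restatement of the coercion rules used by resolveBoolean/resolveNumeric
// in dist/config/precedence.js (see the new file below). Not imported from the package.
function coerceBoolean(raw, fallback) {
  if (raw === undefined) return fallback;
  const v = raw.toLowerCase();
  if (v === 'true' || v === '1') return true;
  if (v === 'false' || v === '0') return false;
  return fallback; // unrecognised strings fall through to config/default
}

function coerceNumeric(raw, fallback) {
  if (raw === undefined || raw === '') return fallback;
  const n = Number(raw);
  return Number.isNaN(n) ? fallback : n;
}

console.log(coerceBoolean('1', false));         // true  (e.g. MORPHEUS_UI_ENABLED=1)
console.log(coerceBoolean('yes', false));       // false (unrecognised, falls back)
console.log(coerceNumeric('8080', 3333));       // 8080  (e.g. MORPHEUS_UI_PORT=8080)
console.log(coerceNumeric('not-a-port', 3333)); // 3333
```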

package/dist/config/precedence.js
ADDED
@@ -0,0 +1,138 @@
+/**
+ * Functions to resolve configuration values with precedence:
+ * 1. Provider-specific environment variable (e.g., OPENAI_API_KEY)
+ * 2. Generic environment variable (e.g., MORPHEUS_LLM_API_KEY)
+ * 3. Configuration file value
+ * 4. Default value
+ */
+/**
+ * Resolve an API key with provider-specific precedence
+ * @param provider The current provider
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @returns The resolved API key value
+ */
+export function resolveApiKey(provider, genericEnvVar, configFileValue) {
+// Map provider to its specific environment variable
+const providerSpecificVars = {
+'openai': 'OPENAI_API_KEY',
+'anthropic': 'ANTHROPIC_API_KEY',
+'openrouter': 'OPENROUTER_API_KEY',
+'ollama': '', // Ollama typically doesn't need an API key
+'gemini': 'GOOGLE_API_KEY'
+};
+const providerSpecificVar = providerSpecificVars[provider];
+// Check provider-specific variable first, then generic, then config file
+if (providerSpecificVar && process.env[providerSpecificVar]) {
+return process.env[providerSpecificVar];
+}
+if (process.env[genericEnvVar]) {
+return process.env[genericEnvVar];
+}
+return configFileValue;
+}
+/**
+ * Resolve a model name with provider-specific precedence
+ * @param provider The current provider
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @returns The resolved model name value
+ */
+export function resolveModel(provider, genericEnvVar, configFileValue) {
+// For now, we don't have provider-specific model variables, but we could add them later
+// Check generic variable first, then config file
+if (process.env[genericEnvVar]) {
+return process.env[genericEnvVar];
+}
+return configFileValue;
+}
+/**
+ * Resolve a numeric configuration value
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @param defaultValue The default value to use if none is found
+ * @returns The resolved numeric value
+ */
+export function resolveNumeric(genericEnvVar, configFileValue, defaultValue) {
+if (process.env[genericEnvVar] !== undefined && process.env[genericEnvVar] !== '') {
+const envValue = Number(process.env[genericEnvVar]);
+if (!isNaN(envValue)) {
+return envValue;
+}
+}
+if (configFileValue !== undefined) {
+return configFileValue;
+}
+return defaultValue;
+}
+/**
+ * Resolve a string configuration value
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @param defaultValue The default value to use if none is found
+ * @returns The resolved string value
+ */
+export function resolveString(genericEnvVar, configFileValue, defaultValue) {
+if (process.env[genericEnvVar]) {
+return process.env[genericEnvVar];
+}
+if (configFileValue !== undefined) {
+return configFileValue;
+}
+return defaultValue;
+}
+/**
+ * Resolve a boolean configuration value
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @param defaultValue The default value to use if none is found
+ * @returns The resolved boolean value
+ */
+export function resolveBoolean(genericEnvVar, configFileValue, defaultValue) {
+if (process.env[genericEnvVar] !== undefined) {
+const envValue = process.env[genericEnvVar]?.toLowerCase();
+if (envValue === 'true' || envValue === '1') {
+return true;
+}
+else if (envValue === 'false' || envValue === '0') {
+return false;
+}
+}
+if (configFileValue !== undefined) {
+return configFileValue;
+}
+return defaultValue;
+}
+/**
+ * Resolve an array of string configuration value
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @param defaultValue The default value to use if none is found
+ * @returns The resolved array of strings value
+ */
+export function resolveStringArray(genericEnvVar, configFileValue, defaultValue) {
+if (process.env[genericEnvVar]) {
+// Split the environment variable by commas and trim whitespace
+return process.env[genericEnvVar].split(',').map(item => item.trim()).filter(item => item.length > 0);
+}
+if (configFileValue !== undefined) {
+return configFileValue;
+}
+return defaultValue;
+}
+/**
+ * Resolve a provider configuration value
+ * @param genericEnvVar The generic environment variable name
+ * @param configFileValue The value from the config file
+ * @param defaultValue The default value to use if none is found
+ * @returns The resolved provider value
+ */
+export function resolveProvider(genericEnvVar, configFileValue, defaultValue) {
+if (process.env[genericEnvVar]) {
+return process.env[genericEnvVar];
+}
+if (configFileValue !== undefined) {
+return configFileValue;
+}
+return defaultValue;
+}
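
A usage sketch for the helpers above, showing the documented precedence order in action. The relative import path assumes the script sits next to the shipped dist/config/precedence.js file; adjust it for your own layout.

```js
// Usage sketch for dist/config/precedence.js (import path is an assumption).
import { resolveApiKey, resolveStringArray } from './precedence.js';

// 1. Provider-specific variable wins over everything else.
process.env.OPENAI_API_KEY = 'sk-provider-specific';
process.env.MORPHEUS_LLM_API_KEY = 'generic-key';
console.log(resolveApiKey('openai', 'MORPHEUS_LLM_API_KEY', 'key-from-config'));
// -> 'sk-provider-specific'

// 2. Without the provider-specific variable, the generic MORPHEUS_* one is used.
delete process.env.OPENAI_API_KEY;
console.log(resolveApiKey('openai', 'MORPHEUS_LLM_API_KEY', 'key-from-config'));
// -> 'generic-key'

// 3. With neither set, the config file value comes back unchanged.
delete process.env.MORPHEUS_LLM_API_KEY;
console.log(resolveApiKey('openai', 'MORPHEUS_LLM_API_KEY', 'key-from-config'));
// -> 'key-from-config'

// Comma-separated lists are split, trimmed, and emptied entries dropped.
process.env.MORPHEUS_TELEGRAM_ALLOWED_USERS = ' 12345, 67890 ';
console.log(resolveStringArray('MORPHEUS_TELEGRAM_ALLOWED_USERS', [], []));
// -> ['12345', '67890']
```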
package/dist/config/schemas.js
CHANGED
@@ -2,17 +2,19 @@ import { z } from 'zod';
 import { DEFAULT_CONFIG } from '../types/config.js';
 export const AudioConfigSchema = z.object({
 provider: z.enum(['google']).default(DEFAULT_CONFIG.audio.provider),
+model: z.string().min(1).default(DEFAULT_CONFIG.audio.model),
 enabled: z.boolean().default(DEFAULT_CONFIG.audio.enabled),
 apiKey: z.string().optional(),
 maxDurationSeconds: z.number().default(DEFAULT_CONFIG.audio.maxDurationSeconds),
 supportedMimeTypes: z.array(z.string()).default(DEFAULT_CONFIG.audio.supportedMimeTypes),
 });
 export const LLMConfigSchema = z.object({
-provider: z.enum(['openai', 'anthropic', 'ollama', 'gemini']).default(DEFAULT_CONFIG.llm.provider),
+provider: z.enum(['openai', 'anthropic', 'openrouter', 'ollama', 'gemini']).default(DEFAULT_CONFIG.llm.provider),
 model: z.string().min(1).default(DEFAULT_CONFIG.llm.model),
 temperature: z.number().min(0).max(1).default(DEFAULT_CONFIG.llm.temperature),
 max_tokens: z.number().int().positive().optional(),
 api_key: z.string().optional(),
+base_url: z.string().optional(),
 context_window: z.number().int().positive().optional(),
 });
 export const SatiConfigSchema = LLMConfigSchema.extend({