morpheus-cli 0.2.4 → 0.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -105,7 +105,7 @@ export class TelegramAdapter {
105
105
  await ctx.reply(`🎤 *Transcription*: _"${text}"_`, { parse_mode: 'Markdown' });
106
106
  await ctx.sendChatAction('typing');
107
107
  // Process with Agent
108
- const response = await this.oracle.chat(text, usage);
108
+ const response = await this.oracle.chat(text, usage, true);
109
109
  // if (listeningMsg) {
110
110
  // try {
111
111
  // await ctx.telegram.deleteMessage(ctx.chat.id, listeningMsg.message_id);
@@ -47,6 +47,68 @@ export const doctorCommand = new Command('doctor')
47
47
  else if (deprecatedLimit !== undefined && contextWindow !== undefined) {
48
48
  console.log(chalk.yellow('⚠') + ' Found both \'memory.limit\' and \'llm.context_window\'. Remove \'memory.limit\' from config.');
49
49
  }
50
+ // Check API keys availability for active providers
51
+ const llmProvider = config.llm?.provider;
52
+ const santiProvider = config.santi?.provider;
53
+ // Check LLM provider API key
54
+ if (llmProvider && llmProvider !== 'ollama') {
55
+ const hasLlmApiKey = config.llm?.api_key ||
56
+ (llmProvider === 'openai' && process.env.OPENAI_API_KEY) ||
57
+ (llmProvider === 'anthropic' && process.env.ANTHROPIC_API_KEY) ||
58
+ (llmProvider === 'gemini' && process.env.GOOGLE_API_KEY) ||
59
+ (llmProvider === 'openrouter' && process.env.OPENROUTER_API_KEY);
60
+ if (hasLlmApiKey) {
61
+ console.log(chalk.green('✓') + ` LLM API key available for ${llmProvider}`);
62
+ }
63
+ else {
64
+ console.log(chalk.red('✗') + ` LLM API key missing for ${llmProvider}. Either set in config or define environment variable.`);
65
+ allPassed = false;
66
+ }
67
+ }
68
+ // Check Santi provider API key
69
+ if (santiProvider && santiProvider !== 'ollama') {
70
+ const hasSantiApiKey = config.santi?.api_key ||
71
+ (santiProvider === 'openai' && process.env.OPENAI_API_KEY) ||
72
+ (santiProvider === 'anthropic' && process.env.ANTHROPIC_API_KEY) ||
73
+ (santiProvider === 'gemini' && process.env.GOOGLE_API_KEY) ||
74
+ (santiProvider === 'openrouter' && process.env.OPENROUTER_API_KEY);
75
+ if (hasSantiApiKey) {
76
+ console.log(chalk.green('✓') + ` Santi API key available for ${santiProvider}`);
77
+ }
78
+ else {
79
+ console.log(chalk.red('✗') + ` Santi API key missing for ${santiProvider}. Either set in config or define environment variable.`);
80
+ allPassed = false;
81
+ }
82
+ }
83
+ // Check audio API key if enabled
84
+ if (config.audio?.enabled && config.llm?.provider !== 'gemini') {
85
+ const hasAudioApiKey = config.audio?.apiKey || process.env.GOOGLE_API_KEY;
86
+ if (hasAudioApiKey) {
87
+ console.log(chalk.green('✓') + ' Audio API key available for transcription');
88
+ }
89
+ else {
90
+ console.log(chalk.red('✗') + ' Audio API key missing. Either set in config or define GOOGLE_API_KEY environment variable.');
91
+ allPassed = false;
92
+ }
93
+ }
94
+ // Check Telegram token if enabled
95
+ if (config.channels?.telegram?.enabled) {
96
+ const hasTelegramToken = config.channels.telegram?.token || process.env.TELEGRAM_BOT_TOKEN;
97
+ if (hasTelegramToken) {
98
+ console.log(chalk.green('✓') + ' Telegram bot token available');
99
+ }
100
+ else {
101
+ console.log(chalk.red('✗') + ' Telegram bot token missing. Either set in config or define TELEGRAM_BOT_TOKEN environment variable.');
102
+ allPassed = false;
103
+ }
104
+ }
105
+ // Check if default password is being used for dashboard
106
+ if (!process.env.THE_ARCHITECT_PASS) {
107
+ console.log(chalk.yellow('⚠') + ' Using default password for dashboard (iamthearchitect). For security, set THE_ARCHITECT_PASS environment variable.');
108
+ }
109
+ else {
110
+ console.log(chalk.green('✓') + ' Custom dashboard password set');
111
+ }
50
112
  }
51
113
  else {
52
114
  console.log(chalk.yellow('!') + ' Configuration: Missing (will be created on start)');
@@ -30,6 +30,7 @@ export const initCommand = new Command('init')
30
30
  choices: [
31
31
  { name: 'OpenAI', value: 'openai' },
32
32
  { name: 'Anthropic', value: 'anthropic' },
33
+ { name: 'OpenRouter', value: 'openrouter' },
33
34
  { name: 'Ollama', value: 'ollama' },
34
35
  { name: 'Google Gemini', value: 'gemini' },
35
36
  ],
@@ -43,6 +44,9 @@ export const initCommand = new Command('init')
43
44
  case 'anthropic':
44
45
  defaultModel = 'claude-3-5-sonnet-20240620';
45
46
  break;
47
+ case 'openrouter':
48
+ defaultModel = 'openrouter/auto';
49
+ break;
46
50
  case 'ollama':
47
51
  defaultModel = 'llama3';
48
52
  break;
@@ -59,10 +63,23 @@ export const initCommand = new Command('init')
59
63
  });
60
64
  let apiKey;
61
65
  const hasExistingKey = !!currentConfig.llm.api_key;
62
- const apiKeyMessage = hasExistingKey
66
+ let apiKeyMessage = hasExistingKey
63
67
  ? 'Enter API Key (leave empty to preserve existing, or if using env vars):'
64
68
  : 'Enter API Key (leave empty if using env vars):';
65
- if (provider !== 'ollama') {
69
+ // Add info about environment variables to the message
70
+ if (provider === 'openai') {
71
+ apiKeyMessage = `${apiKeyMessage} (Env var: OPENAI_API_KEY)`;
72
+ }
73
+ else if (provider === 'anthropic') {
74
+ apiKeyMessage = `${apiKeyMessage} (Env var: ANTHROPIC_API_KEY)`;
75
+ }
76
+ else if (provider === 'gemini') {
77
+ apiKeyMessage = `${apiKeyMessage} (Env var: GOOGLE_API_KEY)`;
78
+ }
79
+ else if (provider === 'openrouter') {
80
+ apiKeyMessage = `${apiKeyMessage} (Env var: OPENROUTER_API_KEY)`;
81
+ }
82
+ if (provider !== 'ollama' && provider !== 'openrouter') {
66
83
  apiKey = await password({
67
84
  message: apiKeyMessage,
68
85
  });
@@ -75,6 +92,14 @@ export const initCommand = new Command('init')
75
92
  if (apiKey) {
76
93
  await configManager.set('llm.api_key', apiKey);
77
94
  }
95
+ // Base URL Configuration for OpenRouter
96
+ if (provider === 'openrouter') {
97
+ const baseUrl = await input({
98
+ message: 'Enter OpenRouter Base URL:',
99
+ default: currentConfig.llm.base_url || 'https://openrouter.ai/api/v1',
100
+ });
101
+ await configManager.set('llm.base_url', baseUrl);
102
+ }
78
103
  // Context Window Configuration
79
104
  const contextWindow = await input({
80
105
  message: 'Context Window Size (number of messages to send to LLM):',
@@ -105,6 +130,7 @@ export const initCommand = new Command('init')
105
130
  choices: [
106
131
  { name: 'OpenAI', value: 'openai' },
107
132
  { name: 'Anthropic', value: 'anthropic' },
133
+ { name: 'OpenRouter', value: 'openrouter' },
108
134
  { name: 'Ollama', value: 'ollama' },
109
135
  { name: 'Google Gemini', value: 'gemini' },
110
136
  ],
@@ -118,6 +144,9 @@ export const initCommand = new Command('init')
118
144
  case 'anthropic':
119
145
  defaultSatiModel = 'claude-3-5-sonnet-20240620';
120
146
  break;
147
+ case 'openrouter':
148
+ defaultSatiModel = 'openrouter/auto';
149
+ break;
121
150
  case 'ollama':
122
151
  defaultSatiModel = 'llama3';
123
152
  break;
@@ -133,9 +162,22 @@ export const initCommand = new Command('init')
133
162
  default: defaultSatiModel,
134
163
  });
135
164
  const hasExistingSatiKey = !!currentConfig.santi?.api_key;
136
- const santiKeyMsg = hasExistingSatiKey
137
- ? 'Enter Sati API Key (leave empty to preserve existing):'
138
- : 'Enter Sati API Key:';
165
+ let santiKeyMsg = hasExistingSatiKey
166
+ ? 'Enter Sati API Key (leave empty to preserve existing, or if using env vars):'
167
+ : 'Enter Sati API Key (leave empty if using env vars):';
168
+ // Add info about environment variables to the message
169
+ if (santiProvider === 'openai') {
170
+ santiKeyMsg = `${santiKeyMsg} (Env var: OPENAI_API_KEY)`;
171
+ }
172
+ else if (santiProvider === 'anthropic') {
173
+ santiKeyMsg = `${santiKeyMsg} (Env var: ANTHROPIC_API_KEY)`;
174
+ }
175
+ else if (santiProvider === 'gemini') {
176
+ santiKeyMsg = `${santiKeyMsg} (Env var: GOOGLE_API_KEY)`;
177
+ }
178
+ else if (santiProvider === 'openrouter') {
179
+ santiKeyMsg = `${santiKeyMsg} (Env var: OPENROUTER_API_KEY)`;
180
+ }
139
181
  const keyInput = await password({ message: santiKeyMsg });
140
182
  if (keyInput) {
141
183
  santiApiKey = keyInput;
@@ -146,6 +188,14 @@ export const initCommand = new Command('init')
146
188
  else {
147
189
  santiApiKey = undefined; // Ensure we don't accidentally carry over invalid state
148
190
  }
191
+ // Base URL Configuration for Sati OpenRouter
192
+ if (santiProvider === 'openrouter') {
193
+ const satiBaseUrl = await input({
194
+ message: 'Enter Sati OpenRouter Base URL:',
195
+ default: currentConfig.santi?.base_url || 'https://openrouter.ai/api/v1',
196
+ });
197
+ await configManager.set('santi.base_url', satiBaseUrl);
198
+ }
149
199
  }
150
200
  const memoryLimit = await input({
151
201
  message: 'Sati Memory Retrieval Limit (messages):',
@@ -171,9 +221,11 @@ export const initCommand = new Command('init')
171
221
  }
172
222
  else {
173
223
  const hasExistingAudioKey = !!currentConfig.audio?.apiKey;
174
- const audioKeyMessage = hasExistingAudioKey
175
- ? 'Enter Gemini API Key for Audio (leave empty to preserve existing):'
176
- : 'Enter Gemini API Key for Audio:';
224
+ let audioKeyMessage = hasExistingAudioKey
225
+ ? 'Enter Gemini API Key for Audio (leave empty to preserve existing, or if using env vars):'
226
+ : 'Enter Gemini API Key for Audio (leave empty if using env vars):';
227
+ // Add info about environment variables to the message
228
+ audioKeyMessage = `${audioKeyMessage} (Env var: GOOGLE_API_KEY)`;
177
229
  audioKey = await password({
178
230
  message: audioKeyMessage,
179
231
  });
@@ -210,10 +262,13 @@ export const initCommand = new Command('init')
210
262
  display.log(chalk.gray('1. Create a bot via @BotFather to get your token.'));
211
263
  display.log(chalk.gray('2. Get your User ID via @userinfobot.\n'));
212
264
  const hasExistingToken = !!currentConfig.channels.telegram?.token;
265
+ let telegramTokenMessage = hasExistingToken
266
+ ? 'Enter Telegram Bot Token (leave empty to preserve existing, or if using env vars):'
267
+ : 'Enter Telegram Bot Token (leave empty if using env vars):';
268
+ // Add info about environment variables to the message
269
+ telegramTokenMessage = `${telegramTokenMessage} (Env var: TELEGRAM_BOT_TOKEN)`;
213
270
  const token = await password({
214
- message: hasExistingToken
215
- ? 'Enter Telegram Bot Token (leave empty to preserve existing):'
216
- : 'Enter Telegram Bot Token:',
271
+ message: telegramTokenMessage,
217
272
  validate: (value) => {
218
273
  if (value.length > 0)
219
274
  return true;
@@ -5,6 +5,7 @@ import { PATHS } from './paths.js';
5
5
  import { setByPath } from './utils.js';
6
6
  import { ConfigSchema } from './schemas.js';
7
7
  import { migrateConfigFile } from '../runtime/migration.js';
8
+ import { resolveApiKey, resolveModel, resolveNumeric, resolveString, resolveBoolean, resolveProvider, resolveStringArray } from './precedence.js';
8
9
  export class ConfigManager {
9
10
  static instance;
10
11
  config = DEFAULT_CONFIG;
@@ -18,16 +19,15 @@ export class ConfigManager {
18
19
  async load() {
19
20
  try {
20
21
  await migrateConfigFile();
22
+ let rawConfig = DEFAULT_CONFIG;
21
23
  if (await fs.pathExists(PATHS.config)) {
22
24
  const raw = await fs.readFile(PATHS.config, 'utf8');
23
25
  const parsed = yaml.load(raw);
24
26
  // Validate and merge with defaults via Zod
25
- this.config = ConfigSchema.parse(parsed);
26
- }
27
- else {
28
- // File doesn't exist, use defaults
29
- this.config = DEFAULT_CONFIG;
27
+ rawConfig = ConfigSchema.parse(parsed);
30
28
  }
29
+ // Apply environment variable precedence to the loaded config
30
+ this.config = this.applyEnvironmentVariablePrecedence(rawConfig);
31
31
  }
32
32
  catch (error) {
33
33
  console.error('Failed to load configuration:', error);
@@ -36,6 +36,85 @@ export class ConfigManager {
36
36
  }
37
37
  return this.config;
38
38
  }
39
+ applyEnvironmentVariablePrecedence(config) {
40
+ // Apply precedence to agent config
41
+ const agentConfig = {
42
+ name: resolveString('MORPHEUS_AGENT_NAME', config.agent.name, DEFAULT_CONFIG.agent.name),
43
+ personality: resolveString('MORPHEUS_AGENT_PERSONALITY', config.agent.personality, DEFAULT_CONFIG.agent.personality)
44
+ };
45
+ // Apply precedence to LLM config
46
+ const llmProvider = resolveProvider('MORPHEUS_LLM_PROVIDER', config.llm.provider, DEFAULT_CONFIG.llm.provider);
47
+ const llmConfig = {
48
+ provider: llmProvider,
49
+ model: resolveModel(llmProvider, 'MORPHEUS_LLM_MODEL', config.llm.model),
50
+ temperature: resolveNumeric('MORPHEUS_LLM_TEMPERATURE', config.llm.temperature, DEFAULT_CONFIG.llm.temperature),
51
+ max_tokens: config.llm.max_tokens !== undefined ? resolveNumeric('MORPHEUS_LLM_MAX_TOKENS', config.llm.max_tokens, config.llm.max_tokens) : undefined,
52
+ api_key: resolveApiKey(llmProvider, 'MORPHEUS_LLM_API_KEY', config.llm.api_key),
53
+ base_url: config.llm.base_url, // base_url doesn't have environment variable precedence for now
54
+ context_window: config.llm.context_window !== undefined ? resolveNumeric('MORPHEUS_LLM_CONTEXT_WINDOW', config.llm.context_window, DEFAULT_CONFIG.llm.context_window) : undefined
55
+ };
56
+ // Apply precedence to Sati config
57
+ let santiConfig;
58
+ if (config.santi) {
59
+ const santiProvider = resolveProvider('MORPHEUS_SANTI_PROVIDER', config.santi.provider, llmConfig.provider);
60
+ santiConfig = {
61
+ provider: santiProvider,
62
+ model: resolveModel(santiProvider, 'MORPHEUS_SANTI_MODEL', config.santi.model || llmConfig.model),
63
+ temperature: resolveNumeric('MORPHEUS_SANTI_TEMPERATURE', config.santi.temperature, llmConfig.temperature),
64
+ max_tokens: config.santi.max_tokens !== undefined ? resolveNumeric('MORPHEUS_SANTI_MAX_TOKENS', config.santi.max_tokens, config.santi.max_tokens) : llmConfig.max_tokens,
65
+ api_key: resolveApiKey(santiProvider, 'MORPHEUS_SANTI_API_KEY', config.santi.api_key || llmConfig.api_key),
66
+ base_url: config.santi.base_url || config.llm.base_url,
67
+ context_window: config.santi.context_window !== undefined ? resolveNumeric('MORPHEUS_SANTI_CONTEXT_WINDOW', config.santi.context_window, config.santi.context_window) : llmConfig.context_window,
68
+ memory_limit: config.santi.memory_limit !== undefined ? resolveNumeric('MORPHEUS_SANTI_MEMORY_LIMIT', config.santi.memory_limit, config.santi.memory_limit) : undefined
69
+ };
70
+ }
71
+ // Apply precedence to audio config
72
+ const audioConfig = {
73
+ provider: config.audio.provider, // Audio provider is fixed as 'google'
74
+ model: resolveString('MORPHEUS_AUDIO_MODEL', config.audio.model, DEFAULT_CONFIG.audio.model),
75
+ enabled: resolveBoolean('MORPHEUS_AUDIO_ENABLED', config.audio.enabled, DEFAULT_CONFIG.audio.enabled),
76
+ apiKey: resolveApiKey('gemini', 'MORPHEUS_AUDIO_API_KEY', config.audio.apiKey),
77
+ maxDurationSeconds: resolveNumeric('MORPHEUS_AUDIO_MAX_DURATION', config.audio.maxDurationSeconds, DEFAULT_CONFIG.audio.maxDurationSeconds),
78
+ supportedMimeTypes: config.audio.supportedMimeTypes
79
+ };
80
+ // Apply precedence to channel configs
81
+ const channelsConfig = {
82
+ telegram: {
83
+ enabled: resolveBoolean('MORPHEUS_TELEGRAM_ENABLED', config.channels.telegram.enabled, DEFAULT_CONFIG.channels.telegram.enabled),
84
+ token: resolveString('MORPHEUS_TELEGRAM_TOKEN', config.channels.telegram.token, config.channels.telegram.token || ''),
85
+ allowedUsers: resolveStringArray('MORPHEUS_TELEGRAM_ALLOWED_USERS', config.channels.telegram.allowedUsers, DEFAULT_CONFIG.channels.telegram.allowedUsers)
86
+ },
87
+ discord: {
88
+ enabled: config.channels.discord.enabled, // Discord doesn't have env var precedence for now
89
+ token: config.channels.discord.token
90
+ }
91
+ };
92
+ // Apply precedence to UI config
93
+ const uiConfig = {
94
+ enabled: resolveBoolean('MORPHEUS_UI_ENABLED', config.ui.enabled, DEFAULT_CONFIG.ui.enabled),
95
+ port: resolveNumeric('MORPHEUS_UI_PORT', config.ui.port, DEFAULT_CONFIG.ui.port)
96
+ };
97
+ // Apply precedence to logging config
98
+ const loggingConfig = {
99
+ enabled: resolveBoolean('MORPHEUS_LOGGING_ENABLED', config.logging.enabled, DEFAULT_CONFIG.logging.enabled),
100
+ level: resolveString('MORPHEUS_LOGGING_LEVEL', config.logging.level, DEFAULT_CONFIG.logging.level),
101
+ retention: resolveString('MORPHEUS_LOGGING_RETENTION', config.logging.retention, DEFAULT_CONFIG.logging.retention)
102
+ };
103
+ // Memory config (deprecated, but keeping for backward compatibility)
104
+ const memoryConfig = {
105
+ limit: config.memory.limit // Not applying env var precedence to deprecated field
106
+ };
107
+ return {
108
+ agent: agentConfig,
109
+ llm: llmConfig,
110
+ santi: santiConfig,
111
+ audio: audioConfig,
112
+ channels: channelsConfig,
113
+ ui: uiConfig,
114
+ logging: loggingConfig,
115
+ memory: memoryConfig
116
+ };
117
+ }
39
118
  get() {
40
119
  return this.config;
41
120
  }
@@ -0,0 +1,138 @@
1
+ /**
2
+ * Functions to resolve configuration values with precedence:
3
+ * 1. Provider-specific environment variable (e.g., OPENAI_API_KEY)
4
+ * 2. Generic environment variable (e.g., MORPHEUS_LLM_API_KEY)
5
+ * 3. Configuration file value
6
+ * 4. Default value
7
+ */
8
/**
 * Resolve an API key, preferring provider-specific environment variables.
 * Order: provider-specific env var > generic env var > config file value.
 * @param provider The current provider (e.g. 'openai', 'anthropic').
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @returns The resolved API key value (may be undefined if none is set).
 */
export function resolveApiKey(provider, genericEnvVar, configFileValue) {
    // Provider-specific environment variable names ('' = provider needs no key).
    const PROVIDER_ENV_VARS = {
        openai: 'OPENAI_API_KEY',
        anthropic: 'ANTHROPIC_API_KEY',
        openrouter: 'OPENROUTER_API_KEY',
        ollama: '', // Ollama typically doesn't need an API key
        gemini: 'GOOGLE_API_KEY'
    };
    const specificVar = PROVIDER_ENV_VARS[provider];
    const fromSpecific = specificVar ? process.env[specificVar] : undefined;
    if (fromSpecific) {
        return fromSpecific;
    }
    const fromGeneric = process.env[genericEnvVar];
    if (fromGeneric) {
        return fromGeneric;
    }
    return configFileValue;
}
34
/**
 * Resolve a model name with environment variable precedence.
 * @param provider The current provider (accepted for future provider-specific vars).
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @returns The resolved model name value.
 */
export function resolveModel(provider, genericEnvVar, configFileValue) {
    // No provider-specific model variables exist yet; only the generic env var
    // is consulted before falling back to the config file.
    const fromEnv = process.env[genericEnvVar];
    return fromEnv ? fromEnv : configFileValue;
}
49
/**
 * Resolve a numeric configuration value with environment variable precedence.
 * Order: generic env var (if it parses as a number) > config file value > default.
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @param defaultValue The default value to use if none is found.
 * @returns The resolved numeric value.
 */
export function resolveNumeric(genericEnvVar, configFileValue, defaultValue) {
    const raw = process.env[genericEnvVar];
    if (raw !== undefined && raw !== '') {
        const parsed = Number(raw);
        // Use Number.isNaN (non-coercing) rather than the global isNaN;
        // non-numeric env values are ignored and resolution falls through.
        if (!Number.isNaN(parsed)) {
            return parsed;
        }
    }
    if (configFileValue !== undefined) {
        return configFileValue;
    }
    return defaultValue;
}
68
/**
 * Resolve a string configuration value with environment variable precedence.
 * Order: non-empty generic env var > config file value > default.
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @param defaultValue The default value to use if none is found.
 * @returns The resolved string value.
 */
export function resolveString(genericEnvVar, configFileValue, defaultValue) {
    const fromEnv = process.env[genericEnvVar];
    if (fromEnv) {
        return fromEnv;
    }
    // An explicitly-set empty string in the config file is still honored.
    return configFileValue !== undefined ? configFileValue : defaultValue;
}
84
/**
 * Resolve a boolean configuration value with environment variable precedence.
 * Recognized env values (case-insensitive): 'true'/'1' and 'false'/'0'.
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @param defaultValue The default value to use if none is found.
 * @returns The resolved boolean value.
 */
export function resolveBoolean(genericEnvVar, configFileValue, defaultValue) {
    const raw = process.env[genericEnvVar];
    if (raw !== undefined) {
        switch (raw.toLowerCase()) {
            case 'true':
            case '1':
                return true;
            case 'false':
            case '0':
                return false;
            default:
                // Unrecognized env value: fall through to config/default.
                break;
        }
    }
    return configFileValue !== undefined ? configFileValue : defaultValue;
}
106
/**
 * Resolve an array-of-strings configuration value with env var precedence.
 * The env var is parsed as a comma-separated list; entries are trimmed and
 * empty entries are dropped.
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @param defaultValue The default value to use if none is found.
 * @returns The resolved array of strings.
 */
export function resolveStringArray(genericEnvVar, configFileValue, defaultValue) {
    const raw = process.env[genericEnvVar];
    if (raw) {
        const items = [];
        for (const part of raw.split(',')) {
            const trimmed = part.trim();
            if (trimmed.length > 0) {
                items.push(trimmed);
            }
        }
        return items;
    }
    return configFileValue !== undefined ? configFileValue : defaultValue;
}
123
/**
 * Resolve a provider identifier with environment variable precedence.
 * Order: non-empty generic env var > config file value > default.
 * @param genericEnvVar The generic environment variable name.
 * @param configFileValue The value from the config file.
 * @param defaultValue The default value to use if none is found.
 * @returns The resolved provider value.
 */
export function resolveProvider(genericEnvVar, configFileValue, defaultValue) {
    const fromEnv = process.env[genericEnvVar];
    if (fromEnv) {
        return fromEnv;
    }
    if (configFileValue === undefined) {
        return defaultValue;
    }
    return configFileValue;
}
@@ -2,17 +2,19 @@ import { z } from 'zod';
2
2
  import { DEFAULT_CONFIG } from '../types/config.js';
3
3
  export const AudioConfigSchema = z.object({
4
4
  provider: z.enum(['google']).default(DEFAULT_CONFIG.audio.provider),
5
+ model: z.string().min(1).default(DEFAULT_CONFIG.audio.model),
5
6
  enabled: z.boolean().default(DEFAULT_CONFIG.audio.enabled),
6
7
  apiKey: z.string().optional(),
7
8
  maxDurationSeconds: z.number().default(DEFAULT_CONFIG.audio.maxDurationSeconds),
8
9
  supportedMimeTypes: z.array(z.string()).default(DEFAULT_CONFIG.audio.supportedMimeTypes),
9
10
  });
10
11
  export const LLMConfigSchema = z.object({
11
- provider: z.enum(['openai', 'anthropic', 'ollama', 'gemini']).default(DEFAULT_CONFIG.llm.provider),
12
+ provider: z.enum(['openai', 'anthropic', 'openrouter', 'ollama', 'gemini']).default(DEFAULT_CONFIG.llm.provider),
12
13
  model: z.string().min(1).default(DEFAULT_CONFIG.llm.model),
13
14
  temperature: z.number().min(0).max(1).default(DEFAULT_CONFIG.llm.temperature),
14
15
  max_tokens: z.number().int().positive().optional(),
15
16
  api_key: z.string().optional(),
17
+ base_url: z.string().optional(),
16
18
  context_window: z.number().int().positive().optional(),
17
19
  });
18
20
  export const SatiConfigSchema = LLMConfigSchema.extend({
@@ -2,13 +2,15 @@ import { AUTH_HEADER } from '../../types/auth.js';
2
2
  import { DisplayManager } from '../../runtime/display.js';
3
3
  /**
4
4
  * Middleware to protect API routes with a password from THE_ARCHITECT_PASS env var.
5
- * If the env var is not set, authentication is skipped (open mode).
5
+ * If the env var is not set, uses default password 'iamthearchitect'.
6
6
  */
7
7
  export const authMiddleware = (req, res, next) => {
8
- const architectPass = process.env.THE_ARCHITECT_PASS;
9
- // If password is not configured, allow all requests
10
- if (!architectPass || architectPass.trim() === '') {
11
- return next();
8
+ // Use environment variable if set, otherwise use default password
9
+ const architectPass = process.env.THE_ARCHITECT_PASS || 'iamthearchitect';
10
+ // If password is not configured (using default), log a warning
11
+ if (!process.env.THE_ARCHITECT_PASS) {
12
+ const display = DisplayManager.getInstance();
13
+ display.log('Using default password for dashboard access. For security, set THE_ARCHITECT_PASS environment variable.', { source: 'http', level: 'warning' });
12
14
  }
13
15
  const providedPass = req.headers[AUTH_HEADER];
14
16
  if (providedPass === architectPass) {
@@ -20,8 +20,29 @@ export class HttpServer {
20
20
  setupMiddleware() {
21
21
  this.app.use(cors());
22
22
  this.app.use(bodyParser.json());
23
+ // Adicionar cabeçalhos para evitar indexação por motores de busca
24
+ this.app.use((req, res, next) => {
25
+ res.setHeader('X-Robots-Tag', 'noindex, nofollow');
26
+ next();
27
+ });
23
28
  }
24
29
  setupRoutes() {
30
+ // Rota de health check pública (sem autenticação)
31
+ this.app.get('/health', (req, res) => {
32
+ res.status(200).json({
33
+ status: 'healthy',
34
+ timestamp: new Date().toISOString(),
35
+ uptime: process.uptime()
36
+ });
37
+ });
38
+ // Rota de health check para o Docker (padrão)
39
+ this.app.get('/api/health', (req, res) => {
40
+ res.status(200).json({
41
+ status: 'healthy',
42
+ timestamp: new Date().toISOString(),
43
+ uptime: process.uptime()
44
+ });
45
+ });
25
46
  this.app.use('/api', authMiddleware, createApiRouter());
26
47
  // Serve static frontend from compiled output
27
48
  const uiPath = path.resolve(__dirname, '../ui');
@@ -13,6 +13,7 @@ const mockConfig = {
13
13
  logging: { enabled: false, level: 'info', retention: '1d' },
14
14
  audio: {
15
15
  provider: 'google',
16
+ model: 'gemini-2.5-flash-lite',
16
17
  enabled: false,
17
18
  maxDurationSeconds: 60,
18
19
  supportedMimeTypes: ['audio/ogg']
@@ -49,7 +49,7 @@ export class Oracle {
49
49
  throw new ProviderError(this.config.llm.provider || 'unknown', err, "Oracle initialization failed");
50
50
  }
51
51
  }
52
- async chat(message, extraUsage) {
52
+ async chat(message, extraUsage, isTelephonist) {
53
53
  if (!this.provider) {
54
54
  throw new Error("Oracle not initialized. Call initialize() first.");
55
55
  }
@@ -61,8 +61,8 @@ export class Oracle {
61
61
  const userMessage = new HumanMessage(message);
62
62
  // Inject provider/model metadata for persistence
63
63
  userMessage.provider_metadata = {
64
- provider: this.config.llm.provider,
65
- model: this.config.llm.model
64
+ provider: isTelephonist ? this.config.audio?.provider : this.config.llm.provider,
65
+ model: isTelephonist ? this.config.audio?.model : this.config.llm.model
66
66
  };
67
67
  // Attach extra usage (e.g. from Audio) to the user message to be persisted
68
68
  if (extraUsage) {
@@ -38,14 +38,24 @@ export class ProviderFactory {
38
38
  model = new ChatOpenAI({
39
39
  modelName: config.model,
40
40
  temperature: config.temperature,
41
- apiKey: config.api_key, // LangChain will also check process.env.OPENAI_API_KEY
41
+ apiKey: process.env.OPENAI_API_KEY || config.api_key, // Check env var first, then config
42
42
  });
43
43
  break;
44
44
  case 'anthropic':
45
45
  model = new ChatAnthropic({
46
46
  modelName: config.model,
47
47
  temperature: config.temperature,
48
- apiKey: config.api_key,
48
+ apiKey: process.env.ANTHROPIC_API_KEY || config.api_key, // Check env var first, then config
49
+ });
50
+ break;
51
+ case 'openrouter':
52
+ model = new ChatOpenAI({
53
+ modelName: config.model,
54
+ temperature: config.temperature,
55
+ apiKey: process.env.OPENROUTER_API_KEY || config.api_key, // Check env var first, then config
56
+ configuration: {
57
+ baseURL: config.base_url || 'https://openrouter.ai/api/v1'
58
+ }
49
59
  });
50
60
  break;
51
61
  case 'ollama':
@@ -60,7 +70,7 @@ export class ProviderFactory {
60
70
  model = new ChatGoogleGenerativeAI({
61
71
  model: config.model,
62
72
  temperature: config.temperature,
63
- apiKey: config.api_key
73
+ apiKey: process.env.GOOGLE_API_KEY || config.api_key // Check env var first, then config
64
74
  });
65
75
  break;
66
76
  default:
@@ -99,7 +109,7 @@ export class ProviderFactory {
99
109
  suggestion = `Model '${config.model}' may not be available. Check provider docs.`;
100
110
  }
101
111
  else if (msg.includes("unsupported provider")) {
102
- suggestion = "Edit your config file to use a supported provider (openai, anthropic, ollama, gemini).";
112
+ suggestion = "Edit your config file to use a supported provider (openai, anthropic, openrouter, ollama, gemini).";
103
113
  }
104
114
  throw new ProviderError(config.provider, error, suggestion);
105
115
  }