@satori-sh/cli 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +2 -12
  2. package/dist/index.js +73 -125
  3. package/package.json +2 -1
package/README.md CHANGED
@@ -21,7 +21,6 @@ npm install -g @satori-sh/cli
 Start memory-augmented chat sessions:
 ```bash
 satori "What's the best pizza topping?"
-satori "hello" --provider openai --model gpt-4o
 ```
 
 ### Search Memories
@@ -39,10 +38,7 @@ satori add "I like pizza"
 ```
 
 **Options & Memory:**
-- `--provider <openai|anthropic>` (default: openai)
-- `--model <model>` (default: gpt-4o)
 - `--memory-id <id>` (scopes conversations)
-- `--no-stream` (disables streaming)
 
 **Memory Sessions:** If no `--memory-id` is provided, a random ID is generated. Set `SATORI_MEMORY_ID=generated-id` to continue sessions:
 ```bash
@@ -55,22 +51,16 @@ satori chat "Follow up question"
 **Required:**
 - `SATORI_API_KEY` - Satori authentication key
 
-**For Chat:**
-- `OPENAI_API_KEY` / `ANTHROPIC_API_KEY` - LLM provider keys
-
 **Optional:**
 - `SATORI_BASE_URL` (default: http://localhost:8000)
-- `SATORI_PROVIDER` (default: openai)
-- `SATORI_MODEL` (default: gpt-4o)
 - `SATORI_MEMORY_ID` - Session scoping
 - `SATORI_MOCK` - Enable mock mode
 
 ## Troubleshooting
 
-- **API Key Errors**: Ensure provider keys are set (`OPENAI_API_KEY` or `ANTHROPIC_API_KEY`)
-- **Memory Server Down**: Chat falls back to raw LLM responses with warning logs
+- **API Key Errors**: Ensure `SATORI_API_KEY` is set
+- **Memory Server Down**: Chat requests to `/ask` will fail
 - **Memory ID Issues**: Invalid IDs scope searches but don't break functionality
-- **Streaming Problems**: Use `--no-stream` for terminal compatibility
 
 ## Contributing
 
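The "Memory Sessions" behavior described in the README maps to the new `resolveMemoryId` helper in the dist diff below: when no `--memory-id` is given, the CLI builds an ID from random words, announces it after the first answer, and saves it for reuse. A hedged sketch of that resolution (the word count and the exact precedence between flag, saved config, and `SATORI_MEMORY_ID` fall partly outside the visible hunks, so treat this as an approximation):

```js
// Hedged sketch of memory-session resolution, condensed from the
// resolveMemoryId hunk below. Persistence (saveMemoryId) is omitted, and
// the generate() options are a guess; only the join("-") shape and the
// instruction string come directly from the diff.
import { generate } from "random-words";

async function resolveMemoryId(options = {}) {
  let memoryId = options.memoryId || process.env.SATORI_MEMORY_ID;
  let generated = false;
  if (!memoryId) {
    memoryId = generate({ exactly: 3 }).join("-"); // e.g. "quiet-amber-harbor"
    generated = true;
  }
  // Printed after the first answer so the session can be resumed later
  // via SATORI_MEMORY_ID=<generated-id>.
  const instruction = generated ? `Memory session id: ${memoryId}.` : undefined;
  return { memoryId, instruction };
}
```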
package/dist/index.js CHANGED
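Note on the changes below: chat no longer calls OpenAI or Anthropic directly; every prompt becomes a single authenticated POST to the memory server. A minimal standalone sketch of that request (endpoint, field names, and the `data.response` shape are taken from the `callAskAPI` hunk that follows; the default base URL is the one the README documents; Node 18+ assumed for global `fetch`):

```js
// Minimal sketch of the chat request this release routes through the
// Satori server, mirroring the callAskAPI helper added in the diff below.
// Assumes SATORI_API_KEY is set; base URL defaults per the README.
const baseUrl = process.env.SATORI_BASE_URL || "http://localhost:8000";
const apiKey = process.env.SATORI_API_KEY;

async function ask(prompt, memoryId) {
  const response = await fetch(`${baseUrl}/ask`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`
    },
    body: JSON.stringify({
      prompt,
      memory_id: memoryId, // same scoping as --memory-id / SATORI_MEMORY_ID
      llm_model: "gpt-4o"  // the CLI's hard-coded DEFAULT_LLM_MODEL
    })
  });
  if (!response.ok) {
    throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  return data.response; // the server returns the completed answer here
}

ask("What's the best pizza topping?", "my-session").then(console.log);
```

Since the CLI pins `llm_model` to its `DEFAULT_LLM_MODEL` constant and the `--provider`/`--model` flags are gone, model choice now lives entirely behind the server's `/ask` endpoint.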
@@ -35,6 +35,13 @@ async function saveApiKey(apiKey) {
     throw new Error(`Failed to save API key: ${error instanceof Error ? error.message : error}`);
   }
 }
+async function getStoredApiKey() {
+  const data = await loadConfigFile();
+  if (typeof data.api_key === "string") {
+    return data.api_key;
+  }
+  return null;
+}
 async function saveMemoryId(memoryId) {
   const { promises: fs } = await import("fs");
   await checkWriteAccess();
@@ -104,14 +111,10 @@ async function getConfig() {
       throw new Error(`Failed to generate API key: ${error instanceof Error ? error.message : error}`);
     }
   }
-  const provider = process.env.SATORI_PROVIDER || "openai";
-  const model = process.env.SATORI_MODEL || "gpt-4o";
-  const openaiKey = process.env.OPENAI_API_KEY;
-  const anthropicKey = process.env.ANTHROPIC_API_KEY;
   if (!memoryId) {
     memoryId = process.env.SATORI_MEMORY_ID;
   }
-  return { apiKey, baseUrl, provider, model, openaiKey, anthropicKey, memoryId };
+  return { apiKey, baseUrl, memoryId };
 }
 
 // src/search.ts
@@ -168,7 +171,7 @@ async function addMemories(text, options = {}) {
 
 // src/memory.ts
 import { generate } from "random-words";
-async function buildMemoryContext(prompt, options = {}) {
+async function resolveMemoryId(options = {}) {
   const config = await getConfig();
   let memoryId;
   let generated = false;
@@ -183,37 +186,13 @@ async function buildMemoryContext(prompt, options = {}) {
     memoryId = words.join("-");
     generated = true;
   }
-  const topK = options.topK || 5;
-  const url = `${config.baseUrl}/search`;
-  const headers = {
-    "Content-Type": "application/json",
-    "Authorization": `Bearer ${config.apiKey}`
-  };
-  const body = JSON.stringify({
-    query: prompt,
-    memory_id: memoryId,
-    top_k: topK
-  });
-  const response = await fetch(url, {
-    method: "POST",
-    headers,
-    body
-  });
-  if (!response.ok) {
-    throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
-  }
-  const data = await response.json();
   const instruction = generated ? `Memory session id: ${memoryId}.` : void 0;
   if (generated) {
     saveMemoryId(memoryId).catch((err) => {
       console.error("Failed to save memory ID:", err);
     });
   }
-  return {
-    results: data.results,
-    memoryId,
-    instruction
-  };
+  return { memoryId, instruction };
 }
 function enhanceMessagesWithMemory(messages, memoryContext) {
   const validResults = memoryContext.results.filter((r) => r.memory && r.memory.trim() !== "" && r.memory !== "undefined");
@@ -229,116 +208,61 @@ ${memoryText}`
   return [systemMessage, ...messages];
 }
 
-// src/providers.ts
-async function _callOpenAI(messages, options) {
-  const config = await getConfig();
-  const apiKey = config.openaiKey;
-  if (!apiKey) {
-    throw new Error("Missing API key for OPENAI_API_KEY");
-  }
-  const url = "https://api.openai.com/v1/chat/completions";
-  const body = {
-    model: config.model,
-    messages,
-    temperature: options.temperature ?? 0.7,
-    max_tokens: options.maxTokens ?? 1e3,
-    stream: options.stream ?? false
-  };
-  const headers = {
-    "Content-Type": "application/json",
-    "Authorization": `Bearer ${apiKey}`
-  };
-  const response = await fetch(url, {
-    method: "POST",
-    headers,
-    body: JSON.stringify(body)
-  });
-  if (!response.ok) {
-    throw new Error(`OpenAI API error: ${response.status} ${response.statusText}`);
-  }
-  const data = await response.json();
-  return data.choices[0].message.content;
-}
-async function _callAnthropic(messages, options) {
-  const config = await getConfig();
-  const apiKey = config.anthropicKey;
-  if (!apiKey) {
-    throw new Error("Missing API key for ANTHROPIC_API_KEY");
-  }
-  const url = "https://api.anthropic.com/v1/messages";
-  const body = {
-    model: config.model,
-    messages,
-    temperature: options.temperature ?? 0.7,
-    max_tokens: options.maxTokens ?? 1e3,
-    stream: options.stream ?? false
-  };
-  const headers = {
-    "Content-Type": "application/json",
-    "Authorization": `Bearer ${apiKey}`,
-    "anthropic-version": "2023-06-01"
-  };
-  const response = await fetch(url, {
-    method: "POST",
-    headers,
-    body: JSON.stringify(body)
-  });
-  if (!response.ok) {
-    throw new Error(`Anthropic API error: ${response.status} ${response.statusText}`);
-  }
-  const data = await response.json();
-  return data.content[0].text;
-}
-async function _callProviderAPI(messages, options, provider) {
-  if (provider === "openai") {
-    return _callOpenAI(messages, options);
-  } else if (provider === "anthropic") {
-    return _callAnthropic(messages, options);
-  } else {
-    throw new Error(`Unsupported provider: ${provider}`);
-  }
-}
-
 // src/index.ts
 async function main() {
-  try {
-    await getConfig();
-  } catch (error) {
-    console.error(error instanceof Error ? error.message : "Configuration error");
-    process.exit(1);
+  const argv = process.argv.slice(2);
+  const isGetApiKey = argv[0] === "get" && argv[1] === "apikey";
+  if (!isGetApiKey) {
+    try {
+      await getConfig();
+    } catch (error) {
+      console.error(error instanceof Error ? error.message : "Configuration error");
+      process.exit(1);
+    }
   }
   const __dirname = dirname(fileURLToPath(import.meta.url));
   const logoPath = join2(__dirname, "..", "logo.txt");
   console.log(chalk.cyan(readFileSync(logoPath, "utf8")));
   const program = new Command();
   program.name("satori").description("CLI tool for Satori memory server").version("0.0.1");
-  program.option("--provider <provider>", "Provider to use (openai or anthropic)", "openai").option("--model <model>", "Model to use", "gpt-4o").option("--memory-id <id>", "Memory ID for scoping");
-  const processUserInput = async (input, options, isInteractive = false) => {
-    let memoryContext;
-    try {
-      memoryContext = await buildMemoryContext(input, { memoryId: options.memoryId });
-    } catch (memoryError) {
-      memoryContext = { results: [], memoryId: options.memoryId, instruction: void 0 };
+  program.option("--memory-id <id>", "Memory ID for scoping");
+  const DEFAULT_LLM_MODEL = "gpt-4o";
+  const callAskAPI = async (prompt, memoryId) => {
+    const config = await getConfig();
+    const response = await fetch(`${config.baseUrl}/ask`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "Authorization": `Bearer ${config.apiKey}`
+      },
+      body: JSON.stringify({
+        prompt,
+        memory_id: memoryId,
+        llm_model: DEFAULT_LLM_MODEL
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`HTTP error: ${response.status} ${response.statusText}`);
     }
-    const userMessage = { role: "user", content: input };
-    const enhancedMessages = enhanceMessagesWithMemory([userMessage], { results: memoryContext.results });
-    const response = await _callProviderAPI(enhancedMessages, {
-      temperature: 0.7,
-      maxTokens: 1e3
-    }, options.provider);
+    const data = await response.json();
+    return data.response;
+  };
+  const processUserInput = async (input, options, isInteractive = false) => {
+    const { memoryId, instruction } = await resolveMemoryId({ memoryId: options.memoryId });
+    const response = await callAskAPI(input, memoryId);
     if (isInteractive) {
      console.log(`Assistant: ${response}`);
    } else {
      console.log(response);
    }
-    if (memoryContext.instruction) {
+    if (instruction) {
      console.log(`
-${memoryContext.instruction}`);
+${instruction}`);
    }
-    addMemories(input, { memoryId: memoryContext.memoryId }).catch((err) => {
+    addMemories(input, { memoryId }).catch((err) => {
      console.error("Failed to save memory:", err);
    });
-    return { response, instruction: memoryContext.instruction, memoryId: memoryContext.memoryId };
+    return { response, instruction, memoryId };
   };
   program.argument("[prompt]", "initial prompt for chat session (optional)").action(async (initialPrompt, options) => {
     try {
@@ -358,8 +282,16 @@ ${memoryContext.instruction}`);
       input: process.stdin,
       output: process.stdout
     });
+    const getPrompt = async () => {
+      if (!memoryId) {
+        const resolved = await resolveMemoryId({ memoryId: options.memoryId });
+        memoryId = resolved.memoryId;
+      }
+      return chalk.cyan(`[${memoryId}] > `);
+    };
     const chatLoop = async () => {
-      rl.question(chalk.cyan("> "), async (input) => {
+      const prompt = await getPrompt();
+      rl.question(prompt, async (input) => {
         if (input.toLowerCase() === "exit" || input.toLowerCase() === "quit") {
           console.log("Goodbye!");
           rl.close();
@@ -378,7 +310,9 @@ ${memoryContext.instruction}`);
         chatLoop();
       });
     };
-    console.log('\nEntering interactive mode. Type "exit" or "quit" to end the session.\n');
+    console.log(chalk.magenta("\nSatori is memory for AI. It remembers your sessions forever. Think of it like 'infinite context' for AI.\n"));
+    console.log(chalk.cyan("You're in interactive mode. Use interactive mode just like you would use ChatGPT."));
+    console.log(chalk.cyan('Type "exit" or "quit" to end the session.\n'));
     chatLoop();
   } catch (error) {
     console.error("Chat error:", error instanceof Error ? error.message : error);
@@ -391,6 +325,20 @@ ${memoryContext.instruction}`);
   program.command("search").description("search memories").argument("<query>", "search query for memories").action(async (query) => {
     await searchMemories(query);
   });
+  program.command("get").description("get config value").argument("<key>", "key to get").action(async (key) => {
+    if (key !== "apikey") {
+      console.error("Unknown key");
+      process.exit(1);
+    }
+    const apiKey = await getStoredApiKey();
+    if (!apiKey) {
+      console.error("API key not found in config");
+      process.exit(1);
+    }
+    console.log(chalk.magenta(`
+${apiKey}
+`));
+  });
   program.parse();
 }
 var entryPath = process.argv[1] ? realpathSync(process.argv[1]) : "";
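One subtlety in the new `get apikey` subcommand: it reads the key from disk via `getStoredApiKey()` instead of going through `getConfig()`, which is why `main()` inspects `argv` and skips configuration validation for exactly that invocation. A standalone sketch of the read path (`loadConfigFile` is the bundle's own helper whose file location isn't shown in this diff, so the path below is a placeholder):

```js
// Sketch of the getStoredApiKey() read path. The config location is a
// placeholder assumption; only the api_key field and the null fallback
// come from the diff itself.
import { promises as fs } from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";

const CONFIG_PATH = join(homedir(), ".satori", "config.json"); // hypothetical

async function loadConfigFile() {
  try {
    return JSON.parse(await fs.readFile(CONFIG_PATH, "utf8"));
  } catch {
    return {}; // a missing or unreadable config behaves like an empty one
  }
}

async function getStoredApiKey() {
  const data = await loadConfigFile();
  return typeof data.api_key === "string" ? data.api_key : null;
}
```

`satori get apikey` then prints the stored key, padded by blank lines via the multi-line template literal, or exits with status 1 when nothing is stored.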
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@satori-sh/cli",
-  "version": "0.0.11",
+  "version": "0.0.13",
   "description": "CLI tool for Satori memory server",
   "type": "module",
   "main": "./dist/index.js",
@@ -19,6 +19,7 @@
     "logo.txt"
   ],
   "dependencies": {
+    "@opentui/core": "^0.1.73",
     "chalk": "^5.6.2",
     "commander": "^12.1.0",
     "random-words": "^2.0.1"