askimo 1.0.0 → 1.1.0

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
package/README.md CHANGED
@@ -1,18 +1,27 @@
+ <p align="center">
+   <img width="400" height="400" alt="Askimo"
+     src="https://github.com/user-attachments/assets/cbf2ab5d-5a07-45a2-9109-6a7bc22ea878" />
+ </p>
+
  # Askimo

- A CLI tool for communicating with AI providers (Perplexity, OpenAI, Anthropic).
+ A CLI tool for communicating with AI providers.
+
+ **Supported providers:** Perplexity · OpenAI · Anthropic
+
+ ---

- ## Installation
+ ## 📦 Installation

  ```bash
  npm install -g askimo
  ```

- ## Configuration
+ ## ⚙️ Configuration

  Create a config file at `~/.askimo/config`:

- ```
+ ```bash
  # API Keys (at least one required)
  PERPLEXITY_API_KEY=your-perplexity-key
  OPENAI_API_KEY=your-openai-key
@@ -25,33 +34,34 @@ OPENAI_MODEL=gpt-4o
  ANTHROPIC_MODEL=claude-sonnet-4-20250514
  ```

- ## Usage
+ ---

- ### Ask a single question
+ ## 🚀 Usage
+
+ ### Quick question

  ```bash
  askimo "What is the capital of France?"
  ```

- The `ask` command is the default, so you can omit it:
+ ### Choose a provider

- ```bash
- askimo ask "What is the capital of France?"
- ```
-
- ### Provider flags
+ | Flag | Provider |
+ |------|----------|
+ | `-p` | Perplexity (default) |
+ | `-o` | OpenAI |
+ | `-a` | Anthropic |

  ```bash
- askimo "question" -p   # Use Perplexity (default)
- askimo "question" -o   # Use OpenAI
- askimo "question" -a   # Use Anthropic
+ askimo "explain quantum computing" -o   # Use OpenAI
+ askimo "write a haiku" -a               # Use Anthropic
  ```

  ### Continue a conversation

  ```bash
- askimo "follow up question" -c 1   # Continue last conversation
- askimo "follow up question" -c 2   # Continue second-to-last
+ askimo "tell me more" -c 1   # Continue last conversation
+ askimo "go deeper" -c 2      # Continue second-to-last
  ```

  ### JSON output
@@ -60,36 +70,53 @@ askimo "follow up question" -c 2 # Continue second-to-last
  askimo "question" --json
  ```

- Returns structured JSON with provider, model, question, response, and sources (for Perplexity).
+ ### Pipe content
+
+ ```bash
+ cat code.js | askimo "explain this code"
+ echo "hello world" | askimo "translate to French"
+ ```
+
+ ### Read from file
+
+ ```bash
+ askimo -f code.js "what does this do"
+ askimo -f error.log "find the bug"
+ ```

  ### Interactive chat

  ```bash
- askimo chat
- askimo chat -o     # Chat with OpenAI
- askimo chat -c 1   # Continue last conversation
+ askimo chat        # Start new chat
+ askimo chat -o     # Chat with OpenAI
+ askimo chat -c 1   # Continue last conversation
  ```

- Type `exit` or press `Ctrl+C` to quit.
+ Type `exit` or `Ctrl+C` to quit.

- ### List available models
+ ### List models

  ```bash
- askimo models      # List all providers
- askimo models -p   # Perplexity only
- askimo models -o   # OpenAI only
- askimo models -a   # Anthropic only
+ askimo models      # All providers
+ askimo models -p   # Perplexity only
  ```

- ## Features
+ ---
+
+ ## ✨ Features

- - Streaming responses
- - Conversation history (saved to `~/.askimo/conversations/`)
- - Source citations (Perplexity)
- - Multiple AI providers
- - Configurable default models
+ | Feature | Description |
+ |---------|-------------|
+ | Streaming | Real-time response output |
+ | Piping | Pipe content via stdin |
+ | File input | Read content from files with `-f` |
+ | Citations | Source links with Perplexity |
+ | History | Conversations saved to `~/.askimo/conversations/` |
+ | Multi-provider | Switch between AI providers easily |

- ## Development
+ ---
+
+ ## 🛠️ Development

  ```bash
  npm install
@@ -97,6 +124,8 @@ npm test
  npm run lint
  ```

- ## License
+ ---
+
+ ## 📄 License

  Apache-2.0
package/index.mjs CHANGED
@@ -4,6 +4,7 @@ import { Command } from 'commander'
  import { startChat } from './lib/chat.mjs'
  import { ensureDirectories, loadConfig } from './lib/config.mjs'
  import { createConversation, loadConversation, saveConversation } from './lib/conversation.mjs'
+ import { buildMessage, readFile, readStdin } from './lib/input.mjs'
  import { DEFAULT_MODELS, determineProvider, getProvider, listModels } from './lib/providers.mjs'
  import { generateResponse, outputJson, streamResponse } from './lib/stream.mjs'
  import pkg from './package.json' with { type: 'json' }
@@ -15,14 +16,31 @@ program.name('askimo').description('CLI tool for communicating with AI providers
  program
    .command('ask', { isDefault: true })
    .description('Ask a single question')
-   .argument('<question>', 'The question to ask')
+   .argument('[question]', 'The question to ask (can also pipe content via stdin)')
    .option('-p, --perplexity', 'Use Perplexity AI (default)')
    .option('-o, --openai', 'Use OpenAI')
    .option('-a, --anthropic', 'Use Anthropic Claude')
    .option('-j, --json', 'Output as JSON instead of streaming')
    .option('-c, --continue <n>', 'Continue conversation N (1=last, 2=second-to-last)', Number.parseInt)
+   .option('-f, --file <path>', 'Read content from file')
    .action(async (question, options) => {
      try {
+       const stdinContent = await readStdin()
+       const fileContent = options.file ? await readFile(options.file) : null
+
+       if (stdinContent && options.file) {
+         console.error('Error: Cannot use both piped input and --file flag')
+         process.exit(1)
+       }
+
+       const content = stdinContent || fileContent
+       const message = buildMessage(question, content)
+
+       if (!message) {
+         console.error('Error: No question provided. Use: askimo "question" or pipe content')
+         process.exit(1)
+       }
+
        const config = await loadConfig()
        await ensureDirectories()

@@ -46,7 +64,7 @@ program

        conversation.messages.push({
          role: 'user',
-         content: question
+         content: message
        })

        let responseText
package/lib/input.mjs ADDED
@@ -0,0 +1,55 @@
+ import fs from 'node:fs/promises'
+
+ async function readStdin() {
+   if (process.stdin.isTTY) {
+     return null
+   }
+
+   // In non-TTY environments, check if data is available with a short timeout
+   // to avoid hanging when no data is being piped
+   return new Promise((resolve) => {
+     const chunks = []
+     let hasData = false
+
+     const timeout = setTimeout(() => {
+       if (!hasData) {
+         process.stdin.removeAllListeners()
+         process.stdin.pause()
+         resolve(null)
+       }
+     }, 10)
+
+     process.stdin.on('readable', () => {
+       let chunk = process.stdin.read()
+       while (chunk !== null) {
+         hasData = true
+         chunks.push(chunk)
+         chunk = process.stdin.read()
+       }
+     })
+
+     process.stdin.on('end', () => {
+       clearTimeout(timeout)
+       if (chunks.length === 0) {
+         resolve(null)
+       } else {
+         const content = Buffer.concat(chunks).toString('utf8').trim()
+         resolve(content || null)
+       }
+     })
+   })
+ }
+
+ async function readFile(filePath) {
+   const content = await fs.readFile(filePath, 'utf8')
+   return content.trim() || null
+ }
+
+ function buildMessage(prompt, content) {
+   if (prompt && content) {
+     return `${prompt}:\n\n${content}`
+   }
+   return content || prompt || null
+ }
+
+ export { readStdin, readFile, buildMessage }
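For orientation, here is a minimal usage sketch of the new `lib/input.mjs` helpers, mirroring the flow the diff adds to `index.mjs`. It is illustrative only and not part of the package; the `sketch.mjs` filename and `./example.js` path are hypothetical.

```js
// sketch.mjs — illustrative only, not shipped with askimo.
import { buildMessage, readFile, readStdin } from './lib/input.mjs'

const prompt = process.argv[2]            // e.g. "explain this code"
const piped = await readStdin()           // resolves to null when stdin is a TTY or empty
const fileContent = piped ? null : await readFile('./example.js') // hypothetical path

// buildMessage joins prompt and content as "<prompt>:\n\n<content>",
// or falls back to whichever of the two is present (null if neither).
const message = buildMessage(prompt, piped || fileContent)
console.log(message ?? 'No question provided')
```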
package/lib/stream.mjs CHANGED
@@ -42,7 +42,7 @@ async function generateResponse(model, messages) {
  }

  function buildJsonOutput(conversation, response, sources) {
-   const lastUserMessage = conversation.messages[conversation.messages.length - 1]
+   const lastUserMessage = conversation.messages.findLast((m) => m.role === 'user')
    const output = {
      provider: conversation.provider,
      model: conversation.model,
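The `buildJsonOutput` change matters once a loaded conversation already ends with the assistant's reply: the old index lookup reported that reply as the question. A small illustrative sketch of the difference (not part of the package; `Array.prototype.findLast` is ES2023, so this assumes a recent Node runtime, roughly 18+):

```js
// Illustrative only: why findLast fixes the JSON "question" field.
const messages = [
  { role: 'user', content: 'My question' },
  { role: 'assistant', content: 'My answer' }
]

const previous = messages[messages.length - 1]              // last message, whatever its role
const current = messages.findLast((m) => m.role === 'user') // last *user* message

console.log(previous.content) // "My answer"   (wrong value for "question")
console.log(current.content)  // "My question"
```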
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "askimo",
-   "version": "1.0.0",
+   "version": "1.1.0",
    "description": "A CLI tool for communicating with AI providers (Perplexity, OpenAI, Anthropic)",
    "license": "Apache-2.0",
    "author": "Amit Tal",
package/test/input.mjs ADDED
@@ -0,0 +1,53 @@
+ import test from 'ava'
+ import { buildMessage } from '../lib/input.mjs'
+
+ test('buildMessage combines prompt and content with colon format', (t) => {
+   const result = buildMessage('explain this', 'const x = 1')
+   t.is(result, 'explain this:\n\nconst x = 1')
+ })
+
+ test('buildMessage returns content only when no prompt', (t) => {
+   const result = buildMessage(null, 'some content')
+   t.is(result, 'some content')
+ })
+
+ test('buildMessage returns content only when prompt is undefined', (t) => {
+   const result = buildMessage(undefined, 'some content')
+   t.is(result, 'some content')
+ })
+
+ test('buildMessage returns prompt only when no content', (t) => {
+   const result = buildMessage('what is 2+2', null)
+   t.is(result, 'what is 2+2')
+ })
+
+ test('buildMessage returns prompt only when content is undefined', (t) => {
+   const result = buildMessage('what is 2+2', undefined)
+   t.is(result, 'what is 2+2')
+ })
+
+ test('buildMessage returns null when both are null', (t) => {
+   const result = buildMessage(null, null)
+   t.is(result, null)
+ })
+
+ test('buildMessage returns null when both are undefined', (t) => {
+   const result = buildMessage(undefined, undefined)
+   t.is(result, null)
+ })
+
+ test('buildMessage handles empty string prompt as falsy', (t) => {
+   const result = buildMessage('', 'content')
+   t.is(result, 'content')
+ })
+
+ test('buildMessage handles empty string content as falsy', (t) => {
+   const result = buildMessage('prompt', '')
+   t.is(result, 'prompt')
+ })
+
+ test('buildMessage preserves multiline content', (t) => {
+   const content = 'line 1\nline 2\nline 3'
+   const result = buildMessage('summarize', content)
+   t.is(result, 'summarize:\n\nline 1\nline 2\nline 3')
+ })
package/test/stream.mjs CHANGED
@@ -45,6 +45,17 @@ test('buildJsonOutput extracts question from last user message', (t) => {
    t.is(output.question, 'Second question')
  })

+ test('buildJsonOutput finds user message even when assistant message is last', (t) => {
+   const conversation = createMockConversation({
+     messages: [
+       { role: 'user', content: 'My question' },
+       { role: 'assistant', content: 'My answer' }
+     ]
+   })
+   const output = buildJsonOutput(conversation, 'response')
+   t.is(output.question, 'My question')
+ })
+
  test('buildJsonOutput returns empty question when no messages', (t) => {
    const conversation = createMockConversation({ messages: [] })
    const output = buildJsonOutput(conversation, 'response')