askimo 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -13
- package/index.mjs +8 -4
- package/lib/chat.mjs +1 -1
- package/lib/providers.mjs +35 -2
- package/lib/stream.mjs +21 -6
- package/package.json +14 -1
package/README.md
CHANGED
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
|
|
8
8
|
A CLI tool for communicating with AI providers.
|
|
9
9
|
|
|
10
|
-
**Supported providers:** Perplexity · OpenAI · Anthropic
|
|
10
|
+
**Supported providers:** Perplexity · OpenAI · Anthropic · xAI (Grok)
|
|
11
11
|
|
|
12
12
|
---
|
|
13
13
|
|
|
@@ -26,12 +26,14 @@ Create a config file at `~/.askimo/config`:
|
|
|
26
26
|
PERPLEXITY_API_KEY=your-perplexity-key
|
|
27
27
|
OPENAI_API_KEY=your-openai-key
|
|
28
28
|
ANTHROPIC_API_KEY=your-anthropic-key
|
|
29
|
+
XAI_API_KEY=your-xai-key
|
|
29
30
|
|
|
30
31
|
# Optional settings
|
|
31
32
|
DEFAULT_PROVIDER=perplexity
|
|
32
33
|
PERPLEXITY_MODEL=sonar
|
|
33
34
|
OPENAI_MODEL=gpt-4o
|
|
34
35
|
ANTHROPIC_MODEL=claude-sonnet-4-20250514
|
|
36
|
+
XAI_MODEL=grok-4
|
|
35
37
|
```
|
|
36
38
|
|
|
37
39
|
---
|
|
@@ -46,15 +48,17 @@ askimo "What is the capital of France?"
|
|
|
46
48
|
|
|
47
49
|
### Choose a provider
|
|
48
50
|
|
|
49
|
-
| Flag | Provider
|
|
50
|
-
|
|
51
|
+
| Flag | Provider |
|
|
52
|
+
|------|----------------------|
|
|
51
53
|
| `-p` | Perplexity (default) |
|
|
52
|
-
| `-o` | OpenAI
|
|
53
|
-
| `-a` | Anthropic
|
|
54
|
+
| `-o` | OpenAI |
|
|
55
|
+
| `-a` | Anthropic |
|
|
56
|
+
| `-x` | xAI (Grok) |
|
|
54
57
|
|
|
55
58
|
```bash
|
|
56
59
|
askimo "explain quantum computing" -o # Use OpenAI
|
|
57
60
|
askimo "write a haiku" -a # Use Anthropic
|
|
61
|
+
askimo "what's happening today?" -x # Use xAI Grok
|
|
58
62
|
```
|
|
59
63
|
|
|
60
64
|
### Continue a conversation
|
|
@@ -89,6 +93,7 @@ askimo -f error.log "find the bug"
|
|
|
89
93
|
```bash
|
|
90
94
|
askimo chat # Start new chat
|
|
91
95
|
askimo chat -o # Chat with OpenAI
|
|
96
|
+
askimo chat -x # Chat with xAI Grok
|
|
92
97
|
askimo chat -c 1 # Continue last conversation
|
|
93
98
|
```
|
|
94
99
|
|
|
@@ -99,20 +104,21 @@ Type `exit` or `Ctrl+C` to quit.
|
|
|
99
104
|
```bash
|
|
100
105
|
askimo models # All providers
|
|
101
106
|
askimo models -p # Perplexity only
|
|
107
|
+
askimo models -x # xAI only
|
|
102
108
|
```
|
|
103
109
|
|
|
104
110
|
---
|
|
105
111
|
|
|
106
112
|
## ✨ Features
|
|
107
113
|
|
|
108
|
-
| Feature
|
|
109
|
-
|
|
110
|
-
| Streaming
|
|
111
|
-
| Piping
|
|
112
|
-
| File input
|
|
113
|
-
| Citations
|
|
114
|
-
| History
|
|
115
|
-
| Multi-provider | Switch between AI providers easily
|
|
114
|
+
| Feature | Description |
|
|
115
|
+
|----------------|---------------------------------------------------|
|
|
116
|
+
| Streaming | Real-time response output |
|
|
117
|
+
| Piping | Pipe content via stdin |
|
|
118
|
+
| File input | Read content from files with `-f` |
|
|
119
|
+
| Citations | Source links with Perplexity |
|
|
120
|
+
| History | Conversations saved to `~/.askimo/conversations/` |
|
|
121
|
+
| Multi-provider | Switch between AI providers easily |
|
|
116
122
|
|
|
117
123
|
---
|
|
118
124
|
|
package/index.mjs
CHANGED
|
@@ -20,6 +20,7 @@ program
|
|
|
20
20
|
.option('-p, --perplexity', 'Use Perplexity AI (default)')
|
|
21
21
|
.option('-o, --openai', 'Use OpenAI')
|
|
22
22
|
.option('-a, --anthropic', 'Use Anthropic Claude')
|
|
23
|
+
.option('-x, --xai', 'Use xAI Grok')
|
|
23
24
|
.option('-j, --json', 'Output as JSON instead of streaming')
|
|
24
25
|
.option('-c, --continue <n>', 'Continue conversation N (1=last, 2=second-to-last)', Number.parseInt)
|
|
25
26
|
.option('-f, --file <path>', 'Read content from file')
|
|
@@ -70,16 +71,16 @@ program
|
|
|
70
71
|
let responseText
|
|
71
72
|
|
|
72
73
|
if (options.json) {
|
|
73
|
-
const { text, sources } = await generateResponse(model, conversation.messages)
|
|
74
|
+
const { text, sources, duration } = await generateResponse(model, conversation.messages)
|
|
74
75
|
responseText = text
|
|
75
76
|
conversation.messages.push({
|
|
76
77
|
role: 'assistant',
|
|
77
78
|
content: responseText
|
|
78
79
|
})
|
|
79
80
|
await saveConversation(conversation, existingPath)
|
|
80
|
-
outputJson(conversation, responseText, sources)
|
|
81
|
+
outputJson(conversation, responseText, sources, duration)
|
|
81
82
|
} else {
|
|
82
|
-
responseText = await streamResponse(model, conversation.messages)
|
|
83
|
+
responseText = await streamResponse(model, conversation.messages, modelName)
|
|
83
84
|
conversation.messages.push({
|
|
84
85
|
role: 'assistant',
|
|
85
86
|
content: responseText
|
|
@@ -98,6 +99,7 @@ program
|
|
|
98
99
|
.option('-p, --perplexity', 'Use Perplexity AI (default)')
|
|
99
100
|
.option('-o, --openai', 'Use OpenAI')
|
|
100
101
|
.option('-a, --anthropic', 'Use Anthropic Claude')
|
|
102
|
+
.option('-x, --xai', 'Use xAI Grok')
|
|
101
103
|
.option('-c, --continue <n>', 'Continue conversation N (1=last, 2=second-to-last)', Number.parseInt)
|
|
102
104
|
.action(async (options) => {
|
|
103
105
|
try {
|
|
@@ -120,6 +122,7 @@ program
|
|
|
120
122
|
.option('-p, --perplexity', 'Show only Perplexity models')
|
|
121
123
|
.option('-o, --openai', 'Show only OpenAI models')
|
|
122
124
|
.option('-a, --anthropic', 'Show only Anthropic models')
|
|
125
|
+
.option('-x, --xai', 'Show only xAI models')
|
|
123
126
|
.action(async (options) => {
|
|
124
127
|
try {
|
|
125
128
|
const config = await loadConfig()
|
|
@@ -128,8 +131,9 @@ program
|
|
|
128
131
|
if (options.perplexity) providers.push('perplexity')
|
|
129
132
|
if (options.openai) providers.push('openai')
|
|
130
133
|
if (options.anthropic) providers.push('anthropic')
|
|
134
|
+
if (options.xai) providers.push('xai')
|
|
131
135
|
|
|
132
|
-
const toShow = providers.length === 0 ? ['perplexity', 'openai', 'anthropic'] : providers
|
|
136
|
+
const toShow = providers.length === 0 ? ['perplexity', 'openai', 'anthropic', 'xai'] : providers
|
|
133
137
|
|
|
134
138
|
const results = await Promise.all(
|
|
135
139
|
toShow.map(async (provider) => ({
|
package/lib/chat.mjs
CHANGED
|
@@ -42,7 +42,7 @@ async function startChat(model, providerName, modelName, continueN = null) {
|
|
|
42
42
|
})
|
|
43
43
|
|
|
44
44
|
console.log('')
|
|
45
|
-
const responseText = await streamResponse(model, conversation.messages)
|
|
45
|
+
const responseText = await streamResponse(model, conversation.messages, modelName)
|
|
46
46
|
|
|
47
47
|
conversation.messages.push({
|
|
48
48
|
role: 'assistant',
|
package/lib/providers.mjs
CHANGED
|
@@ -1,11 +1,13 @@
|
|
|
1
1
|
import { createAnthropic } from '@ai-sdk/anthropic'
|
|
2
2
|
import { createOpenAI } from '@ai-sdk/openai'
|
|
3
3
|
import { createPerplexity } from '@ai-sdk/perplexity'
|
|
4
|
+
import { createXai } from '@ai-sdk/xai'
|
|
4
5
|
|
|
5
6
|
const DEFAULT_MODELS = {
|
|
6
7
|
perplexity: 'sonar',
|
|
7
8
|
openai: 'gpt-4o',
|
|
8
|
-
anthropic: 'claude-sonnet-4-20250514'
|
|
9
|
+
anthropic: 'claude-sonnet-4-20250514',
|
|
10
|
+
xai: 'grok-4'
|
|
9
11
|
}
|
|
10
12
|
|
|
11
13
|
// Perplexity doesn't have a models list API, so we hardcode these
|
|
@@ -17,6 +19,20 @@ const PERPLEXITY_MODELS = [
|
|
|
17
19
|
{ id: 'sonar-deep-research', description: 'Deep research sessions' }
|
|
18
20
|
]
|
|
19
21
|
|
|
22
|
+
// xAI doesn't have a public models list API, so we hardcode these
|
|
23
|
+
const XAI_MODELS = [
|
|
24
|
+
{ id: 'grok-4-1-fast-reasoning', description: 'Grok 4.1 fast with reasoning' },
|
|
25
|
+
{ id: 'grok-4-1-fast-non-reasoning', description: 'Grok 4.1 fast without reasoning' },
|
|
26
|
+
{ id: 'grok-code-fast-1', description: 'Grok optimized for code' },
|
|
27
|
+
{ id: 'grok-4-fast-reasoning', description: 'Grok 4 fast with reasoning' },
|
|
28
|
+
{ id: 'grok-4-fast-non-reasoning', description: 'Grok 4 fast without reasoning' },
|
|
29
|
+
{ id: 'grok-4-0709', description: 'Grok 4 flagship model' },
|
|
30
|
+
{ id: 'grok-3-mini', description: 'Lightweight Grok 3 model' },
|
|
31
|
+
{ id: 'grok-3', description: 'Grok 3 base model' },
|
|
32
|
+
{ id: 'grok-2-vision-1212', description: 'Grok 2 with vision capabilities' },
|
|
33
|
+
{ id: 'grok-2-image-1212', description: 'Image generation model' }
|
|
34
|
+
]
|
|
35
|
+
|
|
20
36
|
async function fetchOpenAiModels(apiKey) {
|
|
21
37
|
const response = await fetch('https://api.openai.com/v1/models', {
|
|
22
38
|
// biome-ignore lint/style/useNamingConvention: headers use standard capitalization
|
|
@@ -71,6 +87,9 @@ async function listModels(provider, config) {
|
|
|
71
87
|
return fetchAnthropicModels(apiKey)
|
|
72
88
|
}
|
|
73
89
|
|
|
90
|
+
case 'xai':
|
|
91
|
+
return XAI_MODELS
|
|
92
|
+
|
|
74
93
|
default:
|
|
75
94
|
throw new Error(`Unknown provider: ${provider}`)
|
|
76
95
|
}
|
|
@@ -117,6 +136,19 @@ function getProvider(providerName, config) {
|
|
|
117
136
|
modelName
|
|
118
137
|
}
|
|
119
138
|
}
|
|
139
|
+
case 'xai': {
|
|
140
|
+
const apiKey = config.XAI_API_KEY
|
|
141
|
+
if (!apiKey) {
|
|
142
|
+
throw new Error('XAI_API_KEY not found in config')
|
|
143
|
+
}
|
|
144
|
+
const modelName = config.XAI_MODEL || DEFAULT_MODELS.xai
|
|
145
|
+
const xai = createXai({ apiKey })
|
|
146
|
+
return {
|
|
147
|
+
model: xai(modelName),
|
|
148
|
+
name: 'xai',
|
|
149
|
+
modelName
|
|
150
|
+
}
|
|
151
|
+
}
|
|
120
152
|
default:
|
|
121
153
|
throw new Error(`Unknown provider: ${providerName}`)
|
|
122
154
|
}
|
|
@@ -126,9 +158,10 @@ function determineProvider(options, config = {}) {
|
|
|
126
158
|
if (options.openai) return 'openai'
|
|
127
159
|
if (options.anthropic) return 'anthropic'
|
|
128
160
|
if (options.perplexity) return 'perplexity'
|
|
161
|
+
if (options.xai) return 'xai'
|
|
129
162
|
|
|
130
163
|
const defaultProvider = config.DEFAULT_PROVIDER?.toLowerCase()
|
|
131
|
-
if (defaultProvider && ['perplexity', 'openai', 'anthropic'].includes(defaultProvider)) {
|
|
164
|
+
if (defaultProvider && ['perplexity', 'openai', 'anthropic', 'xai'].includes(defaultProvider)) {
|
|
132
165
|
return defaultProvider
|
|
133
166
|
}
|
|
134
167
|
|
package/lib/stream.mjs
CHANGED
|
@@ -1,6 +1,13 @@
|
|
|
1
1
|
import { generateText, streamText } from 'ai'
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
function formatDuration(ms) {
|
|
4
|
+
if (ms < 1000) return `${ms}ms`
|
|
5
|
+
return `${(ms / 1000).toFixed(1)}s`
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
async function streamResponse(model, messages, modelName) {
|
|
9
|
+
const startTime = Date.now()
|
|
10
|
+
|
|
4
11
|
const result = streamText({
|
|
5
12
|
model,
|
|
6
13
|
messages
|
|
@@ -29,19 +36,26 @@ async function streamResponse(model, messages) {
|
|
|
29
36
|
})
|
|
30
37
|
}
|
|
31
38
|
|
|
39
|
+
// Display status line
|
|
40
|
+
const duration = Date.now() - startTime
|
|
41
|
+
process.stdout.write(`\n\x1b[2m${modelName} · ${formatDuration(duration)}\x1b[0m\n`)
|
|
42
|
+
|
|
32
43
|
return fullText
|
|
33
44
|
}
|
|
34
45
|
|
|
35
46
|
async function generateResponse(model, messages) {
|
|
47
|
+
const startTime = Date.now()
|
|
48
|
+
|
|
36
49
|
const { text, sources } = await generateText({
|
|
37
50
|
model,
|
|
38
51
|
messages
|
|
39
52
|
})
|
|
40
53
|
|
|
41
|
-
|
|
54
|
+
const duration = Date.now() - startTime
|
|
55
|
+
return { text, sources, duration }
|
|
42
56
|
}
|
|
43
57
|
|
|
44
|
-
function buildJsonOutput(conversation, response, sources) {
|
|
58
|
+
function buildJsonOutput(conversation, response, sources, duration) {
|
|
45
59
|
const lastUserMessage = conversation.messages.findLast((m) => m.role === 'user')
|
|
46
60
|
const output = {
|
|
47
61
|
provider: conversation.provider,
|
|
@@ -49,7 +63,8 @@ function buildJsonOutput(conversation, response, sources) {
|
|
|
49
63
|
question: lastUserMessage?.content || '',
|
|
50
64
|
response,
|
|
51
65
|
conversationId: conversation.id,
|
|
52
|
-
messageCount: conversation.messages.length + 1
|
|
66
|
+
messageCount: conversation.messages.length + 1,
|
|
67
|
+
durationMs: duration
|
|
53
68
|
}
|
|
54
69
|
|
|
55
70
|
if (sources?.length > 0) {
|
|
@@ -59,8 +74,8 @@ function buildJsonOutput(conversation, response, sources) {
|
|
|
59
74
|
return output
|
|
60
75
|
}
|
|
61
76
|
|
|
62
|
-
function outputJson(conversation, response, sources) {
|
|
63
|
-
const output = buildJsonOutput(conversation, response, sources)
|
|
77
|
+
function outputJson(conversation, response, sources, duration) {
|
|
78
|
+
const output = buildJsonOutput(conversation, response, sources, duration)
|
|
64
79
|
console.log(JSON.stringify(output, null, 2))
|
|
65
80
|
}
|
|
66
81
|
|
package/package.json
CHANGED
|
@@ -1,9 +1,21 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "askimo",
|
|
3
|
-
"version": "1.1.0",
|
|
3
|
+
"version": "1.2.0",
|
|
4
4
|
"description": "A CLI tool for communicating with AI providers (Perplexity, OpenAI, Anthropic)",
|
|
5
5
|
"license": "Apache-2.0",
|
|
6
6
|
"author": "Amit Tal",
|
|
7
|
+
"keywords": [
|
|
8
|
+
"cli",
|
|
9
|
+
"ai",
|
|
10
|
+
"llm",
|
|
11
|
+
"perplexity",
|
|
12
|
+
"openai",
|
|
13
|
+
"anthropic",
|
|
14
|
+
"claude",
|
|
15
|
+
"gpt",
|
|
16
|
+
"chatbot",
|
|
17
|
+
"terminal"
|
|
18
|
+
],
|
|
7
19
|
"type": "module",
|
|
8
20
|
"bin": {
|
|
9
21
|
"askimo": "./index.mjs"
|
|
@@ -27,6 +39,7 @@
|
|
|
27
39
|
"@ai-sdk/anthropic": "^2.0.53",
|
|
28
40
|
"@ai-sdk/openai": "^2.0.76",
|
|
29
41
|
"@ai-sdk/perplexity": "^2.0.21",
|
|
42
|
+
"@ai-sdk/xai": "^2.0.40",
|
|
30
43
|
"@inquirer/input": "^5.0.2",
|
|
31
44
|
"ai": "^5.0.106",
|
|
32
45
|
"commander": "^14.0.2"
|