pplx-zero 2.1.0 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +67 -88
- package/bin/pplx.js +5 -2
- package/package.json +3 -2
- package/src/index.ts +6 -2
- package/src/markdown.ts +64 -0
- package/src/api.test.ts +0 -21
- package/src/files.test.ts +0 -71
- package/src/history.test.ts +0 -84
- package/src/output.test.ts +0 -41
package/README.md
CHANGED
@@ -1,133 +1,112 @@
-
+<p align="center">
+  <img src="https://raw.githubusercontent.com/codewithkenzo/pplx-zero/main/logo.png" alt="pplx-zero" width="140" />
+</p>
 
-
-[](https://aur.archlinux.org/packages/pplx-zero)
-
-
+<h1 align="center">pplx</h1>
 
-
+<p align="center">
+  <strong>AI search from your terminal. Zero bloat.</strong>
+</p>
+
+<p align="center">
+  <a href="https://www.npmjs.com/package/pplx-zero"><img src="https://img.shields.io/npm/v/pplx-zero.svg?color=00d4ff" alt="npm"></a>
+  <a href="https://aur.archlinux.org/packages/pplx-zero"><img src="https://img.shields.io/aur/version/pplx-zero?color=00d4ff" alt="AUR"></a>
+  <img src="https://img.shields.io/badge/bun-runtime-f9f1e1" alt="Bun">
+  <img src="https://img.shields.io/badge/license-MIT-blue" alt="License">
+</p>
+
+---
 
 ```bash
 pplx "what is bun"
 ```
 
-
-
-- **Fast** — Bun-native, streams responses as they arrive
-- **Minimal** — ~400 lines of code, one dependency (zod)
-- **Powerful** — 5 models including deep research, file & image support
-- **Conversational** — Continue previous queries with `-c`
-- **Unix-friendly** — Pipes, JSON output, history, exit codes done right
+Query [Perplexity AI](https://perplexity.ai) directly from your terminal. Responses stream in real-time with beautiful markdown formatting.
 
-##
+## Features
 
-
-
-
+- **⚡ Streaming** — Answers appear as they're generated
+- **💬 Conversations** — Continue with `-c` for multi-turn
+- **📄 Documents** — Analyze PDFs, code, text files
+- **🖼️ Images** — Describe screenshots and diagrams
+- **📝 Export** — Save research to markdown
+- **🎨 Pretty** — Rendered markdown by default
+- **🕐 History** — Browse and search past queries
 
-
-npm install -g pplx-zero
+## Install
 
-
-
+```bash
+bun install -g pplx-zero    # recommended
+npm install -g pplx-zero    # requires bun
+yay -S pplx-zero            # arch linux
 ```
 
 ## Setup
 
-Get your API key from [Perplexity Settings](https://www.perplexity.ai/settings/api).
-
 ```bash
 export PERPLEXITY_API_KEY="pplx-..."
 ```
 
+Get your key at [perplexity.ai/settings/api](https://www.perplexity.ai/settings/api)
+
 ## Usage
 
 ```bash
-#
-pplx "best
+# search
+pplx "best typescript patterns 2025"
 
-#
-pplx -m sonar-pro "explain
+# models
+pplx -m sonar-pro "explain transformers"
+pplx -m sonar-deep-research "AI regulation analysis"
 
-#
-pplx
+# conversation
+pplx "what is rust"
+pplx -c "compare to go"
 
-#
-pplx -f
+# files
+pplx -f paper.pdf "summarize"
+pplx -i diagram.png "explain this"
 
-#
-pplx -
+# export
+pplx "topic" -o research.md
 
-#
-pplx "
-pplx -c "how does it compare to go?"
-pplx -c "which should I learn first?"
+# pretty markdown is default
+pplx "explain monads"
 
-#
-pplx
+# raw output (no formatting)
+pplx --raw "explain monads"
 
-#
-pplx --json "capital of france" | jq .answer
-
-# View query history
+# history
 pplx --history
-
-# Search without saving to history
-pplx --no-history "sensitive query"
 ```
 
 ## Models
 
-| Model |
-
-| `sonar` | Quick answers
+| Model | Use |
+|-------|-----|
+| `sonar` | Quick answers |
 | `sonar-pro` | Complex questions |
-| `sonar-reasoning` | Step-by-step
+| `sonar-reasoning` | Step-by-step |
 | `sonar-reasoning-pro` | Advanced reasoning |
-| `sonar-deep-research` |
+| `sonar-deep-research` | Research reports |
 
 ## Options
 
 | Flag | Description |
 |------|-------------|
-| `-m
-| `-f
-| `-i
-| `-o
-| `-c
-| `--
-| `--
-| `--json` |
-| `-h, --help` | Show help |
-
-## History & Sessions
-
-pplx-zero keeps a local history of your queries at `~/.pplx/history.jsonl`.
-
-```bash
-# View recent queries
-pplx --history
-
-# Filter with grep
-pplx --history | grep "typescript"
-
-# Continue last conversation
-pplx -c "tell me more"
-
-# Skip history for sensitive queries
-pplx --no-history "private question"
-```
-
-History auto-rotates at 1000 entries to keep the file small.
+| `-m` | Model selection |
+| `-f` | Attach file |
+| `-i` | Attach image |
+| `-o` | Output to file |
+| `-c` | Continue conversation |
+| `--raw` | Raw output (no markdown) |
+| `--history` | View history |
+| `--json` | JSON output |
 
-##
+## Philosophy
 
-
-|------|---------|
-| `0` | Success |
-| `1` | API error |
-| `2` | Configuration error |
+~400 lines. 1 dependency. No frameworks.
 
-
+---
 
-MIT
+<p align="center">MIT © <a href="https://github.com/codewithkenzo">kenzo</a></p>
package/bin/pplx.js
CHANGED
@@ -1,7 +1,10 @@
 #!/usr/bin/env node
 
-
-
+import { spawn, execSync } from 'child_process';
+import { dirname, join } from 'path';
+import { fileURLToPath } from 'url';
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
 
 const hasBun = () => {
   try {
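The launcher is now an ES module: it pulls in `spawn`/`execSync`, rebuilds `__dirname` via `fileURLToPath(import.meta.url)`, and keeps the existing `hasBun()` check. The body of that check and the eventual spawn call are cut off in this view, so the sketch below is only a guess at the usual shape of such a wrapper; the `bun --version` probe and the `../src/index.ts` entry path are assumptions not confirmed by the hunk.

```ts
// Hypothetical completion of the launcher; only the imports and the
// __dirname line are taken from the diff above.
import { spawn, execSync } from 'child_process';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';

const __dirname = dirname(fileURLToPath(import.meta.url));

const hasBun = () => {
  try {
    execSync('bun --version', { stdio: 'ignore' }); // assumed probe
    return true;
  } catch {
    return false;
  }
};

if (!hasBun()) {
  console.error('pplx-zero requires the Bun runtime: https://bun.sh');
  process.exit(1);
}

// Assumed entry point; the real path is not visible in this hunk.
const child = spawn('bun', [join(__dirname, '../src/index.ts'), ...process.argv.slice(2)], {
  stdio: 'inherit',
});
child.on('exit', (code) => process.exit(code ?? 1));
```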
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "pplx-zero",
-  "version": "2.1.0",
+  "version": "2.2.1",
   "description": "Minimal Perplexity AI CLI - search from terminal",
   "author": "kenzo",
   "license": "MIT",
@@ -25,7 +25,8 @@
   },
   "files": [
     "src",
-    "bin"
+    "bin",
+    "!src/**/*.test.ts"
   ],
   "repository": {
     "type": "git",
package/src/index.ts
CHANGED
@@ -5,6 +5,7 @@ import { encodeFile } from './files';
 import { getEnv } from './env';
 import { fmt, write, writeLn } from './output';
 import { appendHistory, readHistory, getLastEntry } from './history';
+import { renderMarkdown, createMarkdownState } from './markdown';
 
 getEnv();
 
@@ -20,6 +21,7 @@ const { values, positionals } = parseArgs({
     'no-history': { type: 'boolean', default: false },
     continue: { type: 'boolean', short: 'c', default: false },
     output: { type: 'string', short: 'o' },
+    raw: { type: 'boolean', default: false },
   },
   allowPositionals: true,
   strict: true,
@@ -39,6 +41,7 @@ Options:
   -c, --continue  Continue from last query (add context)
   --history       Show query history
   --no-history    Don't save this query to history
+  --raw           Raw output (no markdown rendering)
   --json          Output as JSON
   -h, --help      Show this help
 
@@ -91,7 +94,7 @@ const file = filePath ? await encodeFile(filePath) : undefined;
 
 const startTime = Date.now();
 let fullContent = '';
-
+const mdState = createMarkdownState();
 
 if (!values.json) {
   await write(fmt.model(model) + ' ');
@@ -102,7 +105,8 @@ await search(query, model, {
   onContent: async (text) => {
     fullContent += text;
     if (!values.json) {
-
+      const out = values.raw ? text : renderMarkdown(text, mdState);
+      await write(out);
     }
   },
   onDone: async (citations, usage) => {
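These hunks wire a new `--raw` boolean through Node's `parseArgs` and route each streamed chunk through either the raw path or the new renderer. Below is a minimal standalone sketch of just that flag handling, assuming the module layout shown in this diff; the hard-coded chunk array stands in for the real `search()` stream and is invented for illustration.

```ts
import { parseArgs } from 'node:util';
import { renderMarkdown, createMarkdownState } from './markdown';

const { values } = parseArgs({
  options: {
    raw: { type: 'boolean', default: false }, // new flag per the diff above
  },
  allowPositionals: true,
  strict: true,
});

const mdState = createMarkdownState();

// Stand-in for the streaming onContent callback; the chunks are fabricated.
for (const text of ['# Answer\n', 'Use **bold** and `code` freely.\n']) {
  const out = values.raw ? text : renderMarkdown(text, mdState);
  process.stdout.write(out);
}
```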
package/src/markdown.ts
ADDED
@@ -0,0 +1,64 @@
+const c = {
+  reset: '\x1b[0m',
+  bold: '\x1b[1m',
+  dim: '\x1b[2m',
+  italic: '\x1b[3m',
+  cyan: '\x1b[36m',
+  yellow: '\x1b[33m',
+  magenta: '\x1b[35m',
+  gray: '\x1b[90m',
+  bgBlue: '\x1b[44m',
+} as const;
+
+export interface MarkdownState {
+  inCode: boolean;
+  codeLanguage: string;
+}
+
+export function createMarkdownState(): MarkdownState {
+  return { inCode: false, codeLanguage: '' };
+}
+
+export function renderMarkdown(chunk: string, state: MarkdownState): string {
+  let out = chunk;
+
+  const fenceMatch = out.match(/```(\w*)/);
+  if (fenceMatch) {
+    state.inCode = !state.inCode;
+    state.codeLanguage = fenceMatch[1] || '';
+    out = out.replace(/```\w*/g, state.inCode ? `${c.yellow}━━━ ${state.codeLanguage || 'code'} ━━━${c.reset}` : `${c.yellow}━━━━━━━━━━━${c.reset}`);
+    return out;
+  }
+
+  if (state.inCode) {
+    return `${c.dim}${out}${c.reset}`;
+  }
+
+  if (out.startsWith('### ')) {
+    return `${c.bold}${c.cyan}${out.slice(4)}${c.reset}`;
+  }
+  if (out.startsWith('## ')) {
+    return `${c.bold}${c.magenta}${out.slice(3)}${c.reset}`;
+  }
+  if (out.startsWith('# ')) {
+    return `${c.bold}${c.cyan}▸ ${out.slice(2)}${c.reset}`;
+  }
+
+  if (out.startsWith('> ')) {
+    return `${c.italic}${c.gray}│ ${out.slice(2)}${c.reset}`;
+  }
+
+  if (out.match(/^[\-\*] /)) {
+    out = out.replace(/^[\-\*] /, `${c.cyan}• ${c.reset}`);
+  }
+  if (out.match(/^\d+\. /)) {
+    out = out.replace(/^(\d+)\. /, `${c.cyan}$1.${c.reset} `);
+  }
+
+  out = out
+    .replace(/\*\*([^*]+)\*\*/g, `${c.bold}$1${c.reset}`)
+    .replace(/\*([^*]+)\*/g, `${c.italic}$1${c.reset}`)
+    .replace(/`([^`]+)`/g, `${c.bgBlue} $1 ${c.reset}`);
+
+  return out;
+}
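The new renderer is deliberately stateful: `MarkdownState` tracks whether the stream is currently inside a fenced code block, so code lines that arrive in later chunks still get dimmed, while headings, quotes, lists, and inline bold/italic/code spans are restyled chunk by chunk. A small usage sketch follows; the chunk contents are made up for illustration, and in the CLI they come from the streaming `onContent` callback shown in the src/index.ts diff.

```ts
import { renderMarkdown, createMarkdownState } from './markdown';

// Shared state lets fence tracking survive across chunk boundaries.
const state = createMarkdownState();

// Example chunks, invented for illustration.
const fence = '`'.repeat(3); // avoids embedding a literal fence in this snippet
const chunks = ['## Streaming\n', `${fence}ts\n`, 'const x = 1;\n', `${fence}\n`, '- done\n'];
for (const chunk of chunks) {
  process.stdout.write(renderMarkdown(chunk, state));
}
```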
package/src/api.test.ts
DELETED
@@ -1,21 +0,0 @@
-import { test, expect, describe } from 'bun:test';
-import { MODELS, type Model } from './api';
-
-describe('MODELS', () => {
-  test('includes all expected models', () => {
-    expect(MODELS).toContain('sonar');
-    expect(MODELS).toContain('sonar-pro');
-    expect(MODELS).toContain('sonar-reasoning');
-    expect(MODELS).toContain('sonar-reasoning-pro');
-    expect(MODELS).toContain('sonar-deep-research');
-  });
-
-  test('has exactly 5 models', () => {
-    expect(MODELS).toHaveLength(5);
-  });
-
-  test('Model type matches MODELS array', () => {
-    const model: Model = MODELS[0]!;
-    expect(MODELS.includes(model)).toBe(true);
-  });
-});
package/src/files.test.ts
DELETED
@@ -1,71 +0,0 @@
-import { test, expect, describe } from 'bun:test';
-import { encodeFile, toDataUrl, type FileAttachment } from './files';
-import { writeFile, unlink } from 'node:fs/promises';
-import { join } from 'node:path';
-
-const TMP_DIR = '/tmp';
-
-describe('encodeFile', () => {
-  test('encodes text file correctly', async () => {
-    const testPath = join(TMP_DIR, 'test.txt');
-    await writeFile(testPath, 'hello world');
-
-    const result = await encodeFile(testPath);
-
-    expect(result.type).toBe('file');
-    expect(result.mimeType).toBe('text/plain');
-    expect(result.filename).toBe('test.txt');
-    expect(result.data).toBe(Buffer.from('hello world').toString('base64'));
-
-    await unlink(testPath);
-  });
-
-  test('encodes PDF as file type', async () => {
-    const testPath = join(TMP_DIR, 'test.pdf');
-    await writeFile(testPath, '%PDF-1.4 test');
-
-    const result = await encodeFile(testPath);
-
-    expect(result.type).toBe('file');
-    expect(result.mimeType).toBe('application/pdf');
-
-    await unlink(testPath);
-  });
-
-  test('encodes PNG as image type', async () => {
-    const testPath = join(TMP_DIR, 'test.png');
-    const pngHeader = Buffer.from([0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]);
-    await writeFile(testPath, pngHeader);
-
-    const result = await encodeFile(testPath);
-
-    expect(result.type).toBe('image');
-    expect(result.mimeType).toBe('image/png');
-
-    await unlink(testPath);
-  });
-
-  test('throws on unsupported file type', async () => {
-    const testPath = join(TMP_DIR, 'test.xyz');
-    await writeFile(testPath, 'test');
-
-    await expect(encodeFile(testPath)).rejects.toThrow('Unsupported file type: .xyz');
-
-    await unlink(testPath);
-  });
-});
-
-describe('toDataUrl', () => {
-  test('creates valid data URL', () => {
-    const attachment: FileAttachment = {
-      type: 'image',
-      data: 'aGVsbG8gd29ybGQ=',
-      mimeType: 'image/png',
-      filename: 'test.png',
-    };
-
-    const result = toDataUrl(attachment);
-
-    expect(result).toBe('data:image/png;base64,aGVsbG8gd29ybGQ=');
-  });
-});
package/src/history.test.ts
DELETED
@@ -1,84 +0,0 @@
-import { test, expect, beforeEach, afterAll } from 'bun:test';
-import { appendHistory, readHistory, getLastEntry, clearHistory } from './history';
-
-beforeEach(async () => {
-  await clearHistory();
-});
-
-afterAll(async () => {
-  await clearHistory();
-});
-
-test('appendHistory creates entry', async () => {
-  await appendHistory({ q: 'test query', m: 'sonar', a: 'test answer' });
-  const entries = await readHistory();
-  expect(entries.length).toBe(1);
-  expect(entries[0]!.q).toBe('test query');
-  expect(entries[0]!.m).toBe('sonar');
-  expect(entries[0]!.a).toBe('test answer');
-  expect(entries[0]!.ts).toBeGreaterThan(0);
-});
-
-test('readHistory returns entries in reverse order', async () => {
-  await appendHistory({ q: 'first', m: 'sonar', a: 'a1' });
-  await appendHistory({ q: 'second', m: 'sonar-pro', a: 'a2' });
-  await appendHistory({ q: 'third', m: 'sonar', a: 'a3' });
-
-  const entries = await readHistory();
-  expect(entries.length).toBe(3);
-  expect(entries[0]!.q).toBe('third');
-  expect(entries[1]!.q).toBe('second');
-  expect(entries[2]!.q).toBe('first');
-});
-
-test('readHistory respects limit', async () => {
-  await appendHistory({ q: 'one', m: 'sonar', a: 'a' });
-  await appendHistory({ q: 'two', m: 'sonar', a: 'a' });
-  await appendHistory({ q: 'three', m: 'sonar', a: 'a' });
-
-  const entries = await readHistory(2);
-  expect(entries.length).toBe(2);
-  expect(entries[0]!.q).toBe('three');
-  expect(entries[1]!.q).toBe('two');
-});
-
-test('getLastEntry returns most recent', async () => {
-  await appendHistory({ q: 'old', m: 'sonar', a: 'old answer' });
-  await appendHistory({ q: 'new', m: 'sonar-pro', a: 'new answer' });
-
-  const last = await getLastEntry();
-  expect(last?.q).toBe('new');
-  expect(last?.m).toBe('sonar-pro');
-});
-
-test('getLastEntry returns null when empty', async () => {
-  const last = await getLastEntry();
-  expect(last).toBeNull();
-});
-
-test('clearHistory removes all entries', async () => {
-  await appendHistory({ q: 'test', m: 'sonar', a: 'answer' });
-  await clearHistory();
-  const entries = await readHistory();
-  expect(entries.length).toBe(0);
-});
-
-test('appendHistory stores citations', async () => {
-  await appendHistory({
-    q: 'query',
-    m: 'sonar',
-    a: 'answer',
-    citations: ['https://example.com', 'https://test.com']
-  });
-
-  const entries = await readHistory();
-  expect(entries[0]!.citations).toEqual(['https://example.com', 'https://test.com']);
-});
-
-test('appendHistory truncates long answers', async () => {
-  const longAnswer = 'x'.repeat(3000);
-  await appendHistory({ q: 'query', m: 'sonar', a: longAnswer });
-
-  const entries = await readHistory();
-  expect(entries[0]!.a.length).toBe(2000);
-});
package/src/output.test.ts
DELETED
@@ -1,41 +0,0 @@
-import { test, expect, describe } from 'bun:test';
-import { fmt } from './output';
-
-describe('fmt', () => {
-  test('model formats with cyan color', () => {
-    const result = fmt.model('sonar');
-    expect(result).toContain('[sonar]');
-    expect(result).toContain('\x1b[36m');
-    expect(result).toContain('\x1b[0m');
-  });
-
-  test('searching shows dim text', () => {
-    const result = fmt.searching();
-    expect(result).toContain('Searching...');
-    expect(result).toContain('\x1b[2m');
-  });
-
-  test('error formats with red color', () => {
-    const result = fmt.error('test error');
-    expect(result).toContain('Error: test error');
-    expect(result).toContain('\x1b[31m');
-  });
-
-  test('citation formats with number and URL', () => {
-    const result = fmt.citation(1, 'https://example.com');
-    expect(result).toContain('1.');
-    expect(result).toContain('https://example.com');
-  });
-
-  test('stats formats tokens and time', () => {
-    const result = fmt.stats(100, 1500);
-    expect(result).toContain('100 tokens');
-    expect(result).toContain('1.5s');
-  });
-
-  test('sources shows yellow header', () => {
-    const result = fmt.sources();
-    expect(result).toContain('Sources:');
-    expect(result).toContain('\x1b[33m');
-  });
-});