pplx-zero 2.0.0 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,46 +1,112 @@
1
- # pplx-zero
1
+ <p align="center">
2
+ <img src="https://raw.githubusercontent.com/codewithkenzo/pplx-zero/main/logo.png" alt="pplx-zero" width="140" />
3
+ </p>
2
4
 
3
- Minimal Perplexity AI CLI - search from terminal.
5
+ <h1 align="center">pplx</h1>
4
6
 
5
- ## Installation
7
+ <p align="center">
8
+ <strong>AI search from your terminal. Zero bloat.</strong>
9
+ </p>
10
+
11
+ <p align="center">
12
+ <a href="https://www.npmjs.com/package/pplx-zero"><img src="https://img.shields.io/npm/v/pplx-zero.svg?color=00d4ff" alt="npm"></a>
13
+ <a href="https://aur.archlinux.org/packages/pplx-zero"><img src="https://img.shields.io/aur/version/pplx-zero?color=00d4ff" alt="AUR"></a>
14
+ <img src="https://img.shields.io/badge/bun-runtime-f9f1e1" alt="Bun">
15
+ <img src="https://img.shields.io/badge/license-MIT-blue" alt="License">
16
+ </p>
17
+
18
+ ---
6
19
 
7
20
  ```bash
8
- bun install -g pplx-zero
21
+ pplx "what is bun"
9
22
  ```
10
23
 
11
- Or with npm:
24
+ Query [Perplexity AI](https://perplexity.ai) directly from your terminal. Responses stream in real time with beautiful markdown formatting.
25
+
26
+ ## Features
27
+
28
+ - **⚡ Streaming** — Answers appear as they're generated
29
+ - **💬 Conversations** — Continue with `-c` for multi-turn
30
+ - **📄 Documents** — Analyze PDFs, code, text files
31
+ - **🖼️ Images** — Describe screenshots and diagrams
32
+ - **📝 Export** — Save research to markdown
33
+ - **🎨 Pretty** — Rendered markdown by default
34
+ - **🕐 History** — Browse and search past queries
35
+
36
+ ## Install
12
37
 
13
38
  ```bash
14
- npm install -g pplx-zero
39
+ bun install -g pplx-zero # recommended
40
+ npm install -g pplx-zero # requires the bun runtime
41
+ yay -S pplx-zero # arch linux
15
42
  ```
16
43
 
17
44
  ## Setup
18
45
 
19
46
  ```bash
20
- export PERPLEXITY_API_KEY="your-api-key"
47
+ export PERPLEXITY_API_KEY="pplx-..."
21
48
  ```
22
49
 
50
+ Get your key at [perplexity.ai/settings/api](https://www.perplexity.ai/settings/api)
51
+
23
52
  ## Usage
24
53
 
25
54
  ```bash
26
- pplx "what is bun"
27
- pplx -m sonar-pro "explain quantum computing"
28
- pplx -m sonar-deep-research "comprehensive analysis of AI trends"
29
- pplx -f report.pdf "summarize this document"
30
- pplx -i screenshot.png "what's in this image"
31
- pplx --json "get structured response"
55
+ # search
56
+ pplx "best typescript patterns 2025"
57
+
58
+ # models
59
+ pplx -m sonar-pro "explain transformers"
60
+ pplx -m sonar-deep-research "AI regulation analysis"
61
+
62
+ # conversation
63
+ pplx "what is rust"
64
+ pplx -c "compare to go"
65
+
66
+ # files
67
+ pplx -f paper.pdf "summarize"
68
+ pplx -i diagram.png "explain this"
69
+
70
+ # export
71
+ pplx "topic" -o research.md
72
+
73
+ # pretty markdown is default
74
+ pplx "explain monads"
75
+
76
+ # raw output (no formatting)
77
+ pplx --raw "explain monads"
78
+
79
+ # history
80
+ pplx --history
32
81
  ```
33
82
 
83
+ ## Models
84
+
85
+ | Model | Use |
86
+ |-------|-----|
87
+ | `sonar` | Quick answers |
88
+ | `sonar-pro` | Complex questions |
89
+ | `sonar-reasoning` | Step-by-step |
90
+ | `sonar-reasoning-pro` | Advanced reasoning |
91
+ | `sonar-deep-research` | Research reports |
92
+
34
93
  ## Options
35
94
 
36
95
  | Flag | Description |
37
96
  |------|-------------|
38
- | `-m, --model` | Model: sonar, sonar-pro, sonar-reasoning, sonar-reasoning-pro, sonar-deep-research |
39
- | `-f, --file` | Attach a file (PDF, TXT, etc.) |
40
- | `-i, --image` | Attach an image (PNG, JPG, etc.) |
41
- | `--json` | Output as JSON |
42
- | `-h, --help` | Show help |
97
+ | `-m` | Model selection |
98
+ | `-f` | Attach file |
99
+ | `-i` | Attach image |
100
+ | `-o` | Output to file |
101
+ | `-c` | Continue conversation |
102
+ | `--raw` | Raw output (no markdown) |
103
+ | `--history` | View history |
104
+ | `--json` | JSON output |
105
+
106
+ ## Philosophy
107
+
108
+ ~400 lines. 1 dependency. No frameworks.
43
109
 
44
- ## License
110
+ ---
45
111
 
46
- MIT
112
+ <p align="center">MIT © <a href="https://github.com/codewithkenzo">kenzo</a></p>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "pplx-zero",
3
- "version": "2.0.0",
3
+ "version": "2.2.0",
4
4
  "description": "Minimal Perplexity AI CLI - search from terminal",
5
5
  "author": "kenzo",
6
6
  "license": "MIT",
package/src/history.test.ts ADDED
@@ -0,0 +1,84 @@
1
+ import { test, expect, beforeEach, afterAll } from 'bun:test';
2
+ import { appendHistory, readHistory, getLastEntry, clearHistory } from './history';
3
+
4
+ beforeEach(async () => {
5
+ await clearHistory();
6
+ });
7
+
8
+ afterAll(async () => {
9
+ await clearHistory();
10
+ });
11
+
12
+ test('appendHistory creates entry', async () => {
13
+ await appendHistory({ q: 'test query', m: 'sonar', a: 'test answer' });
14
+ const entries = await readHistory();
15
+ expect(entries.length).toBe(1);
16
+ expect(entries[0]!.q).toBe('test query');
17
+ expect(entries[0]!.m).toBe('sonar');
18
+ expect(entries[0]!.a).toBe('test answer');
19
+ expect(entries[0]!.ts).toBeGreaterThan(0);
20
+ });
21
+
22
+ test('readHistory returns entries in reverse order', async () => {
23
+ await appendHistory({ q: 'first', m: 'sonar', a: 'a1' });
24
+ await appendHistory({ q: 'second', m: 'sonar-pro', a: 'a2' });
25
+ await appendHistory({ q: 'third', m: 'sonar', a: 'a3' });
26
+
27
+ const entries = await readHistory();
28
+ expect(entries.length).toBe(3);
29
+ expect(entries[0]!.q).toBe('third');
30
+ expect(entries[1]!.q).toBe('second');
31
+ expect(entries[2]!.q).toBe('first');
32
+ });
33
+
34
+ test('readHistory respects limit', async () => {
35
+ await appendHistory({ q: 'one', m: 'sonar', a: 'a' });
36
+ await appendHistory({ q: 'two', m: 'sonar', a: 'a' });
37
+ await appendHistory({ q: 'three', m: 'sonar', a: 'a' });
38
+
39
+ const entries = await readHistory(2);
40
+ expect(entries.length).toBe(2);
41
+ expect(entries[0]!.q).toBe('three');
42
+ expect(entries[1]!.q).toBe('two');
43
+ });
44
+
45
+ test('getLastEntry returns most recent', async () => {
46
+ await appendHistory({ q: 'old', m: 'sonar', a: 'old answer' });
47
+ await appendHistory({ q: 'new', m: 'sonar-pro', a: 'new answer' });
48
+
49
+ const last = await getLastEntry();
50
+ expect(last?.q).toBe('new');
51
+ expect(last?.m).toBe('sonar-pro');
52
+ });
53
+
54
+ test('getLastEntry returns null when empty', async () => {
55
+ const last = await getLastEntry();
56
+ expect(last).toBeNull();
57
+ });
58
+
59
+ test('clearHistory removes all entries', async () => {
60
+ await appendHistory({ q: 'test', m: 'sonar', a: 'answer' });
61
+ await clearHistory();
62
+ const entries = await readHistory();
63
+ expect(entries.length).toBe(0);
64
+ });
65
+
66
+ test('appendHistory stores citations', async () => {
67
+ await appendHistory({
68
+ q: 'query',
69
+ m: 'sonar',
70
+ a: 'answer',
71
+ citations: ['https://example.com', 'https://test.com']
72
+ });
73
+
74
+ const entries = await readHistory();
75
+ expect(entries[0]!.citations).toEqual(['https://example.com', 'https://test.com']);
76
+ });
77
+
78
+ test('appendHistory truncates long answers', async () => {
79
+ const longAnswer = 'x'.repeat(3000);
80
+ await appendHistory({ q: 'query', m: 'sonar', a: longAnswer });
81
+
82
+ const entries = await readHistory();
83
+ expect(entries[0]!.a.length).toBe(2000);
84
+ });
package/src/history.ts ADDED
@@ -0,0 +1,86 @@
1
+ import type { Model } from './api';
2
+
3
+ export interface HistoryEntry {
4
+ ts: number;
5
+ q: string;
6
+ m: Model;
7
+ a: string;
8
+ citations?: string[];
9
+ }
10
+
11
+ const HISTORY_DIR = `${process.env.HOME}/.pplx`;
12
+ const HISTORY_PATH = `${HISTORY_DIR}/history.jsonl`;
13
+ const MAX_ENTRIES = 1000;
14
+ const MAX_ANSWER_LENGTH = 2000;
15
+
16
+ async function ensureDir(): Promise<void> {
17
+ const dir = Bun.file(HISTORY_DIR);
18
+ if (!(await dir.exists())) {
19
+ await Bun.$`mkdir -p ${HISTORY_DIR}`;
20
+ }
21
+ }
22
+
23
+ export async function appendHistory(entry: Omit<HistoryEntry, 'ts'>): Promise<void> {
24
+ await ensureDir();
25
+
26
+ const file = Bun.file(HISTORY_PATH);
27
+ const exists = await file.exists();
28
+
29
+ if (exists) {
30
+ const text = await file.text();
31
+ const lines = text.trim().split('\n').filter(l => l.length > 0);
32
+ if (lines.length >= MAX_ENTRIES) {
33
+ const keep = lines.slice(-MAX_ENTRIES + 1).join('\n') + '\n';
34
+ await Bun.write(HISTORY_PATH, keep);
35
+ }
36
+ }
37
+
38
+ const record: HistoryEntry = {
39
+ ts: Date.now(),
40
+ q: entry.q,
41
+ m: entry.m,
42
+ a: entry.a.slice(0, MAX_ANSWER_LENGTH),
43
+ ...(entry.citations?.length ? { citations: entry.citations } : {}),
44
+ };
45
+
46
+ const line = JSON.stringify(record) + '\n';
47
+
48
+ if (exists) {
49
+ const current = await Bun.file(HISTORY_PATH).text();
50
+ await Bun.write(HISTORY_PATH, current + line);
51
+ } else {
52
+ await Bun.write(HISTORY_PATH, line);
53
+ }
54
+ }
55
+
56
+ export async function readHistory(limit = 20): Promise<HistoryEntry[]> {
57
+ const file = Bun.file(HISTORY_PATH);
58
+ if (!(await file.exists())) return [];
59
+
60
+ const text = await file.text();
61
+ const lines = text.trim().split('\n').filter(l => l.length > 0);
62
+
63
+ return lines
64
+ .map(line => {
65
+ try {
66
+ return JSON.parse(line) as HistoryEntry;
67
+ } catch {
68
+ return null;
69
+ }
70
+ })
71
+ .filter((e): e is HistoryEntry => e !== null)
72
+ .reverse()
73
+ .slice(0, limit);
74
+ }
75
+
76
+ export async function getLastEntry(): Promise<HistoryEntry | null> {
77
+ const entries = await readHistory(1);
78
+ return entries[0] ?? null;
79
+ }
80
+
81
+ export async function clearHistory(): Promise<void> {
82
+ const file = Bun.file(HISTORY_PATH);
83
+ if (await file.exists()) {
84
+ await Bun.write(HISTORY_PATH, '');
85
+ }
86
+ }
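The history module above stores one JSON object per line (JSONL) in `~/.pplx/history.jsonl`, truncating answers to 2000 characters and capping the file at 1000 entries. A minimal sketch of reading that file directly, assuming it exists and follows the exported `HistoryEntry` shape; the standalone script itself is illustrative:

```ts
// Read ~/.pplx/history.jsonl directly and list past queries, oldest first.
// Mirrors the parsing done by readHistory(); skips malformed lines the same way.
import type { HistoryEntry } from './history';

const path = `${process.env.HOME}/.pplx/history.jsonl`;
const text = await Bun.file(path).text();

const entries = text
  .trim()
  .split('\n')
  .filter((line) => line.length > 0)
  .flatMap((line) => {
    try {
      return [JSON.parse(line) as HistoryEntry];
    } catch {
      return []; // ignore malformed lines, like readHistory() does
    }
  });

for (const e of entries) {
  console.log(new Date(e.ts).toISOString(), `[${e.m}]`, e.q);
}
```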
package/src/index.ts CHANGED
@@ -4,10 +4,11 @@ import { search, MODELS, type Model } from './api';
4
4
  import { encodeFile } from './files';
5
5
  import { getEnv } from './env';
6
6
  import { fmt, write, writeLn } from './output';
7
+ import { appendHistory, readHistory, getLastEntry } from './history';
8
+ import { renderMarkdown, createMarkdownState } from './markdown';
7
9
 
8
10
  getEnv();
9
11
 
10
-
11
12
  const { values, positionals } = parseArgs({
12
13
  args: Bun.argv.slice(2),
13
14
  options: {
@@ -16,12 +17,17 @@ const { values, positionals } = parseArgs({
16
17
  image: { type: 'string', short: 'i' },
17
18
  json: { type: 'boolean', default: false },
18
19
  help: { type: 'boolean', short: 'h' },
20
+ history: { type: 'boolean', default: false },
21
+ 'no-history': { type: 'boolean', default: false },
22
+ continue: { type: 'boolean', short: 'c', default: false },
23
+ output: { type: 'string', short: 'o' },
24
+ raw: { type: 'boolean', default: false },
19
25
  },
20
26
  allowPositionals: true,
21
27
  strict: true,
22
28
  });
23
29
 
24
- if (values.help || positionals.length === 0) {
30
+ if (values.help) {
25
31
  console.log(`
26
32
  pplx - Perplexity AI search from terminal
27
33
 
@@ -31,6 +37,11 @@ Options:
31
37
  -m, --model <name> Model: ${MODELS.join(', ')} (default: sonar)
32
38
  -f, --file <path> Attach a file (PDF, TXT, etc.)
33
39
  -i, --image <path> Attach an image (PNG, JPG, etc.)
40
+ -o, --output <path> Save output to file (.md, .txt)
41
+ -c, --continue Continue from last query (add context)
42
+ --history Show query history
43
+ --no-history Don't save this query to history
44
+ --raw Raw output (no markdown rendering)
34
45
  --json Output as JSON
35
46
  -h, --help Show this help
36
47
 
@@ -38,18 +49,52 @@ Examples:
38
49
  pplx "what is bun"
39
50
  pplx -m sonar-pro "explain quantum computing"
40
51
  pplx -f report.pdf "summarize this document"
52
+ pplx -c "tell me more about that"
53
+ pplx --history | grep "bun"
41
54
  `);
42
55
  process.exit(0);
43
56
  }
44
57
 
45
- const query = positionals.join(' ');
58
+ if (values.history) {
59
+ const entries = await readHistory(20);
60
+ if (entries.length === 0) {
61
+ console.log('No history yet.');
62
+ } else {
63
+ for (const entry of entries) {
64
+ console.log(fmt.historyEntry(entry.ts, entry.m, entry.q));
65
+ }
66
+ }
67
+ process.exit(0);
68
+ }
69
+
70
+ if (positionals.length === 0 && !values.continue) {
71
+ console.error(fmt.error('No query provided. Use -h for help.'));
72
+ process.exit(2);
73
+ }
74
+
75
+ let query = positionals.join(' ');
46
76
  const model = (MODELS.includes(values.model as Model) ? values.model : 'sonar') as Model;
47
77
 
78
+ if (values.continue) {
79
+ const last = await getLastEntry();
80
+ if (last) {
81
+ const context = `Previous question: "${last.q}"\nPrevious answer: "${last.a.slice(0, 500)}..."\n\nFollow-up question: ${query || 'Continue and elaborate on the previous answer.'}`;
82
+ query = context;
83
+ if (!values.json) {
84
+ await write(fmt.continuing(last.q));
85
+ }
86
+ } else if (!query) {
87
+ console.error(fmt.error('No previous query to continue from.'));
88
+ process.exit(2);
89
+ }
90
+ }
91
+
48
92
  const filePath = values.file || values.image;
49
93
  const file = filePath ? await encodeFile(filePath) : undefined;
50
94
 
51
95
  const startTime = Date.now();
52
96
  let fullContent = '';
97
+ const mdState = createMarkdownState();
53
98
 
54
99
  if (!values.json) {
55
100
  await write(fmt.model(model) + ' ');
@@ -60,16 +105,18 @@ await search(query, model, {
60
105
  onContent: async (text) => {
61
106
  fullContent += text;
62
107
  if (!values.json) {
63
- await write(text);
108
+ const out = values.raw ? text : renderMarkdown(text, mdState);
109
+ await write(out);
64
110
  }
65
111
  },
66
112
  onDone: async (citations, usage) => {
67
113
  const elapsed = Date.now() - startTime;
114
+ const citationUrls = citations.map((c) => c.url);
68
115
 
69
116
  if (values.json) {
70
117
  const output = {
71
118
  answer: fullContent,
72
- citations: citations.map((c) => c.url),
119
+ citations: citationUrls,
73
120
  model,
74
121
  tokens: usage.prompt_tokens + usage.completion_tokens,
75
122
  latency_ms: elapsed,
@@ -84,6 +131,46 @@ await search(query, model, {
84
131
  }
85
132
  await write(fmt.stats(usage.prompt_tokens + usage.completion_tokens, elapsed));
86
133
  }
134
+
135
+ if (values.output) {
136
+ const ext = values.output.split('.').pop()?.toLowerCase();
137
+ let content = '';
138
+
139
+ if (ext === 'md') {
140
+ content = `# ${positionals.join(' ') || 'Query'}\n\n`;
141
+ content += `**Model:** ${model}\n`;
142
+ content += `**Date:** ${new Date().toISOString()}\n\n`;
143
+ content += `## Answer\n\n${fullContent}\n\n`;
144
+ if (citationUrls.length > 0) {
145
+ content += `## Sources\n\n`;
146
+ citationUrls.forEach((url, i) => {
147
+ content += `${i + 1}. ${url}\n`;
148
+ });
149
+ }
150
+ } else {
151
+ content = fullContent;
152
+ if (citationUrls.length > 0) {
153
+ content += '\n\nSources:\n';
154
+ citationUrls.forEach((url, i) => {
155
+ content += `${i + 1}. ${url}\n`;
156
+ });
157
+ }
158
+ }
159
+
160
+ await Bun.write(values.output, content);
161
+ if (!values.json) {
162
+ await writeLn(`\n${fmt.model('saved')} ${values.output}`);
163
+ }
164
+ }
165
+
166
+ if (!values['no-history'] && !values.json) {
167
+ await appendHistory({
168
+ q: positionals.join(' ') || '(continued)',
169
+ m: model,
170
+ a: fullContent,
171
+ citations: citationUrls,
172
+ });
173
+ }
87
174
  },
88
175
  onError: async (error) => {
89
176
  if (values.json) {
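The `-c` / `--continue` handling above folds the previous history entry into the new prompt. A condensed sketch of that assembly, with `buildFollowUp` as an illustrative helper name (the package inlines this logic in index.ts):

```ts
// Condensed sketch of the --continue prompt assembly in index.ts above.
// buildFollowUp() is an illustrative helper, not part of the package; the
// template mirrors the string built in the diff.
import { getLastEntry } from './history';

async function buildFollowUp(query: string): Promise<string | null> {
  const last = await getLastEntry();
  if (!last) return null; // nothing to continue from

  return (
    `Previous question: "${last.q}"\n` +
    `Previous answer: "${last.a.slice(0, 500)}..."\n\n` +
    `Follow-up question: ${query || 'Continue and elaborate on the previous answer.'}`
  );
}
```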
package/src/markdown.ts ADDED
@@ -0,0 +1,64 @@
1
+ const c = {
2
+ reset: '\x1b[0m',
3
+ bold: '\x1b[1m',
4
+ dim: '\x1b[2m',
5
+ italic: '\x1b[3m',
6
+ cyan: '\x1b[36m',
7
+ yellow: '\x1b[33m',
8
+ magenta: '\x1b[35m',
9
+ gray: '\x1b[90m',
10
+ bgBlue: '\x1b[44m',
11
+ } as const;
12
+
13
+ export interface MarkdownState {
14
+ inCode: boolean;
15
+ codeLanguage: string;
16
+ }
17
+
18
+ export function createMarkdownState(): MarkdownState {
19
+ return { inCode: false, codeLanguage: '' };
20
+ }
21
+
22
+ export function renderMarkdown(chunk: string, state: MarkdownState): string {
23
+ let out = chunk;
24
+
25
+ const fenceMatch = out.match(/```(\w*)/);
26
+ if (fenceMatch) {
27
+ state.inCode = !state.inCode;
28
+ state.codeLanguage = fenceMatch[1] || '';
29
+ out = out.replace(/```\w*/g, state.inCode ? `${c.yellow}━━━ ${state.codeLanguage || 'code'} ━━━${c.reset}` : `${c.yellow}━━━━━━━━━━━${c.reset}`);
30
+ return out;
31
+ }
32
+
33
+ if (state.inCode) {
34
+ return `${c.dim}${out}${c.reset}`;
35
+ }
36
+
37
+ if (out.startsWith('### ')) {
38
+ return `${c.bold}${c.cyan}${out.slice(4)}${c.reset}`;
39
+ }
40
+ if (out.startsWith('## ')) {
41
+ return `${c.bold}${c.magenta}${out.slice(3)}${c.reset}`;
42
+ }
43
+ if (out.startsWith('# ')) {
44
+ return `${c.bold}${c.cyan}▸ ${out.slice(2)}${c.reset}`;
45
+ }
46
+
47
+ if (out.startsWith('> ')) {
48
+ return `${c.italic}${c.gray}│ ${out.slice(2)}${c.reset}`;
49
+ }
50
+
51
+ if (out.match(/^[\-\*] /)) {
52
+ out = out.replace(/^[\-\*] /, `${c.cyan}• ${c.reset}`);
53
+ }
54
+ if (out.match(/^\d+\. /)) {
55
+ out = out.replace(/^(\d+)\. /, `${c.cyan}$1.${c.reset} `);
56
+ }
57
+
58
+ out = out
59
+ .replace(/\*\*([^*]+)\*\*/g, `${c.bold}$1${c.reset}`)
60
+ .replace(/\*([^*]+)\*/g, `${c.italic}$1${c.reset}`)
61
+ .replace(/`([^`]+)`/g, `${c.bgBlue} $1 ${c.reset}`);
62
+
63
+ return out;
64
+ }
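The renderer above is deliberately stateful: one `MarkdownState` lives for the whole response so code-fence context carries across streamed chunks. A minimal sketch of driving it, with the hard-coded `chunks` array standing in for the pieces delivered by `search()`'s `onContent` callback:

```ts
// Drive the streaming markdown renderer defined above. The hard-coded chunks
// are illustrative; in the CLI they arrive from the Perplexity streaming API.
import { createMarkdownState, renderMarkdown } from './markdown';

const state = createMarkdownState();
const chunks = [
  '## Streaming\n',
  '- answers render **as they arrive**\n',
  'inline `code` gets a highlighted background\n',
];

for (const chunk of chunks) {
  // The shared state carries code-fence context from one chunk to the next.
  process.stdout.write(renderMarkdown(chunk, state));
}
```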
package/src/output.ts CHANGED
@@ -17,6 +17,13 @@ export const fmt = {
17
17
  stats: (tokens: number, ms: number) =>
18
18
  `\n${c.gray}[${tokens} tokens, ${(ms / 1000).toFixed(1)}s]${c.reset}\n`,
19
19
  sources: () => `\n${c.yellow}Sources:${c.reset}`,
20
+ historyEntry: (ts: number, model: string, query: string) => {
21
+ const date = new Date(ts).toLocaleString('en-US', {
22
+ month: 'short', day: 'numeric', hour: '2-digit', minute: '2-digit'
23
+ });
24
+ return `${c.dim}${date}${c.reset} ${c.cyan}[${model}]${c.reset} ${query}`;
25
+ },
26
+ continuing: (query: string) => `${c.dim}Continuing from:${c.reset} ${query.slice(0, 50)}${query.length > 50 ? '...' : ''}\n`,
20
27
  };
21
28
 
22
29
  export async function write(text: string): Promise<void> {