@voidwire/llm-summarize 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (5)
  1. package/LICENSE +21 -0
  2. package/README.md +89 -0
  3. package/cli.ts +187 -0
  4. package/index.ts +399 -0
  5. package/package.json +46 -0
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Rudy Ruiz
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,89 @@
+ # llm-summarize
+
+ Fast LLM-powered text summarization for observability and logging.
+
+ ## Philosophy
+
+ - **Config-driven** - No hardcoded defaults; specify the exact provider/model
+ - **Prismis pattern** - Secrets in `.env`, references in `config.toml` via `env:VAR_NAME`
+ - **Fast and cheap** - Designed for high-volume summarization (haiku/gpt-4.1-mini)
+ - **Composable** - JSON output pipes to `jq` and other tools
+
+ ## Installation
+
+ ```bash
+ cd llmcli-tools
+ ./install.sh llm-summarize
+ ```
+
+ ## Configuration
+
+ ### Config file: `~/.config/llm/config.toml`
+
+ ```toml
+ [llm]
+ provider = "openai"
+ model = "gpt-4.1-mini"
+ api_key = "env:OPENAI_API_KEY"
+ max_tokens = 50
+ ```
+
+ ### Secrets file: `~/.config/llm/.env`
+
+ ```bash
+ OPENAI_API_KEY=sk-...
+ ANTHROPIC_API_KEY=sk-ant-...
+ ```
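+
+ Environment variables `LLM_PROVIDER`, `LLM_MODEL`, and `LLM_API_KEY` override values from the config file (see `loadConfig()` in `index.ts`), which is handy for one-off runs:
+
+ ```bash
+ LLM_MODEL=gpt-4o llm-summarize "Rotated API keys across all services"
+ ```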
+
+ ## Usage
+
+ ```bash
+ llm-summarize <text>
+ llm-summarize --stdin
+ echo "text" | llm-summarize --stdin
+ ```
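+
+ The package also exposes a library API (`summarize` and `loadConfig` from `index.ts`). A minimal sketch, following the usage shown in the module docstring:
+
+ ```typescript
+ import { summarize, loadConfig } from "@voidwire/llm-summarize";
+
+ const config = loadConfig(); // reads ~/.config/llm/config.toml and .env
+ const result = await summarize("Deployed v2.0.0 to production", config, {
+   maxTokens: 30, // optional per-call override
+ });
+
+ console.log(result.summary ?? result.error);
+ ```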
+
+ ## Options
+
+ | Flag | Description |
+ |------|-------------|
+ | `--model <name>` | Override model from config |
+ | `--max-tokens <n>` | Max output tokens |
+ | `--stdin` | Read text from stdin |
+ | `-h, --help` | Show help |
+
+ ## Output
+
+ ```json
+ {
+   "summary": "User saved form data to PostgreSQL.",
+   "model": "gpt-4.1-mini",
+   "tokens_used": 12
+ }
+ ```
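+
+ Because the output is plain JSON on stdout, it composes with `jq`; for example, extracting just the summary text:
+
+ ```bash
+ echo "Tool: Edit, File: auth.ts, Result: added JWT validation" \
+   | llm-summarize --stdin | jq -r '.summary'
+ ```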
+
+ ## Supported Providers
+
+ | Provider | Models | API Key |
+ |----------|--------|---------|
+ | `anthropic` | claude-3-5-haiku-latest, claude-sonnet-4-20250514 | Required |
+ | `openai` | gpt-4.1-mini, gpt-4o | Required |
+ | `ollama` | llama3, mistral, gemma3, etc. | Not needed |
+
+ ### Ollama Configuration
+
+ ```toml
+ [llm]
+ provider = "ollama"
+ model = "llama3"
+ api_base = "http://localhost:11434" # required for ollama; /api/generate is appended
+ max_tokens = 50
+ ```
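+
+ With a local Ollama daemon, no API key is needed; the only prerequisite (illustrative) is pulling the model first:
+
+ ```bash
+ ollama pull llama3
+ llm-summarize "Refactored config loader to support env: references"
+ ```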
+
+ ## Exit Codes
+
+ | Code | Meaning |
+ |------|---------|
+ | 0 | Success |
+ | 1 | API error (rate limit, auth, network) |
+ | 2 | Client error (missing args, invalid config) |
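+
+ The distinct exit codes make the tool scriptable. A sketch of handling them in a shell pipeline (hypothetical `event.log` input):
+
+ ```bash
+ if out=$(llm-summarize --stdin < event.log); then
+   echo "$out" | jq -r '.summary'
+ else
+   echo "summarization failed" >&2
+ fi
+ ```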
package/cli.ts ADDED
@@ -0,0 +1,187 @@
+ #!/usr/bin/env bun
+ /**
+  * llm-summarize CLI
+  *
+  * Philosophy:
+  * - Fast summaries for observability and logging
+  * - Multi-provider support (Anthropic, OpenAI, Ollama)
+  * - Deterministic JSON output for tooling integration
+  * - Config-driven - no hardcoded defaults
+  *
+  * Usage:
+  *   llm-summarize <text>
+  *   llm-summarize --stdin
+  *   echo "text" | llm-summarize --stdin
+  *
+  * Config: ~/.config/llm/config.toml
+  *   [llm]
+  *   provider = "anthropic"
+  *   model = "claude-3-5-haiku-latest"
+  *   api_key = "env:ANTHROPIC_API_KEY"
+  *   max_tokens = 50
+  *
+  * Secrets: ~/.config/llm/.env
+  *   ANTHROPIC_API_KEY=sk-ant-...
+  *
+  * Exit codes:
+  *   0 - Success
+  *   1 - API error (rate limit, auth, network)
+  *   2 - Client error (missing args, invalid config)
+  */
+
+ import { summarize, loadConfig, type SummarizeOptions } from "./index";
+
+ /**
+  * Read text from stdin
+  */
+ async function readStdin(): Promise<string> {
+   const chunks: Buffer[] = [];
+
+   for await (const chunk of Bun.stdin.stream()) {
+     chunks.push(Buffer.from(chunk));
+   }
+
+   return Buffer.concat(chunks).toString("utf-8").trim();
+ }
+
+ /**
+  * Print usage
+  */
+ function printUsage(): void {
+   console.error(`
+ llm-summarize - Summarize text using LLM APIs
+
+ Philosophy:
+   Fast, cheap summaries for observability events.
+   Config-driven - specify exact provider/model.
+   JSON output for tooling integration.
+
+ Usage: llm-summarize [options] <text>
+        llm-summarize --stdin
+
+ Options:
+   --model <name>     Override model from config
+   --max-tokens <n>   Max output tokens (default: from config or 50)
+   --stdin            Read text from stdin
+   -h, --help         Show this help
+
+ Config file: ~/.config/llm/config.toml
+   [llm]
+   provider = "anthropic"
+   model = "claude-3-5-haiku-latest"
+   api_key = "env:ANTHROPIC_API_KEY"
+   max_tokens = 50
+
+ Secrets file: ~/.config/llm/.env
+   ANTHROPIC_API_KEY=sk-ant-...
+   OPENAI_API_KEY=sk-...
+
+ Environment overrides:
+   LLM_PROVIDER   Override provider
+   LLM_MODEL      Override model
+   LLM_API_KEY    Override API key
+
+ Supported providers:
+   anthropic - Claude models (claude-3-5-haiku-latest, claude-sonnet-4-20250514)
+   openai    - GPT models (gpt-4.1-mini, gpt-4o)
+   ollama    - Local models (llama3, mistral, gemma3, etc.) - no API key needed
+
+ Examples:
+   # Simple summarization
+   llm-summarize "User requested fix for post-password-reset login failure"
+
+   # With options
+   llm-summarize --max-tokens 30 "Long event description..."
+
+   # From stdin (for piping)
+   echo "Tool: Edit, File: auth.ts, Result: added JWT validation" | llm-summarize --stdin
+
+   # Pipe from another tool
+   cat event.json | jq -r '.description' | llm-summarize --stdin
+ `);
+ }
+
+ interface ParsedArgs {
+   text: string;
+   options: SummarizeOptions;
+ }
+
+ /**
+  * Parse command-line arguments; returns null when help was requested
+  */
+ async function parseArgs(argv: string[]): Promise<ParsedArgs | null> {
+   const args = argv.slice(2);
+
+   // Handle help (and the bare no-argument case)
+   if (args.length === 0 || args.includes("--help") || args.includes("-h")) {
+     return null;
+   }
+
+   let modelOverride: string | undefined;
+   let maxTokensOverride: number | undefined;
+   let useStdin = false;
+   let text = "";
+
+   for (let i = 0; i < args.length; i++) {
+     const arg = args[i];
+
+     if (arg === "--model" && i + 1 < args.length) {
+       modelOverride = args[++i];
+     } else if (arg === "--max-tokens" && i + 1 < args.length) {
+       maxTokensOverride = parseInt(args[++i], 10);
+       if (Number.isNaN(maxTokensOverride)) {
+         console.error("Error: --max-tokens requires a number");
+         process.exit(2);
+       }
+     } else if (arg === "--stdin") {
+       useStdin = true;
+     } else if (!arg.startsWith("-")) {
+       text = arg;
+     }
+   }
+
+   // Get text from stdin or argument
+   if (useStdin) {
+     text = await readStdin();
+   }
+
+   return {
+     text,
+     options: {
+       model: modelOverride,
+       maxTokens: maxTokensOverride,
+     },
+   };
+ }
+
+ /**
+  * Main entry point
+  */
+ async function main(): Promise<void> {
+   const parsed = await parseArgs(process.argv);
+
+   if (!parsed) {
+     printUsage();
+     process.exit(0);
+   }
+
+   if (!parsed.text) {
+     console.log(JSON.stringify({ error: "No text provided" }, null, 2));
+     console.error("Error: No text to summarize");
+     process.exit(2);
+   }
+
+   // Load config and summarize
+   const config = loadConfig();
+   const result = await summarize(parsed.text, config, parsed.options);
+
+   // Output JSON on stdout; diagnostics go to stderr
+   console.log(JSON.stringify(result, null, 2));
+
+   if (result.summary) {
+     console.error(`✅ Summarized (${result.tokens_used ?? "?"} tokens)`);
+     process.exit(0);
+   } else {
+     console.error(`❌ ${result.error}`);
+     process.exit(1);
+   }
+ }
+
+ main();
package/index.ts ADDED
@@ -0,0 +1,399 @@
+ /**
+  * llm-summarize - Library exports
+  *
+  * Fast LLM-powered text summarization for observability and logging.
+  * Pure functions, no process.exit, no stderr output.
+  *
+  * Usage:
+  *   import { summarize, loadConfig } from "@voidwire/llm-summarize";
+  *   const config = loadConfig();
+  *   const result = await summarize("text to summarize", config);
+  */
+
+ import { readFileSync, existsSync } from "fs";
+ import { homedir } from "os";
+ import { join } from "path";
+
+ // ============================================================================
+ // Types
+ // ============================================================================
+
+ export interface SummarizeResult {
+   summary?: string;
+   error?: string;
+   model?: string;
+   tokens_used?: number;
+ }
+
+ export interface LLMConfig {
+   provider: string | null;
+   model: string | null;
+   apiKey: string | null;
+   apiBase: string | null;
+   maxTokens: number;
+ }
+
+ export interface SummarizeOptions {
+   model?: string;
+   maxTokens?: number;
+ }
+
+ export type ProviderType = "anthropic" | "openai" | "ollama";
+
+ // ============================================================================
+ // Config Loading
+ // ============================================================================
+
+ /**
+  * Load environment variables from .env file
+  */
+ function loadEnvFile(envPath: string): Record<string, string> {
+   const env: Record<string, string> = {};
+
+   if (!existsSync(envPath)) {
+     return env;
+   }
+
+   try {
+     const content = readFileSync(envPath, "utf-8");
+     for (const line of content.split("\n")) {
+       const trimmed = line.trim();
+       if (!trimmed || trimmed.startsWith("#")) continue;
+
+       const eqIdx = trimmed.indexOf("=");
+       if (eqIdx === -1) continue;
+
+       const key = trimmed.slice(0, eqIdx).trim();
+       let value = trimmed.slice(eqIdx + 1).trim();
+
+       // Remove surrounding quotes if present
+       if (
+         (value.startsWith('"') && value.endsWith('"')) ||
+         (value.startsWith("'") && value.endsWith("'"))
+       ) {
+         value = value.slice(1, -1);
+       }
+
+       env[key] = value;
+     }
+   } catch {
+     // Ignore parse errors
+   }
+
+   return env;
+ }
+
+ /**
+  * Resolve env: references in config values
+  */
+ function resolveEnvRef(
+   value: string,
+   envVars: Record<string, string>,
+ ): string | null {
+   if (value.startsWith("env:")) {
+     const varName = value.slice(4);
+     return envVars[varName] || process.env[varName] || null;
+   }
+   return value;
+ }
+
+ /**
+  * Load configuration from config.toml with env file support
+  * Config: ~/.config/llm/config.toml
+  * Secrets: ~/.config/llm/.env
+  */
+ export function loadConfig(): LLMConfig {
+   const configDir = join(homedir(), ".config", "llm");
+   const configPath = join(configDir, "config.toml");
+   const envPath = join(configDir, ".env");
+
+   // Load .env file first
+   const envVars = loadEnvFile(envPath);
+
+   // No provider/model defaults - config required (maxTokens falls back to 50)
+   const config: LLMConfig = {
+     provider: null,
+     model: null,
+     apiKey: null,
+     apiBase: null,
+     maxTokens: 50,
+   };
+
+   if (!existsSync(configPath)) {
+     return config;
+   }
+
+   try {
+     const content = readFileSync(configPath, "utf-8");
+
+     // Naive TOML parsing: match known keys anywhere in the file
+     // (the config is expected to contain a single [llm] table)
+     const providerMatch = content.match(/^\s*provider\s*=\s*"([^"]+)"/m);
+     if (providerMatch) {
+       config.provider = providerMatch[1];
+     }
+
+     const modelMatch = content.match(/^\s*model\s*=\s*"([^"]+)"/m);
+     if (modelMatch) {
+       config.model = modelMatch[1];
+     }
+
+     const apiKeyMatch = content.match(/^\s*api_key\s*=\s*"([^"]+)"/m);
+     if (apiKeyMatch) {
+       config.apiKey = resolveEnvRef(apiKeyMatch[1], envVars);
+     }
+
+     const apiBaseMatch = content.match(/^\s*api_base\s*=\s*"([^"]+)"/m);
+     if (apiBaseMatch) {
+       config.apiBase = apiBaseMatch[1];
+     }
+
+     const maxTokensMatch = content.match(/^\s*max_tokens\s*=\s*(\d+)/m);
+     if (maxTokensMatch) {
+       config.maxTokens = parseInt(maxTokensMatch[1], 10);
+     }
+   } catch {
+     // Ignore parse errors
+   }
+
+   // Environment variables override config
+   if (process.env.LLM_PROVIDER) config.provider = process.env.LLM_PROVIDER;
+   if (process.env.LLM_MODEL) config.model = process.env.LLM_MODEL;
+   if (process.env.LLM_API_KEY) config.apiKey = process.env.LLM_API_KEY;
+
+   return config;
+ }
+
+ // ============================================================================
+ // Provider Implementations
+ // ============================================================================
+
+ /**
+  * Call Anthropic API
+  */
+ async function callAnthropic(
+   text: string,
+   model: string,
+   maxTokens: number,
+   apiKey: string,
+   apiBase?: string,
+ ): Promise<SummarizeResult> {
+   const endpoint = apiBase || "https://api.anthropic.com/v1/messages";
+
+   try {
+     const response = await fetch(endpoint, {
+       method: "POST",
+       headers: {
+         "x-api-key": apiKey,
+         "anthropic-version": "2023-06-01",
+         "content-type": "application/json",
+       },
+       body: JSON.stringify({
+         model,
+         max_tokens: maxTokens,
+         temperature: 0.3,
+         messages: [
+           {
+             role: "user",
+             content: `What was accomplished or decided? One sentence, past tense, focus on actions and outcomes:\n\n${text}`,
+           },
+         ],
+       }),
+     });
+
+     if (!response.ok) {
+       const errorText = await response.text();
+       return {
+         error: `Anthropic API error: ${response.status} ${errorText}`,
+       };
+     }
+
+     const result = await response.json();
+     const content = result.content?.[0]?.text || "";
+
+     return {
+       summary: content.trim(),
+       model,
+       tokens_used: result.usage?.output_tokens,
+     };
+   } catch (error) {
+     return {
+       error: `Anthropic request failed: ${String(error)}`,
+     };
+   }
+ }
+
+ /**
+  * Call OpenAI API
+  */
+ async function callOpenAI(
+   text: string,
+   model: string,
+   maxTokens: number,
+   apiKey: string,
+   apiBase?: string,
+ ): Promise<SummarizeResult> {
+   const endpoint = apiBase || "https://api.openai.com/v1/chat/completions";
+
+   try {
+     const response = await fetch(endpoint, {
+       method: "POST",
+       headers: {
+         Authorization: `Bearer ${apiKey}`,
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify({
+         model,
+         max_tokens: maxTokens,
+         temperature: 0.3,
+         messages: [
+           {
+             role: "user",
+             content: `What was accomplished or decided? One sentence, past tense, focus on actions and outcomes:\n\n${text}`,
+           },
+         ],
+       }),
+     });
+
+     if (!response.ok) {
+       const errorText = await response.text();
+       return {
+         error: `OpenAI API error: ${response.status} ${errorText}`,
+       };
+     }
+
+     const result = await response.json();
+     const content = result.choices?.[0]?.message?.content || "";
+
+     return {
+       summary: content.trim(),
+       model,
+       tokens_used: result.usage?.completion_tokens,
+     };
+   } catch (error) {
+     return {
+       error: `OpenAI request failed: ${String(error)}`,
+     };
+   }
+ }
+
+ /**
+  * Call Ollama API
+  * (apiBase is the server root, e.g. http://localhost:11434; /api/generate is appended here)
+  */
+ async function callOllama(
+   text: string,
+   model: string,
+   maxTokens: number,
+   apiBase: string,
+ ): Promise<SummarizeResult> {
+   const endpoint = `${apiBase}/api/generate`;
+
+   try {
+     const response = await fetch(endpoint, {
+       method: "POST",
+       headers: {
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify({
+         model,
+         prompt: `What was accomplished or decided? One sentence, past tense, focus on actions and outcomes:\n\n${text}`,
+         stream: false,
+         options: {
+           num_predict: maxTokens,
+           temperature: 0.3,
+         },
+       }),
+     });
+
+     if (!response.ok) {
+       const errorText = await response.text();
+       return {
+         error: `Ollama API error: ${response.status} ${errorText}`,
+       };
+     }
+
+     const result = await response.json();
+     const content = result.response || "";
+
+     return {
+       summary: content.trim(),
+       model,
+       tokens_used: result.eval_count,
+     };
+   } catch (error) {
+     return {
+       error: `Ollama request failed: ${String(error)}`,
+     };
+   }
+ }
+
+ // ============================================================================
+ // Main API
+ // ============================================================================
+
+ /**
+  * Summarize text using configured LLM
+  *
+  * @param text - Text to summarize
+  * @param config - LLM configuration (from loadConfig())
+  * @param options - Optional overrides for model and maxTokens
+  * @returns SummarizeResult with summary or error
+  */
+ export async function summarize(
+   text: string,
+   config: LLMConfig,
+   options?: SummarizeOptions,
+ ): Promise<SummarizeResult> {
+   const provider = config.provider;
+   const model = options?.model || config.model;
+   const maxTokens = options?.maxTokens ?? config.maxTokens;
+   const apiKey = config.apiKey;
+
+   // Validate config
+   if (!provider) {
+     return {
+       error: `No provider configured. Set provider in ~/.config/llm/config.toml`,
+     };
+   }
+
+   if (!model) {
+     return {
+       error: `No model configured. Set model in ~/.config/llm/config.toml`,
+     };
+   }
+
+   // API key required for cloud providers
+   if (!apiKey && provider !== "ollama") {
+     return {
+       error: `No API key configured. Set api_key = "env:VAR_NAME" in ~/.config/llm/config.toml`,
+     };
+   }
+
+   // Call the appropriate provider
+   if (provider === "anthropic") {
+     return callAnthropic(
+       text,
+       model,
+       maxTokens,
+       apiKey!,
+       config.apiBase || undefined,
+     );
+   } else if (provider === "openai") {
+     return callOpenAI(
+       text,
+       model,
+       maxTokens,
+       apiKey!,
+       config.apiBase || undefined,
+     );
+   } else if (provider === "ollama") {
+     if (!config.apiBase) {
+       return {
+         error: `No api_base configured for ollama. Set api_base in ~/.config/llm/config.toml`,
+       };
+     }
+     return callOllama(text, model, maxTokens, config.apiBase);
+   } else {
+     return {
+       error: `Unknown provider: ${provider}. Supported: anthropic, openai, ollama`,
+     };
+   }
+ }
package/package.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "name": "@voidwire/llm-summarize",
+   "version": "2.0.0",
+   "description": "Fast LLM-powered text summarization for observability and logging",
+   "type": "module",
+   "main": "./index.ts",
+   "bin": {
+     "llm-summarize": "./cli.ts"
+   },
+   "exports": {
+     ".": "./index.ts",
+     "./cli": "./cli.ts"
+   },
+   "files": [
+     "index.ts",
+     "cli.ts",
+     "lib/**/*.ts",
+     "README.md",
+     "LICENSE"
+   ],
+   "scripts": {
+     "test": "bun test"
+   },
+   "keywords": [
+     "llm",
+     "summarize",
+     "anthropic",
+     "openai",
+     "cli",
+     "llmcli"
+   ],
+   "author": "nickpending <nickpending@users.noreply.github.com>",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/nickpending/llmcli-tools.git",
+     "directory": "packages/llm-summarize"
+   },
+   "homepage": "https://github.com/nickpending/llmcli-tools/tree/main/packages/llm-summarize#readme",
+   "bugs": {
+     "url": "https://github.com/nickpending/llmcli-tools/issues"
+   },
+   "engines": {
+     "bun": ">=1.0.0"
+   }
+ }