genai-lite 0.1.0 → 0.1.1

package/README.md CHANGED
@@ -215,6 +215,103 @@ import type {
  } from 'genai-lite';
  ```
 
+ ## Utilities
+
+ genai-lite includes utilities for working with LLMs, available through the `genai-lite/utils` subpath:
+
+ ### Token Counting
+
+ Count the number of tokens in a string using OpenAI's tiktoken encodings:
+
+ ```typescript
+ import { countTokens } from 'genai-lite/utils';
+
+ const text = 'Hello, this is a sample text for token counting.';
+ const tokenCount = countTokens(text); // Uses the gpt-4 tokenizer by default
+ console.log(`Token count: ${tokenCount}`);
+
+ // Specify a different model's tokenizer
+ const gpt35Tokens = countTokens(text, 'gpt-3.5-turbo');
+ ```
+
+ **Note:** The `countTokens` function uses the `js-tiktoken` library and supports all models that have tiktoken encodings.
+
+ ### Smart Text Preview
+
+ Generate intelligent previews of large text blocks that preserve context:
+
+ ```typescript
+ import { getSmartPreview } from 'genai-lite/utils';
+
+ const largeCodeFile = `
+ function calculateTotal(items) {
+   let total = 0;
+
+   for (const item of items) {
+     total += item.price * item.quantity;
+   }
+
+   return total;
+ }
+
+ function applyDiscount(total, discountPercent) {
+   return total * (1 - discountPercent / 100);
+ }
+
+ // ... many more lines of code ...
+ `;
+
+ // Get a preview that shows at least 5 lines but extends to a logical break point
+ const preview = getSmartPreview(largeCodeFile, {
+   minLines: 5,
+   maxLines: 10
+ });
+ ```
+
+ The `getSmartPreview` function intelligently truncates text:
+ - Returns the full content if it is no longer than `maxLines`
+ - Shows at least `minLines` of content
+ - Extends to the next blank line (up to `maxLines`) to avoid cutting off in the middle of a code block or paragraph
+ - Appends `... (content truncated)` when content is truncated
+
+ ### Example: Building Token-Aware Prompts
+
+ Combine these utilities to build prompts that fit within model context windows:
+
+ ```typescript
+ import fs from 'node:fs/promises';
+
+ import { LLMService, fromEnvironment } from 'genai-lite';
+ import { countTokens, getSmartPreview } from 'genai-lite/utils';
+
+ const llm = new LLMService(fromEnvironment);
+
+ // Large source file
+ const sourceCode = await fs.readFile('large-file.js', 'utf-8');
+
+ // Get a smart preview that fits within the token budget
+ let preview = getSmartPreview(sourceCode, { minLines: 20, maxLines: 50 });
+ let tokenCount = countTokens(preview, 'gpt-4.1-mini');
+
+ // Use a smaller preview if the first attempt exceeds the budget
+ const maxTokens = 4000;
+ if (tokenCount > maxTokens) {
+   preview = getSmartPreview(sourceCode, { minLines: 10, maxLines: 30 });
+ }
+
+ // Send to LLM
+ const response = await llm.sendMessage({
+   providerId: 'openai',
+   modelId: 'gpt-4.1-mini',
+   messages: [
+     {
+       role: 'user',
+       content: `Analyze this code:\n\n${preview}`
+     }
+   ]
+ });
+ ```
+
  ## Contributing
 
  Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
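
The README example above retries once with a smaller window. A natural extension is to keep shrinking until the preview fits the budget; a minimal sketch using only the documented API (the `previewWithinBudget` helper is hypothetical, not part of genai-lite):

```typescript
import { countTokens, getSmartPreview } from 'genai-lite/utils';

// Hypothetical helper: halve the preview window until the result fits
// the token budget (or we hit a floor of 10 lines).
function previewWithinBudget(source: string, maxTokens: number): string {
  let maxLines = 50;
  let preview = getSmartPreview(source, { minLines: 10, maxLines });
  while (countTokens(preview) > maxTokens && maxLines > 10) {
    maxLines = Math.max(10, Math.floor(maxLines / 2));
    preview = getSmartPreview(source, { minLines: 10, maxLines });
  }
  return preview;
}
```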
package/dist/utils/index.d.ts ADDED
@@ -0,0 +1 @@
+ export * from './prompt';
package/dist/utils/index.js ADDED
@@ -0,0 +1,17 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ __exportStar(require("./prompt"), exports);
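
The block above is the standard shim `tsc` emits for a star re-export under CommonJS; judging from the `index.d.ts` hunk above, the presumed source is a single line:

```typescript
// Presumed src/utils/index.ts (inferred from the emitted declaration file):
export * from './prompt';
```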
package/dist/utils/prompt.d.ts ADDED
@@ -0,0 +1,6 @@
+ import { TiktokenModel } from 'js-tiktoken';
+ export declare function countTokens(text: string, model?: TiktokenModel): number;
+ export declare function getSmartPreview(content: string, config: {
+     minLines: number;
+     maxLines: number;
+ }): string;
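
These declarations fix the typed surface of the utils subpath; a small usage sketch against them (model name chosen for illustration):

```typescript
import type { TiktokenModel } from 'js-tiktoken';
import { countTokens, getSmartPreview } from 'genai-lite/utils';

// model? defaults to 'gpt-4' when omitted, per the implementation below
const model: TiktokenModel = 'gpt-3.5-turbo';
const tokens: number = countTokens('How many tokens is this?', model);
const preview: string = getSmartPreview('a\nb\nc', { minLines: 1, maxLines: 2 });
```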
package/dist/utils/prompt.js ADDED
@@ -0,0 +1,55 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.countTokens = countTokens;
+ exports.getSmartPreview = getSmartPreview;
+ const js_tiktoken_1 = require("js-tiktoken");
+ const tokenizerCache = new Map();
+ function getTokenizer(model) {
+     if (tokenizerCache.has(model)) {
+         return tokenizerCache.get(model);
+     }
+     try {
+         const tokenizer = (0, js_tiktoken_1.encodingForModel)(model);
+         tokenizerCache.set(model, tokenizer);
+         return tokenizer;
+     }
+     catch (error) {
+         console.error(`Failed to initialize tokenizer for model ${model}:`, error);
+         throw error;
+     }
+ }
+ function countTokens(text, model = 'gpt-4') {
+     if (!text)
+         return 0;
+     try {
+         const tokenizer = getTokenizer(model);
+         return tokenizer.encode(text).length;
+     }
+     catch (error) {
+         // Fall back to a rough estimate if the tokenizer fails for any reason
+         return Math.ceil(text.length / 4);
+     }
+ }
+ function getSmartPreview(content, config) {
+     const lines = content.split('\n');
+     // If the file is not longer than maxLines, return it in full
+     if (lines.length <= config.maxLines) {
+         return content;
+     }
+     // Always show at least minLines
+     let endLine = config.minLines;
+     let emptyLinesCount = lines
+         .slice(0, config.minLines)
+         .filter((line) => line.trim() === '').length;
+     // If we haven't found at least two empty lines, keep looking up to maxLines
+     if (emptyLinesCount < 2 && lines.length > config.minLines) {
+         for (let i = config.minLines; i < Math.min(lines.length, config.maxLines); i++) {
+             if (lines[i].trim() === '') {
+                 endLine = i + 1; // Include the empty line
+                 break;
+             }
+             endLine = i + 1;
+         }
+     }
+     return lines.slice(0, endLine).join('\n') + '\n... (content truncated)';
+ }
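
To make the blank-line heuristic concrete, a small trace through the compiled logic above (expected output derived by hand from the source, not from the package's tests):

```typescript
import { getSmartPreview } from 'genai-lite/utils';

const sample = [
  'const a = 1;',
  'const b = 2;',
  'const c = 3;',
  '',
  'const d = 4;',
  'const e = 5;',
].join('\n');

// 6 lines > maxLines (4), so the content is truncated. Fewer than two blank
// lines fall within the first minLines (2) lines, so the scan continues from
// line index 2 and stops just past the blank line at index 3:
const preview = getSmartPreview(sample, { minLines: 2, maxLines: 4 });
// preview === 'const a = 1;\nconst b = 2;\nconst c = 3;\n\n... (content truncated)'
```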
package/package.json CHANGED
@@ -1,9 +1,21 @@
  {
    "name": "genai-lite",
-   "version": "0.1.0",
+   "version": "0.1.1",
    "description": "A lightweight, portable toolkit for interacting with various Generative AI APIs.",
    "main": "dist/index.js",
    "types": "dist/index.d.ts",
+   "exports": {
+     ".": {
+       "import": "./dist/index.js",
+       "require": "./dist/index.js",
+       "types": "./dist/index.d.ts"
+     },
+     "./utils": {
+       "import": "./dist/utils/index.js",
+       "require": "./dist/utils/index.js",
+       "types": "./dist/utils/index.d.ts"
+     }
+   },
    "author": "Luigi Acerbi <luigi.acerbi@gmail.com>",
    "license": "MIT",
    "funding": {
@@ -26,6 +38,7 @@
    "dependencies": {
      "@anthropic-ai/sdk": "^0.52.0",
      "@google/genai": "^1.0.1",
+     "js-tiktoken": "^1.0.20",
      "openai": "^4.103.0"
    },
    "devDependencies": {