greptor 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +22 -0
  2. package/README.md +365 -0
  3. package/dist/greptor.d.ts +7 -0
  4. package/dist/greptor.d.ts.map +1 -0
  5. package/dist/greptor.js +98 -0
  6. package/dist/greptor.js.map +1 -0
  7. package/dist/index.d.ts +4 -0
  8. package/dist/index.d.ts.map +1 -0
  9. package/dist/index.js +3 -0
  10. package/dist/index.js.map +1 -0
  11. package/dist/llm/llm-factory.d.ts +7 -0
  12. package/dist/llm/llm-factory.d.ts.map +1 -0
  13. package/dist/llm/llm-factory.js +53 -0
  14. package/dist/llm/llm-factory.js.map +1 -0
  15. package/dist/metadata-schema/generate.d.ts +3 -0
  16. package/dist/metadata-schema/generate.d.ts.map +1 -0
  17. package/dist/metadata-schema/generate.js +43 -0
  18. package/dist/metadata-schema/generate.js.map +1 -0
  19. package/dist/metadata-schema/initialize.d.ts +5 -0
  20. package/dist/metadata-schema/initialize.d.ts.map +1 -0
  21. package/dist/metadata-schema/initialize.js +37 -0
  22. package/dist/metadata-schema/initialize.js.map +1 -0
  23. package/dist/metadata-schema/types.d.ts +34 -0
  24. package/dist/metadata-schema/types.d.ts.map +1 -0
  25. package/dist/metadata-schema/types.js +30 -0
  26. package/dist/metadata-schema/types.js.map +1 -0
  27. package/dist/processing/chunk.d.ts +3 -0
  28. package/dist/processing/chunk.d.ts.map +1 -0
  29. package/dist/processing/chunk.js +36 -0
  30. package/dist/processing/chunk.js.map +1 -0
  31. package/dist/processing/extract-metadata.d.ts +4 -0
  32. package/dist/processing/extract-metadata.d.ts.map +1 -0
  33. package/dist/processing/extract-metadata.js +39 -0
  34. package/dist/processing/extract-metadata.js.map +1 -0
  35. package/dist/processing/processor.d.ts +28 -0
  36. package/dist/processing/processor.d.ts.map +1 -0
  37. package/dist/processing/processor.js +112 -0
  38. package/dist/processing/processor.js.map +1 -0
  39. package/dist/skills/skill-generator.d.ts +16 -0
  40. package/dist/skills/skill-generator.d.ts.map +1 -0
  41. package/dist/skills/skill-generator.js +210 -0
  42. package/dist/skills/skill-generator.js.map +1 -0
  43. package/dist/storage/file-storage.d.ts +16 -0
  44. package/dist/storage/file-storage.d.ts.map +1 -0
  45. package/dist/storage/file-storage.js +162 -0
  46. package/dist/storage/file-storage.js.map +1 -0
  47. package/dist/storage/index.d.ts +3 -0
  48. package/dist/storage/index.d.ts.map +1 -0
  49. package/dist/storage/index.js +3 -0
  50. package/dist/storage/index.js.map +1 -0
  51. package/dist/storage/types.d.ts +16 -0
  52. package/dist/storage/types.d.ts.map +1 -0
  53. package/dist/storage/types.js +2 -0
  54. package/dist/storage/types.js.map +1 -0
  55. package/dist/types.d.ts +53 -0
  56. package/dist/types.d.ts.map +1 -0
  57. package/dist/types.js +2 -0
  58. package/dist/types.js.map +1 -0
  59. package/dist/utils/file.d.ts +2 -0
  60. package/dist/utils/file.d.ts.map +1 -0
  61. package/dist/utils/file.js +11 -0
  62. package/dist/utils/file.js.map +1 -0
  63. package/dist/utils/hash.d.ts +2 -0
  64. package/dist/utils/hash.d.ts.map +1 -0
  65. package/dist/utils/hash.js +5 -0
  66. package/dist/utils/hash.js.map +1 -0
  67. package/package.json +63 -0
package/LICENSE ADDED
@@ -0,0 +1,22 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Sergii Vashchyshchuk
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
package/README.md ADDED
@@ -0,0 +1,365 @@
1
+ # Greptor
2
+
3
+ > **Grep + Raptor**: Transform messy, unstructured text into clean, grep-friendly data for agentic search workflows.
4
+
5
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
6
+ [![npm version](https://badge.fury.io/js/greptor.svg)](https://www.npmjs.com/package/greptor)
7
+
8
+ Claude Code has proven that agentic search (`ripgrep` + filesystem traversal + iterative investigation) is powerful enough for complex code navigation tasks. But what about **textual data** like documents, transcripts, posts, articles, notes, and reports?
9
+
10
+ Greptor is a library that brings the same workflow to textual data. It **ingests and indexes unstructured text** into a format that agents can easily search with simple tools like `ripgrep`.
11
+
12
+ ## Why Agentic Search (and Why Not Classic RAG)?
13
+
14
+ RAG worked around small context windows by chunking documents and retrieving "relevant" fragments. That approach has recurring pain points:
15
+
16
+ - **Chunking breaks structure**: Tables, section hierarchies, and cross-references get lost.
17
+ - **Embeddings are fuzzy**: They struggle with exact terms, numbers, and identifiers.
18
+ - **Complexity overhead**: Hybrid search + rerankers add latency, cost, and moving parts.
19
+ - **Error cascade**: If retrieval misses the right chunk, the answer can't be correct.
20
+
21
+ Agentic search flips the approach: with larger context windows and better tool use, agents can **search, open files, follow references, and refine queries** — more like a human analyst.
22
+
23
+ Greptor's job is to clean, chunk, and add structure to your documents, making them **easily searchable with text tools** like `ripgrep`. No complex indices, no retrievers, no vector databases. Just **minimal initial processing + maximal grep-ability**.
24
+
25
+ ## How It Works
26
+
27
+ ### Step 1: Install
28
+
29
+ ```bash
30
+ npm install greptor
31
+ # or
32
+ bun add greptor
33
+ ```
34
+
35
+ ### Step 2: Initialize
36
+
37
+ Create a Greptor instance with your base directory, topic, and LLM model.
38
+
39
+ ```typescript
40
+ import { createGreptor } from 'greptor';
41
+
42
+ // Create Greptor instance
43
+ const greptor = await createGreptor({
44
+ baseDir: './projects/investing',
45
+ topic: 'Investing, stock market, financial, and macroeconomics',
46
+ llmModel: 'openai:gpt-5-mini'
47
+ });
48
+ ```
49
+
50
+ - **`baseDir`**: Home directory for your project where all data will be stored.
51
+ - **`topic`**: Helps Greptor understand your data better and generate a relevant metadata schema.
52
+ - **`llmModel`**: OpenAI-compatible model for chunking and metadata extraction. You must provide an API key via environment variables.
53
+
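+
+ Besides the three required options, the compiled source in this release also reads a couple of optional ones. A minimal sketch (values are illustrative):
+
+ ```typescript
+ const greptor = await createGreptor({
+   baseDir: './projects/investing',
+   topic: 'Investing, stock market, financial, and macroeconomics',
+   llmModel: 'openai:gpt-5-mini',
+   workers: 2,      // background processing concurrency (defaults to 1)
+   logger: console, // any object exposing debug/info/warn/error receives progress logs
+ });
+ ```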
54
+ ### Step 3: Start Feeding Documents
55
+
56
+ ```typescript
57
+ await greptor.eat({
58
+ id: 'QwwVJfvfqN8',
59
+ source: 'youtube',
60
+ publisher: '@JosephCarlsonShow',
61
+ format: 'text',
62
+ label: 'Top Five AI Stocks I\'m Buying Now',
63
+ content: '{fetch and populate video transcript here}',
64
+ creationDate: new Date('2025-11-15'),
65
+ metadata: {
66
+ // Optional custom metadata specific to the source or document
67
+ channelTitle: 'Joseph Carlson',
68
+ channelSubscribers: 496000
69
+ },
70
+ });
71
+
72
+ await greptor.eat({
73
+ id: 'tesla_reports_418227_deliveries_for_the_fourth',
74
+ source: 'reddit',
75
+ publisher: 'investing', // For Reddit, publisher is the subreddit name
76
+ format: 'text',
77
+ label: 'Tesla reports 418,227 deliveries for the fourth quarter, down 16%',
78
+ content: '{fetch and populate Reddit post with comments here}',
79
+ creationDate: new Date('2025-12-03'),
80
+ metadata: {
81
+ // Optional custom metadata
82
+ upvotes: 1400
83
+ },
84
+ });
85
+ ```
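+
+ `eat()` resolves to a result object rather than throwing on duplicates or unsupported formats. A minimal sketch of checking it (the document fields are illustrative; the `success`/`message`/`ref` shape follows the compiled source in this release):
+
+ ```typescript
+ const result = await greptor.eat({
+   id: 'example-note-1',
+   source: 'notes',
+   publisher: 'me',
+   format: 'text',
+   label: 'Example note',
+   content: 'Example content to index.',
+   creationDate: new Date(),
+ });
+
+ if (result.success) {
+   // `ref` identifies the stored raw Markdown file; enrichment continues in the background
+   console.log(`Ingested: ${result.ref}`);
+ } else {
+   // duplicates and unsupported formats come back as { success: false, message }
+   console.warn(`Skipped: ${result.message}`);
+ }
+ ```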
86
+
87
+ ### Step 4: Wait for Background Processing
88
+
89
+ Greptor will write your input to a raw Markdown file immediately, then run background enrichment (LLM cleaning + chunking + metadata extraction) and write a processed Markdown file. You can grep the raw files right away, and the processed files will appear shortly after.
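+
+ The public API in this release exposes only `eat()` and `createSkill()`, so there is no completion callback. If you need to block until a specific document is processed, one option is to poll for the processed file (an illustrative sketch; the path follows the layout shown in Step 6 below):
+
+ ```typescript
+ import { existsSync } from 'node:fs';
+ import { setTimeout as sleep } from 'node:timers/promises';
+
+ // Illustrative: wait for one processed file to appear on disk.
+ const processedPath =
+   './projects/investing/content/processed/reddit/investing/2025-12/' +
+   '2025-12-03-Tesla-reports-418227-deliveries-for-the-fourth-quarter-down-16.md';
+
+ while (!existsSync(processedPath)) {
+   await sleep(2_000); // poll every two seconds
+ }
+ console.log('Processed:', processedPath);
+ ```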
90
+
91
+ ### Step 5: Generate a Claude Code Skill
92
+
93
+ ```typescript
94
+ await greptor.createSkill(['youtube', 'reddit']);
95
+ ```
96
+
97
+ This generates a Claude Code skill that instructs agents on how to search your indexed content effectively.
98
+
99
+ The skill is customized for the sources you provide and includes search tips based on the metadata schema. You can always customize it further by hand for better results.
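+
+ The compiled source for this release also accepts a second `overwrite` argument (default `false`) and resolves to a `success`/`message`/`skillPath` result. A sketch:
+
+ ```typescript
+ // Regenerate the skill, replacing a previously generated one.
+ const skill = await greptor.createSkill(['youtube', 'reddit'], true);
+
+ if (skill.success) {
+   console.log(skill.message); // "Skill created at <skillPath>"
+ } else {
+   console.error(skill.message);
+ }
+ ```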
100
+
101
+ ### Step 6: Run the Agent
102
+
103
+ By this point, you should have the following structure in your `baseDir`:
104
+
105
+ ```
106
+ ./projects/investing/
107
+ .claude/
108
+ skills/
109
+ search-youtube-reddit/
110
+ SKILL.md
111
+ content/
112
+ raw/
113
+ youtube/
114
+ JosephCarlsonShow/
115
+ 2025-11/
116
+ 2025-11-15-Top-Five-AI-Stocks-Im-Buying-Now.md
117
+ reddit/
118
+ investing/
119
+ 2025-12/
120
+ 2025-12-03-Tesla-reports-418227-deliveries-for-the-fourth-quarter-down-16.md
121
+ processed/
122
+ youtube/
123
+ JosephCarlsonShow/
124
+ 2025-11/
125
+ 2025-11-15-Top-Five-AI-Stocks-Im-Buying-Now.md
126
+ reddit/
127
+ investing/
128
+ 2025-12/
129
+ 2025-12-03-Tesla-reports-418227-deliveries-for-the-fourth-quarter-down-16.md
130
+ ```
131
+
132
+ Now run Claude Code (or any other agent) in this folder and ask questions about your data or perform research tasks!
133
+
134
+ **Note**: For other agents, you may need to adapt the skill accordingly.
135
+
136
+ **For better results**:
137
+ 1. Connect MCP servers like Yahoo Finance or other relevant financial/stock market MCP servers for up-to-date information.
138
+ 2. Add personal financial information, such as your portfolio holdings, watchlists, and risk profile.
139
+ 3. Create custom skills, slash commands, or subagents for researching specific tickers, sectors, topics, or managing your portfolio.
140
+
141
+ Now you have a personal investment research assistant with access to your portfolio, sentiment data (YouTube, Reddit), news, and market data! You don't have to manually watch dozens of YouTube channels or spend hours scrolling Reddit and other sources.
142
+
143
+ ## Under the Hood
144
+
145
+ ### 1) Raw Write (Immediate)
146
+
147
+ `eat()` writes the input to a raw Markdown file with YAML frontmatter. You can grep it right away.
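+
+ Roughly, a raw file looks like the sketch below (illustrative only; the exact frontmatter keys mirror the fields passed to `eat()` and may differ in detail):
+
+ ```markdown
+ ---
+ id: "tesla_reports_418227_deliveries_for_the_fourth"
+ source: "reddit"
+ publisher: "investing"
+ label: "Tesla reports 418,227 deliveries for the fourth quarter, down 16%"
+ creationDate: 2025-12-03
+ upvotes: 1400
+ ---
+
+ {original post content, exactly as passed to eat()}
+ ```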
148
+
149
+ ### 2) Background Processing (Asynchronous)
150
+
151
+ Workers pick up new documents and run a one-time pipeline:
152
+
153
+ 1. **LLM cleaning**: Remove timestamps, ads, disclaimers, boilerplate, and irrelevant content.
154
+ 2. **LLM chunking**: Transform a blob into semantic section chunks.
155
+ 3. **LLM metadata extraction**: Extract metadata relevant to your topic/domain and enrich each chunk with denormalized metadata.
156
+
157
+ Here's an example of a processed file:
158
+
159
+ ```markdown
160
+ ---
161
+ title: "NVIDIA Q4 2024 Earnings: AI Boom Continues"
162
+ source: "youtube"
163
+ publisher: "Wall Street Millennial"
164
+ date: 2025-11-15
165
+ ticker: "NVDA"
166
+ videoId: "dQw4w9WgXcQ"
167
+ url: "https://youtube.com/watch?v=dQw4w9WgXcQ"
168
+ chunks:
169
+ - id: c01
170
+ title: "Revenue Growth Analysis"
171
+ topics: [earnings, revenue, data-center]
172
+ sentiment: positive
173
+ tickers: [NVDA]
174
+ price_mentioned_usd: 850.50
175
+ revenue_mentioned_billions: 35.1
176
+ - id: c02
177
+ title: "AI Chip Demand Outlook"
178
+ topics: [ai, competition, market-share]
179
+ sentiment: bullish
180
+ tickers: [NVDA, AMD, INTC]
181
+ timeframe: next-quarter
182
+ ---
183
+
184
+ CHUNK c01: "Revenue Growth Analysis"
185
+ NVIDIA reported Q4 revenue of $35.1 billion, beating estimates...
186
+
187
+ CHUNK c02: "AI Chip Demand Outlook"
188
+ The demand for AI accelerators continues to outpace supply...
189
+ ```
190
+
191
+ ### 3) Navigate with grep/glob
192
+
193
+ Your "index" is the YAML frontmatter combined with the file layout. Agents can search it deterministically.
194
+
195
+ **Search examples**:
196
+
197
+ ```bash
198
+ # Find all bullish sentiment for TSLA stock
199
+ rg -l "ticker:.*TSLA" content/processed | xargs rg "sentiment:.*bullish"
200
+
201
+ # Count mentions per ticker
202
+ rg -o "ticker:.*" content/processed --no-filename | sort | uniq -c | sort -rn | head -20
203
+
204
+ # What companies does a specific YouTuber discuss?
205
+ rg -o "company:.*" content/processed/youtube/JosephCarlsonShow --no-filename | sort | uniq -c | sort -rn
206
+
207
+ # Find all AI-related narratives with strong buy recommendations
208
+ rg -l "narrative:.*ai_boom" content/processed | xargs rg "recommendation:.*strong_buy"
209
+
210
+ # Technology sector stocks with bullish sentiment in December 2025
211
+ rg -l "sector:.*technology" content/processed --glob "**/2025-12/*.md" | xargs rg "sentiment:.*bullish"
212
+
213
+ # Find dividend investment style discussions
214
+ rg "investment_style:.*dividend" content/processed -l | head -10
215
+
216
+ # Bearish sentiment on large-cap stocks
217
+ rg -l "market_cap:.*large_cap" content/processed | xargs rg "sentiment:.*bearish"
218
+
219
+ # List all tickers mentioned with their sentiment
220
+ rg "ticker: \[.*\]" content/processed -A 5 | rg "sentiment:"
221
+
222
+ # Find EV-related discussions across all sources
223
+ rg "narrative:.*ev_transition" content/processed
224
+
225
+ # Combine multiple filters: tech stocks with strong buy in specific timeframe
226
+ rg -l "sector:.*technology" content/processed --glob "**/2025-11/*.md" | \
227
+ xargs rg -l "recommendation:.*strong_buy" | \
228
+ xargs rg "ticker:" -o | sort | uniq -c
229
+ ```
230
+
231
+ **Analysis patterns**:
232
+
233
+ ```bash
234
+ # Aggregate sentiment distribution
235
+ rg -o "sentiment:.*" content/processed --no-filename | cut -d: -f2 | tr -d ' ' | sort | uniq -c
236
+
237
+ # Most discussed sectors
238
+ rg -o "sector:.*" content/processed --no-filename | sort | uniq -c | sort -rn
239
+
240
+ # Track narrative evolution over time
241
+ for month in 2025-{10..12}; do
242
+ echo "=== $month ==="
243
+ rg -o "narrative:.*" content/processed --glob "**/$month/*.md" --no-filename | sort | uniq -c | sort -rn | head -5
244
+ done
245
+
246
+ # Compare sentiment on specific stock across sources
247
+ for source in youtube reddit; do
248
+ echo "=== $source ==="
249
+ rg -l "ticker:.*AAPL" content/processed/$source | xargs rg -o "sentiment:.*" --no-filename | sort | uniq -c
250
+ done
251
+ ```
252
+
253
+ ## Configuration
254
+
255
+ ### LLM Model Format
256
+
257
+ Greptor uses the following LLM model format: `provider:model-name`
258
+
259
+ The provider is an OpenAI API-compatible provider; this release supports `openai`, `azure`, and `ollama`.
260
+
261
+ **Examples**:
262
+
263
+ ```typescript
264
+ llmModel: 'openai:gpt-5-mini'
265
+ llmModel: 'ollama:llama3-70b'
266
+ ```
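+
+ Per the parser in this release, the provider prefix is optional; a bare model name falls back to the `openai` provider:
+
+ ```typescript
+ llmModel: 'gpt-5-mini' // treated as 'openai:gpt-5-mini'
+ ```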
267
+
268
+ **Important**: Use a model at least as capable as GPT-5-mini.
269
+ **Required Environment Variables**:
270
+
271
+ ```bash
272
+ # For OpenAI models
273
+ OPENAI_API_KEY=your_key_here
274
+
275
+ # For Azure OpenAI models
276
+ AZURE_API_KEY=your_key_here
277
+ AZURE_API_BASE_URL=https://your-azure-endpoint.com/v1
278
+ ```
279
+
280
+ ## Metadata Schemas
281
+
282
+ If you don't provide a schema, Greptor can initialize one for your topic. However, for better results, provide a custom schema.
283
+
284
+ Here's a comprehensive example for investment research:
285
+
286
+ ```typescript
287
+ const greptor = await createGreptor({
288
+ baseDir: './projects/investing',
289
+ topic: 'Investing, stock market, financial, and macroeconomics',
290
+ llmModel: 'openai:gpt-5-mini',
291
+ metadataSchema: [
292
+ {
293
+ name: 'company',
294
+ type: 'string[]',
295
+ description: 'Canonical company names in snake_case (e.g. apple, tesla, microsoft)',
296
+ },
297
+ {
298
+ name: 'ticker',
299
+ type: 'string[]',
300
+ description: 'Canonical stock tickers, UPPERCASE only (e.g. AAPL, TSLA, MSFT, SPY)',
301
+ },
302
+ {
303
+ name: 'sector',
304
+ type: 'enum[]',
305
+ description: 'GICS sector classification for stocks/companies discussed',
306
+ enumValues: [
307
+ 'technology', 'healthcare', 'financials', 'consumer_discretionary',
308
+ 'consumer_staples', 'energy', 'utilities', 'industrials',
309
+ 'materials', 'real_estate', 'communication_services',
310
+ 'etf', 'index', 'commodity', 'bond', 'mixed'
311
+ ],
312
+ },
313
+ {
314
+ name: 'industry',
315
+ type: 'string[]',
316
+ description: 'Specific industry/sub-sector in snake_case (e.g. semiconductors, biotech, banking)',
317
+ },
318
+ {
319
+ name: 'market_cap',
320
+ type: 'enum[]',
321
+ description: 'Market capitalization category of the company',
322
+ enumValues: ['mega_cap', 'large_cap', 'mid_cap', 'small_cap', 'micro_cap'],
323
+ },
324
+ {
325
+ name: 'investment_style',
326
+ type: 'enum[]',
327
+ description: 'Investment approach or style discussed',
328
+ enumValues: [
329
+ 'value', 'growth', 'dividend', 'momentum', 'index',
330
+ 'passive', 'active', 'day_trading', 'swing_trading', 'long_term_hold'
331
+ ],
332
+ },
333
+ {
334
+ name: 'asset_type',
335
+ type: 'enum[]',
336
+ description: 'Type of financial instrument discussed',
337
+ enumValues: [
338
+ 'stock', 'etf', 'mutual_fund', 'option', 'bond',
339
+ 'reit', 'commodity', 'crypto', 'cash'
340
+ ],
341
+ },
342
+ {
343
+ name: 'narrative',
344
+ type: 'string[]',
345
+ description: 'Investment or market narratives in snake_case (e.g. ai_boom, ev_transition, rate_cuts)',
346
+ },
347
+ {
348
+ name: 'sentiment',
349
+ type: 'enum[]',
350
+ description: 'Directional stance on the stock/market',
351
+ enumValues: ['bullish', 'bearish', 'neutral', 'mixed', 'cautious'],
352
+ },
353
+ {
354
+ name: 'recommendation',
355
+ type: 'enum[]',
356
+ description: 'Analyst or influencer recommendation type',
357
+ enumValues: ['strong_buy', 'buy', 'hold', 'sell', 'strong_sell'],
358
+ },
359
+ ],
360
+ });
361
+ ```
362
+
363
+ ## License
364
+
365
+ MIT © Sergii Vashchyshchuk
package/dist/greptor.d.ts ADDED
@@ -0,0 +1,7 @@
1
+ import type { CreateSkillResult, GreptorEatInput, GreptorEatResult, GreptorOptions } from "./types.js";
2
+ export interface Greptor {
3
+ eat: (input: GreptorEatInput) => Promise<GreptorEatResult>;
4
+ createSkill: (sources: string[], overwrite: boolean) => Promise<CreateSkillResult>;
5
+ }
6
+ export declare function createGreptor(options: GreptorOptions): Promise<Greptor>;
7
+ //# sourceMappingURL=greptor.d.ts.map
package/dist/greptor.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"greptor.d.ts","sourceRoot":"","sources":["../src/greptor.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACX,iBAAiB,EACjB,eAAe,EACf,gBAAgB,EAChB,cAAc,EACd,MAAM,YAAY,CAAC;AAcpB,MAAM,WAAW,OAAO;IACvB,GAAG,EAAE,CAAC,KAAK,EAAE,eAAe,KAAK,OAAO,CAAC,gBAAgB,CAAC,CAAC;IAC3D,WAAW,EAAE,CACZ,OAAO,EAAE,MAAM,EAAE,EACjB,SAAS,EAAE,OAAO,KACd,OAAO,CAAC,iBAAiB,CAAC,CAAC;CAChC;AAED,wBAAsB,aAAa,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,OAAO,CAAC,CAqH7E"}
package/dist/greptor.js ADDED
@@ -0,0 +1,98 @@
1
+ import path from "node:path";
2
+ import YAML from "yaml";
3
+ import { createLlmClient } from "./llm/llm-factory.js";
4
+ import { initializeMetadataSchema } from "./metadata-schema/initialize.js";
5
+ import { createProcessingQueue, enqueueUnprocessedDocuments, startBackgroundWorkers, } from "./processing/processor.js";
6
+ import { generateSkill } from "./skills/skill-generator.js";
7
+ import { createFileStorage } from "./storage/file-storage.js";
8
+ export async function createGreptor(options) {
9
+ const { baseDir, logger } = options;
10
+ const contentPath = path.join(baseDir, "content");
11
+ const storage = createFileStorage(contentPath);
12
+ logger?.debug?.("Initializing Greptor", { baseDir, topic: options.topic });
13
+ const metadataSchema = await initializeMetadataSchema(storage.baseDir, options.llmModel, options.topic, options.metadataSchema, logger);
14
+ const queue = createProcessingQueue();
15
+ const queuedCount = await enqueueUnprocessedDocuments({
16
+ storage,
17
+ queue,
18
+ ...(logger ? { logger } : {}),
19
+ });
20
+ const llm = createLlmClient(options.llmModel);
21
+ const ctx = {
22
+ domain: options.topic,
23
+ metadataSchema: YAML.stringify(metadataSchema),
24
+ llm,
25
+ storage,
26
+ ...(logger ? { logger } : {}),
27
+ };
28
+ startBackgroundWorkers({ ctx, queue, concurrency: options.workers ?? 1 });
29
+ logger?.info?.("Greptor initialized", {
30
+ topic: options.topic,
31
+ queued: queuedCount,
32
+ });
33
+ async function eat(input) {
34
+ if (input.format !== "text") {
35
+ logger?.warn?.("Unsupported format", { format: input.format });
36
+ return {
37
+ success: false,
38
+ message: `Unsupported format: ${input.format}`,
39
+ };
40
+ }
41
+ const res = await storage.saveRawContent(input);
42
+ if (res.type === "duplicate") {
43
+ logger?.warn?.("Attempt to add duplicate document", {
44
+ ref: res.ref,
45
+ label: input.label,
46
+ });
47
+ return {
48
+ success: false,
49
+ message: "Document already exists.",
50
+ };
51
+ }
52
+ if (res.type === "error") {
53
+ return {
54
+ success: false,
55
+ message: res.message,
56
+ };
57
+ }
58
+ queue.enqueue(res.ref);
59
+ logger?.info?.("Document ingested", { ref: res.ref, label: input.label });
60
+ return {
61
+ success: true,
62
+ message: "Content added.",
63
+ ref: res.ref,
64
+ };
65
+ }
66
+ async function createSkill(sources, overwrite = false) {
67
+ try {
68
+ logger?.info?.("Generating Claude Code skill", {
69
+ domain: options.topic,
70
+ });
71
+ const { skillPath } = await generateSkill({
72
+ domain: options.topic,
73
+ sources,
74
+ baseDir: options.baseDir,
75
+ metadataSchema,
76
+ overwrite,
77
+ }, storage);
78
+ return {
79
+ success: true,
80
+ message: `Skill created at ${skillPath}`,
81
+ skillPath,
82
+ };
83
+ }
84
+ catch (error) {
85
+ const errorMessage = error instanceof Error ? error.message : String(error);
86
+ logger?.error?.(`Skill generation failed:\n${errorMessage}`);
87
+ return {
88
+ success: false,
89
+ message: errorMessage,
90
+ };
91
+ }
92
+ }
93
+ return {
94
+ eat,
95
+ createSkill,
96
+ };
97
+ }
98
+ //# sourceMappingURL=greptor.js.map
package/dist/greptor.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"greptor.js","sourceRoot":"","sources":["../src/greptor.ts"],"names":[],"mappings":"AAOA,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,IAAI,MAAM,MAAM,CAAC;AACxB,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AACvD,OAAO,EAAE,wBAAwB,EAAE,MAAM,iCAAiC,CAAC;AAC3E,OAAO,EACN,qBAAqB,EACrB,2BAA2B,EAC3B,sBAAsB,GACtB,MAAM,2BAA2B,CAAC;AACnC,OAAO,EAAE,aAAa,EAAE,MAAM,6BAA6B,CAAC;AAC5D,OAAO,EAAE,iBAAiB,EAAE,MAAM,2BAA2B,CAAC;AAU9D,MAAM,CAAC,KAAK,UAAU,aAAa,CAAC,OAAuB;IAC1D,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC;IACpC,MAAM,WAAW,GAAG,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,SAAS,CAAC,CAAC;IAClD,MAAM,OAAO,GAAG,iBAAiB,CAAC,WAAW,CAAC,CAAC;IAE/C,MAAM,EAAE,KAAK,EAAE,CAAC,sBAAsB,EAAE,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,CAAC,KAAK,EAAE,CAAC,CAAC;IAE3E,MAAM,cAAc,GAAG,MAAM,wBAAwB,CACpD,OAAO,CAAC,OAAO,EACf,OAAO,CAAC,QAAQ,EAChB,OAAO,CAAC,KAAK,EACb,OAAO,CAAC,cAAc,EACtB,MAAM,CACN,CAAC;IAEF,MAAM,KAAK,GAAG,qBAAqB,EAAE,CAAC;IACtC,MAAM,WAAW,GAAG,MAAM,2BAA2B,CAAC;QACrD,OAAO;QACP,KAAK;QACL,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;KAC7B,CAAC,CAAC;IAEH,MAAM,GAAG,GAAG,eAAe,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC;IAC9C,MAAM,GAAG,GAAG;QACX,MAAM,EAAE,OAAO,CAAC,KAAK;QACrB,cAAc,EAAE,IAAI,CAAC,SAAS,CAAC,cAAc,CAAC;QAC9C,GAAG;QACH,OAAO;QACP,GAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;KAC7B,CAAC;IAEF,sBAAsB,CAAC,EAAE,GAAG,EAAE,KAAK,EAAE,WAAW,EAAE,OAAO,CAAC,OAAO,IAAI,CAAC,EAAE,CAAC,CAAC;IAE1E,MAAM,EAAE,IAAI,EAAE,CAAC,qBAAqB,EAAE;QACrC,KAAK,EAAE,OAAO,CAAC,KAAK;QACpB,MAAM,EAAE,WAAW;KACnB,CAAC,CAAC;IAEH,KAAK,UAAU,GAAG,CAAC,KAAsB;QACxC,IAAI,KAAK,CAAC,MAAM,KAAK,MAAM,EAAE,CAAC;YAC7B,MAAM,EAAE,IAAI,EAAE,CAAC,oBAAoB,EAAE,EAAE,MAAM,EAAE,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC;YAC/D,OAAO;gBACN,OAAO,EAAE,KAAK;gBACd,OAAO,EAAE,uBAAuB,KAAK,CAAC,MAAM,EAAE;aAC9C,CAAC;QACH,CAAC;QAED,MAAM,GAAG,GAAG,MAAM,OAAO,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC;QAEhD,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YAC9B,MAAM,EAAE,IAAI,EAAE,CAAC,mCAAmC,EAAE;gBACnD,GAAG,EAAE,GAAG,CAAC,GAAG;gBACZ,KAAK,EAAE,KAAK,CAAC,KAAK;aAClB,CAAC,CAAC;YACH,OAAO;gBACN,OAAO,EAAE,KAAK;gBACd,OAAO,EAAE,0BAA0B;aACnC,CAAC;QACH,CAAC;QAED,IAAI,GAAG,CAAC,IAAI,KAAK,OAAO,EAAE,CAAC;YAC1B,OAAO;gBACN,OAAO,EAAE,KAAK;gBACd,OAAO,EAAE,GAAG,CAAC,OAAO;aACpB,CAAC;QACH,CAAC;QAED,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;QACvB,MAAM,EAAE,IAAI,EAAE,CAAC,mBAAmB,EAAE,EAAE,GAAG,EAAE,GAAG,CAAC,GAAG,EAAE,KAAK,EAAE,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC;QAE1E,OAAO;YACN,OAAO,EAAE,IAAI;YACb,OAAO,EAAE,gBAAgB;YACzB,GAAG,EAAE,GAAG,CAAC,GAAG;SACZ,CAAC;IACH,CAAC;IAED,KAAK,UAAU,WAAW,CACzB,OAAiB,EACjB,SAAS,GAAG,KAAK;QAEjB,IAAI,CAAC;YACJ,MAAM,EAAE,IAAI,EAAE,CAAC,8BAA8B,EAAE;gBAC9C,MAAM,EAAE,OAAO,CAAC,KAAK;aACrB,CAAC,CAAC;YAEH,MAAM,EAAE,SAAS,EAAE,GAAG,MAAM,aAAa,CACxC;gBACC,MAAM,EAAE,OAAO,CAAC,KAAK;gBACrB,OAAO;gBACP,OAAO,EAAE,OAAO,CAAC,OAAO;gBACxB,cAAc;gBACd,SAAS;aACT,EACD,OAAO,CACP,CAAC;YAEF,OAAO;gBACN,OAAO,EAAE,IAAI;gBACb,OAAO,EAAE,oBAAoB,SAAS,EAAE;gBACxC,SAAS;aACT,CAAC;QACH,CAAC;QAAC,OAAO,KAAK,EAAE,CAAC;YAChB,MAAM,YAAY,GACjB,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;YACxD,MAAM,EAAE,KAAK,EAAE,CAAC,6BAA6B,YAAY,EAAE,CAAC,CAAC;YAC7D,OAAO;gBACN,OAAO,EAAE,KAAK;gBACd,OAAO,EAAE,YAAY;aACrB,CAAC;QACH,CAAC;IACF,CAAC;IAED,OAAO;QACN,GAAG;QACH,WAAW;KACX,CAAC;AACH,CAAC"}
package/dist/index.d.ts ADDED
@@ -0,0 +1,4 @@
1
+ export * from "./types.js";
2
+ export type { Greptor } from "./greptor.js";
3
+ export { createGreptor } from "./greptor.js";
4
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,YAAY,CAAC;AAE3B,YAAY,EAAE,OAAO,EAAE,MAAM,cAAc,CAAC;AAC5C,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC"}
package/dist/index.js ADDED
@@ -0,0 +1,3 @@
1
+ export * from "./types.js";
2
+ export { createGreptor } from "./greptor.js";
3
+ //# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,YAAY,CAAC;AAG3B,OAAO,EAAE,aAAa,EAAE,MAAM,cAAc,CAAC"}
package/dist/llm/llm-factory.d.ts ADDED
@@ -0,0 +1,7 @@
1
+ import OpenAI from "openai";
2
+ export type LlmClient = {
3
+ client: OpenAI;
4
+ model: string;
5
+ };
6
+ export declare function createLlmClient(providerModel: string): LlmClient;
7
+ //# sourceMappingURL=llm-factory.d.ts.map
package/dist/llm/llm-factory.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-factory.d.ts","sourceRoot":"","sources":["../../src/llm/llm-factory.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAE5B,MAAM,MAAM,SAAS,GAAG;IACvB,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;CACd,CAAC;AA+DF,wBAAgB,eAAe,CAAC,aAAa,EAAE,MAAM,GAAG,SAAS,CAShE"}
package/dist/llm/llm-factory.js ADDED
@@ -0,0 +1,53 @@
1
+ import OpenAI from "openai";
2
+ function parseProviderModel(providerModel) {
3
+ if (!providerModel.includes(":")) {
4
+ return { provider: "openai", model: providerModel.trim() };
5
+ }
6
+ const colonIndex = providerModel.indexOf(":");
7
+ const rawProvider = providerModel.substring(0, colonIndex);
8
+ const rawModel = providerModel.substring(colonIndex + 1);
9
+ if (!rawProvider || !rawModel) {
10
+ throw new Error(`Invalid provider:model format: ${providerModel}. Expected format: "provider:model"`);
11
+ }
12
+ const provider = rawProvider.trim().toLowerCase();
13
+ const model = rawModel.trim();
14
+ if (!provider || !model) {
15
+ throw new Error(`Invalid provider:model format: ${providerModel}. Both provider and model must be non-empty`);
16
+ }
17
+ return { provider, model };
18
+ }
19
+ function getProviderConfig(provider) {
20
+ const normalizedProvider = provider.toLowerCase();
21
+ switch (normalizedProvider) {
22
+ case "openai":
23
+ if (!process.env.OPENAI_API_KEY) {
24
+ throw new Error("OPENAI_API_KEY environment variable is not set.");
25
+ }
26
+ return { apiKey: process.env.OPENAI_API_KEY };
27
+ case "azure":
28
+ if (!process.env.AZURE_API_KEY || !process.env.AZURE_API_BASE_URL) {
29
+ throw new Error("AZURE_API_KEY or AZURE_API_BASE_URL environment variable is not set.");
30
+ }
31
+ return {
32
+ apiKey: process.env.AZURE_API_KEY,
33
+ apiUrl: process.env.AZURE_API_BASE_URL,
34
+ };
35
+ case "ollama":
36
+ return {
37
+ apiKey: "ollama",
38
+ apiUrl: "http://localhost:11434/v1",
39
+ };
40
+ default:
41
+ throw new Error(`Unsupported provider: ${provider}`);
42
+ }
43
+ }
44
+ export function createLlmClient(providerModel) {
45
+ const { provider, model } = parseProviderModel(providerModel);
46
+ const { apiKey, apiUrl } = getProviderConfig(provider);
47
+ const client = new OpenAI({
48
+ apiKey,
49
+ baseURL: apiUrl,
50
+ });
51
+ return { client, model };
52
+ }
53
+ //# sourceMappingURL=llm-factory.js.map
package/dist/llm/llm-factory.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-factory.js","sourceRoot":"","sources":["../../src/llm/llm-factory.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAO5B,SAAS,kBAAkB,CAAC,aAAqB;IAIhD,IAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;QAClC,OAAO,EAAE,QAAQ,EAAE,QAAQ,EAAE,KAAK,EAAE,aAAa,CAAC,IAAI,EAAE,EAAE,CAAC;IAC5D,CAAC;IAED,MAAM,UAAU,GAAG,aAAa,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IAC9C,MAAM,WAAW,GAAG,aAAa,CAAC,SAAS,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC;IAC3D,MAAM,QAAQ,GAAG,aAAa,CAAC,SAAS,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;IAEzD,IAAI,CAAC,WAAW,IAAI,CAAC,QAAQ,EAAE,CAAC;QAC/B,MAAM,IAAI,KAAK,CACd,kCAAkC,aAAa,qCAAqC,CACpF,CAAC;IACH,CAAC;IAED,MAAM,QAAQ,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC,WAAW,EAAE,CAAC;IAClD,MAAM,KAAK,GAAG,QAAQ,CAAC,IAAI,EAAE,CAAC;IAE9B,IAAI,CAAC,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;QACzB,MAAM,IAAI,KAAK,CACd,kCAAkC,aAAa,6CAA6C,CAC5F,CAAC;IACH,CAAC;IAED,OAAO,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC;AAC5B,CAAC;AAED,SAAS,iBAAiB,CAAC,QAAgB;IAI1C,MAAM,kBAAkB,GAAG,QAAQ,CAAC,WAAW,EAAE,CAAC;IAClD,QAAQ,kBAAkB,EAAE,CAAC;QAC5B,KAAK,QAAQ;YACZ,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;gBACjC,MAAM,IAAI,KAAK,CAAC,iDAAiD,CAAC,CAAC;YACpE,CAAC;YACD,OAAO,EAAE,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,cAAc,EAAE,CAAC;QAC/C,KAAK,OAAO;YACX,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,aAAa,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,kBAAkB,EAAE,CAAC;gBACnE,MAAM,IAAI,KAAK,CACd,sEAAsE,CACtE,CAAC;YACH,CAAC;YACD,OAAO;gBACN,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,aAAa;gBACjC,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,kBAAkB;aACtC,CAAC;QACH,KAAK,QAAQ;YACZ,OAAO;gBACN,MAAM,EAAE,QAAQ;gBAChB,MAAM,EAAE,2BAA2B;aACnC,CAAC;QACH;YACC,MAAM,IAAI,KAAK,CAAC,yBAAyB,QAAQ,EAAE,CAAC,CAAC;IACvD,CAAC;AACF,CAAC;AAED,MAAM,UAAU,eAAe,CAAC,aAAqB;IACpD,MAAM,EAAE,QAAQ,EAAE,KAAK,EAAE,GAAG,kBAAkB,CAAC,aAAa,CAAC,CAAC;IAC9D,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,GAAG,iBAAiB,CAAC,QAAQ,CAAC,CAAC;IACvD,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC;QACzB,MAAM;QACN,OAAO,EAAE,MAAM;KACf,CAAC,CAAC;IAEH,OAAO,EAAE,MAAM,EAAE,KAAK,EAAE,CAAC;AAC1B,CAAC"}
package/dist/metadata-schema/generate.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ import type { MetadataSchema } from "../types.js";
2
+ export declare function generateMetadataSchema(topic: string, llmModel: string): Promise<MetadataSchema>;
3
+ //# sourceMappingURL=generate.d.ts.map
package/dist/metadata-schema/generate.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"generate.d.ts","sourceRoot":"","sources":["../../src/metadata-schema/generate.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,cAAc,EAAsB,MAAM,aAAa,CAAC;AAiBtE,wBAAsB,sBAAsB,CAC3C,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,GACd,OAAO,CAAC,cAAc,CAAC,CA+BzB"}
package/dist/metadata-schema/generate.js ADDED
@@ -0,0 +1,43 @@
1
+ import { zodResponseFormat } from "openai/helpers/zod";
2
+ import { createLlmClient } from "../llm/llm-factory.js";
3
+ import { ResponseSchema } from "./types.js";
4
+ const PROMPT_TEMPLATE = (topic) => `
5
+ You are an expert information architect designing metadata schemas that improve text search, discovery, and retrieval within a specific knowledge topic.
6
+ Your goal is to produce a list of 5-10 **metadata fields** that are:
7
+ 1. **Search-relevant** — users or AI agents are likely to query or filter text by these fields.
8
+ 2. **Domain-relevant** — reflect concepts, entities, and descriptors naturally present in this domain.
9
+ 3. **Extractable** — values can be identified directly from text (no scoring or inferred metrics like confidence, relevance, etc.).
10
+ 4. **Reusable** — should support both keyword search (grep/ripgrep) and structured filtering.
11
+
12
+ Allowed field types: string, string[], number, number[], boolean, enum, enum[], date.
13
+ Use array types when multiple values are expected per chunk.
14
+
15
+ **TOPIC**: ${topic}
16
+ `;
17
+ export async function generateMetadataSchema(topic, llmModel) {
18
+ const { client, model } = createLlmClient(llmModel);
19
+ const messages = [
20
+ { role: "user", content: PROMPT_TEMPLATE(topic) },
21
+ ];
22
+ const completion = await client.chat.completions.parse({
23
+ model,
24
+ messages,
25
+ response_format: zodResponseFormat(ResponseSchema, "metadata_fields"),
26
+ });
27
+ const parsed = completion.choices[0]?.message?.parsed;
28
+ if (!parsed?.metadata_fields) {
29
+ throw new Error("Failed to parse metadata schema from LLM response");
30
+ }
31
+ return parsed.metadata_fields.map((field) => {
32
+ const metadataField = {
33
+ name: field.name,
34
+ type: field.type,
35
+ description: field.description,
36
+ };
37
+ if (Array.isArray(field.enumValues)) {
38
+ metadataField.enumValues = field.enumValues;
39
+ }
40
+ return metadataField;
41
+ });
42
+ }
43
+ //# sourceMappingURL=generate.js.map
package/dist/metadata-schema/generate.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"generate.js","sourceRoot":"","sources":["../../src/metadata-schema/generate.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAEvD,OAAO,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AAExD,OAAO,EAAE,cAAc,EAAE,MAAM,YAAY,CAAC;AAE5C,MAAM,eAAe,GAAG,CAAC,KAAa,EAAE,EAAE,CAAC;;;;;;;;;;;aAW9B,KAAK;CACjB,CAAC;AAEF,MAAM,CAAC,KAAK,UAAU,sBAAsB,CAC3C,KAAa,EACb,QAAgB;IAEhB,MAAM,EAAE,MAAM,EAAE,KAAK,EAAE,GAAG,eAAe,CAAC,QAAQ,CAAC,CAAC;IAEpD,MAAM,QAAQ,GAAiC;QAC9C,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,eAAe,CAAC,KAAK,CAAC,EAAE;KACjD,CAAC;IAEF,MAAM,UAAU,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,KAAK,CAAC;QACtD,KAAK;QACL,QAAQ;QACR,eAAe,EAAE,iBAAiB,CAAC,cAAc,EAAE,iBAAiB,CAAC;KACrE,CAAC,CAAC;IAEH,MAAM,MAAM,GAAG,UAAU,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,OAAO,EAAE,MAAM,CAAC;IACtD,IAAI,CAAC,MAAM,EAAE,eAAe,EAAE,CAAC;QAC9B,MAAM,IAAI,KAAK,CAAC,mDAAmD,CAAC,CAAC;IACtE,CAAC;IAED,OAAO,MAAM,CAAC,eAAe,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,EAAE;QAC3C,MAAM,aAAa,GAAuB;YACzC,IAAI,EAAE,KAAK,CAAC,IAAI;YAChB,IAAI,EAAE,KAAK,CAAC,IAAI;YAChB,WAAW,EAAE,KAAK,CAAC,WAAW;SAC9B,CAAC;QAEF,IAAI,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,UAAU,CAAC,EAAE,CAAC;YACrC,aAAa,CAAC,UAAU,GAAG,KAAK,CAAC,UAAU,CAAC;QAC7C,CAAC;QAED,OAAO,aAAa,CAAC;IACtB,CAAC,CAAC,CAAC;AACJ,CAAC"}
package/dist/metadata-schema/initialize.d.ts ADDED
@@ -0,0 +1,5 @@
1
+ import type { Logger } from "../types.js";
2
+ import type { MetadataSchema } from "../types.js";
3
+ export declare const METADATA_SCHEMA_FILENAME = "metadata-schema.yaml";
4
+ export declare function initializeMetadataSchema(baseDir: string, llmModel: string, topic: string, metadataSchema?: MetadataSchema, logger?: Logger): Promise<MetadataSchema>;
5
+ //# sourceMappingURL=initialize.d.ts.map
package/dist/metadata-schema/initialize.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"initialize.d.ts","sourceRoot":"","sources":["../../src/metadata-schema/initialize.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAC1C,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAIlD,eAAO,MAAM,wBAAwB,yBAAyB,CAAC;AAc/D,wBAAsB,wBAAwB,CAC7C,OAAO,EAAE,MAAM,EACf,QAAQ,EAAE,MAAM,EAChB,KAAK,EAAE,MAAM,EACb,cAAc,CAAC,EAAE,cAAc,EAC/B,MAAM,CAAC,EAAE,MAAM,GACb,OAAO,CAAC,cAAc,CAAC,CA4BzB"}