geo-ai-core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +136 -0
- package/dist/ai/index.cjs +252 -0
- package/dist/ai/index.cjs.map +1 -0
- package/dist/ai/index.d.cts +67 -0
- package/dist/ai/index.d.ts +67 -0
- package/dist/ai/index.mjs +221 -0
- package/dist/ai/index.mjs.map +1 -0
- package/dist/index.cjs +660 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +206 -0
- package/dist/index.d.ts +206 -0
- package/dist/index.mjs +624 -0
- package/dist/index.mjs.map +1 -0
- package/dist/types-B3qT6_qa.d.cts +101 -0
- package/dist/types-B3qT6_qa.d.ts +101 -0
- package/package.json +34 -0
package/README.md
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
# geo-ai-core
|
|
2
|
+
|
|
3
|
+
[](https://npmjs.com/package/geo-ai-core)
|
|
4
|
+
|
|
5
|
+
Part of the [GEO AI ecosystem](https://github.com/madeburo/GEO-AI)
|
|
6
|
+
|
|
7
|
+
Zero-dependency TypeScript engine for Generative Engine Optimization (GEO). Optimizes websites for AI search engines — ChatGPT, Claude, Gemini, Perplexity, DeepSeek, Grok and more.
|
|
8
|
+
|
|
9
|
+
Works with any Node.js framework or plain server.
|
|
10
|
+
|
|
11
|
+
## Installation
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
npm install geo-ai-core
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
## Quick Start
|
|
18
|
+
|
|
19
|
+
```typescript
|
|
20
|
+
import { createGeoAI } from 'geo-ai-core';
|
|
21
|
+
|
|
22
|
+
const geo = createGeoAI({
|
|
23
|
+
siteName: 'My Site',
|
|
24
|
+
siteUrl: 'https://example.com',
|
|
25
|
+
provider: {
|
|
26
|
+
Products: [
|
|
27
|
+
{ title: 'Widget', url: '/products/widget', description: 'A great widget' },
|
|
28
|
+
],
|
|
29
|
+
Blog: [
|
|
30
|
+
{ title: 'Hello World', url: '/blog/hello', description: 'First post' },
|
|
31
|
+
],
|
|
32
|
+
},
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
// llms.txt / llms-full.txt
|
|
36
|
+
const llmsTxt = await geo.generateLlms(false);
|
|
37
|
+
const llmsFullTxt = await geo.generateLlms(true);
|
|
38
|
+
|
|
39
|
+
// robots.txt block
|
|
40
|
+
const robotsTxt = geo.generateRobotsTxt();
|
|
41
|
+
|
|
42
|
+
// SEO signals
|
|
43
|
+
const metaTags = geo.generateMetaTags();
|
|
44
|
+
const linkHeader = geo.generateLinkHeader();
|
|
45
|
+
const jsonLd = geo.generateJsonLd();
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## ContentProvider
|
|
49
|
+
|
|
50
|
+
For dynamic data sources, implement the `ContentProvider` interface:
|
|
51
|
+
|
|
52
|
+
```typescript
|
|
53
|
+
import { createGeoAI, type ContentProvider } from 'geo-ai-core';
|
|
54
|
+
|
|
55
|
+
class MyProvider implements ContentProvider {
|
|
56
|
+
async getSections(options?: { locale?: string }) {
|
|
57
|
+
return [
|
|
58
|
+
{ name: 'Products', type: 'product', resources: await fetchProducts() },
|
|
59
|
+
{ name: 'Blog', type: 'page', resources: await fetchPosts() },
|
|
60
|
+
];
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
const geo = createGeoAI({
|
|
65
|
+
siteName: 'My Site',
|
|
66
|
+
siteUrl: 'https://example.com',
|
|
67
|
+
provider: new MyProvider(),
|
|
68
|
+
});
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
## AI Description Generation
|
|
72
|
+
|
|
73
|
+
Separate entry point — only loaded when imported:
|
|
74
|
+
|
|
75
|
+
```typescript
|
|
76
|
+
import { AiGenerator } from 'geo-ai-core/ai';
|
|
77
|
+
|
|
78
|
+
const ai = new AiGenerator({
|
|
79
|
+
provider: 'anthropic',
|
|
80
|
+
apiKey: 'sk-...',
|
|
81
|
+
model: 'claude-sonnet-4-20250514',
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
const description = await ai.generate({
|
|
85
|
+
title: 'Premium Widget',
|
|
86
|
+
content: 'A high-quality widget...',
|
|
87
|
+
type: 'product',
|
|
88
|
+
price: '$29.99',
|
|
89
|
+
});
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
Bulk generation with progress:
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
const results = await ai.bulkGenerate(items, {
|
|
96
|
+
batchSize: 5,
|
|
97
|
+
maxItems: 50,
|
|
98
|
+
onProgress: (completed, total) => console.log(`${completed}/${total}`),
|
|
99
|
+
});
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
## Configuration
|
|
103
|
+
|
|
104
|
+
```typescript
|
|
105
|
+
interface GeoAIConfig {
|
|
106
|
+
siteName: string;
|
|
107
|
+
siteUrl: string;
|
|
108
|
+
provider: ContentProvider | Record<string, Resource[]>;
|
|
109
|
+
|
|
110
|
+
siteDescription?: string;
|
|
111
|
+
crawlers?: Record<string, 'allow' | 'disallow'> | 'all';
|
|
112
|
+
cache?: CacheAdapter | string; // '1h', '24h', '7d' or custom adapter
|
|
113
|
+
crypto?: { encryptionKey: string }; // 64-char hex for AES-256-GCM
|
|
114
|
+
crawlTracking?: { store?: CrawlStore; secret?: string } | true;
|
|
115
|
+
}
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
## Entry Points
|
|
119
|
+
|
|
120
|
+
| Entry | Import | Contents |
|
|
121
|
+
|-------|--------|----------|
|
|
122
|
+
| Main | `geo-ai-core` | `createGeoAI`, `LlmsGenerator`, `BotRulesEngine`, `CrawlTracker`, `SeoGenerator`, `CryptoService`, cache adapters, types |
|
|
123
|
+
| AI | `geo-ai-core/ai` | `AiGenerator`, `RateLimiter`, `buildPrompt`, `classifyAiError` |
|
|
124
|
+
|
|
125
|
+
## Supported AI Crawlers
|
|
126
|
+
|
|
127
|
+
GPTBot, OAI-SearchBot, ClaudeBot, claude-web, Google-Extended, PerplexityBot, DeepSeekBot, GrokBot, meta-externalagent, PanguBot, YandexBot, SputnikBot, Bytespider, Baiduspider, Amazonbot, Applebot
|
|
128
|
+
|
|
129
|
+
## Requirements
|
|
130
|
+
|
|
131
|
+
- Node.js >= 20
|
|
132
|
+
- TypeScript >= 5.5 (recommended)
|
|
133
|
+
|
|
134
|
+
## License
|
|
135
|
+
|
|
136
|
+
[GPL v2](../../LICENSE)
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/ai/index.ts
var ai_exports = {};
// Public surface of the `geo-ai-core/ai` entry point, registered as lazy
// getters — the referenced definitions appear later in this bundle.
__export(ai_exports, {
  AiGenerator: () => AiGenerator,
  AiProviderError: () => AiProviderError,
  RateLimiter: () => RateLimiter,
  buildPrompt: () => buildPrompt,
  classifyAiError: () => classifyAiError
});
module.exports = __toCommonJS(ai_exports);
|
|
30
|
+
|
|
31
|
+
// src/constants.ts
/** Default AI API requests per minute (see RateLimiter). */
var DEFAULT_RATE_LIMIT = 10;
/** Default items per batch in bulk AI generation. */
var DEFAULT_BATCH_SIZE = 5;
/** Default maximum items processed by a single bulk generation run. */
var DEFAULT_MAX_ITEMS = 50;
/** Default maximum AI description length, in characters. */
var DEFAULT_MAX_DESCRIPTION_LENGTH = 200;
/**
 * Default prompt template for AI description generation.
 * The {type}/{title}/{content}/{price}/{category} placeholders are
 * substituted by buildPrompt().
 */
var DEFAULT_PROMPT = `Write a concise AI-optimized description (max 200 characters) for the following {type}.

Title: {title}
Content: {content}
Price: {price}
Category: {category}

The description should be informative, keyword-rich, and suitable for AI search engines. Focus on the key features and value proposition.`;
|
|
44
|
+
|
|
45
|
+
// src/ai/index.ts
|
|
46
|
+
// Length of the rate-limiting window: one minute, in milliseconds.
var RATE_WINDOW_MS = 6e4;
/**
 * Simple sliding-window rate limiter for AI API calls.
 * In-memory and per-process; state is not shared between instances.
 */
var RateLimiter = class {
  state;
  limit;
  _now;
  /**
   * @param limit  maximum acquisitions per window (default DEFAULT_RATE_LIMIT)
   * @param nowFn  injectable clock (defaults to Date.now) for deterministic tests
   */
  constructor(limit = DEFAULT_RATE_LIMIT, nowFn) {
    this.limit = limit;
    this._now = nowFn ?? (() => Date.now());
    this.state = { count: 0, windowStart: this._now() };
  }
  /** Returns true if a request is allowed, false if rate-limited. */
  tryAcquire() {
    const timestamp = this._now();
    // A full window has elapsed — start a fresh one.
    const windowExpired = timestamp - this.state.windowStart >= RATE_WINDOW_MS;
    if (windowExpired) {
      this.state = { count: 0, windowStart: timestamp };
    }
    const underLimit = this.state.count < this.limit;
    if (underLimit) {
      this.state.count += 1;
    }
    return underLimit;
  }
  /** Reset the limiter state, starting a new window at the current time. */
  reset() {
    this.state = { count: 0, windowStart: this._now() };
  }
};
|
|
73
|
+
/**
 * Fills the placeholders {title}, {content}, {type}, {price} and {category}
 * in `template` with the matching fields from `context`; missing fields
 * become empty strings. Function replacers are used so `$`-sequences in
 * values are inserted literally rather than interpreted as replacement
 * patterns.
 */
function buildPrompt(template, context) {
  // Order matters: substitutions run left-to-right, title first.
  const substitutions = [
    ["title", context.title],
    ["content", context.content],
    ["type", context.type],
    ["price", context.price],
    ["category", context.category]
  ];
  let rendered = template;
  for (const [key, value] of substitutions) {
    rendered = rendered.replace(new RegExp("\\{" + key + "\\}", "g"), () => value ?? "");
  }
  return rendered;
}
|
|
76
|
+
/**
 * Classifies an AI-provider HTTP failure into a user-friendly category.
 * 401/403 → auth_error, 429 → rate_limit, 5xx → server_error; anything
 * else is "unknown" and surfaces the provider's own error message from
 * `body.error.message` when present.
 */
function classifyAiError(statusCode, body) {
  let type = "unknown";
  let message;
  if (statusCode === 401 || statusCode === 403) {
    type = "auth_error";
    message = "Invalid API key. Please check your key in settings.";
  } else if (statusCode === 429) {
    type = "rate_limit";
    message = "AI provider rate limit exceeded. Please wait and try again.";
  } else if (statusCode >= 500) {
    type = "server_error";
    message = "AI service is temporarily unavailable. Please try later.";
  } else {
    // Fall back to the provider-supplied message when the body carries one.
    message = body?.error?.message ?? "Unknown AI provider error.";
  }
  return { type, message, statusCode };
}
|
|
104
|
+
/**
 * Error thrown by AiGenerator. Wraps a classified AiError, exposing its
 * category on `type` and, when known, the HTTP status on `statusCode`.
 */
var AiProviderError = class extends Error {
  type;
  statusCode;
  constructor(error) {
    const { type, message, statusCode } = error;
    super(message);
    this.name = "AiProviderError";
    this.type = type;
    this.statusCode = statusCode;
  }
};
|
|
114
|
+
var AiGenerator = class {
|
|
115
|
+
config;
|
|
116
|
+
rateLimiter;
|
|
117
|
+
_fetch;
|
|
118
|
+
constructor(config, fetchFn) {
|
|
119
|
+
this.config = config;
|
|
120
|
+
this.rateLimiter = new RateLimiter(config.rateLimit ?? DEFAULT_RATE_LIMIT);
|
|
121
|
+
this._fetch = fetchFn ?? globalThis.fetch.bind(globalThis);
|
|
122
|
+
}
|
|
123
|
+
defaultModel() {
|
|
124
|
+
return this.config.provider === "anthropic" ? "claude-sonnet-4-5-20250514" : "gpt-4o-mini";
|
|
125
|
+
}
|
|
126
|
+
/** Generate a single AI description. */
|
|
127
|
+
async generate(context) {
|
|
128
|
+
if (!this.rateLimiter.tryAcquire()) {
|
|
129
|
+
throw new AiProviderError({
|
|
130
|
+
type: "rate_limit",
|
|
131
|
+
message: "Internal rate limit exceeded. Please wait before generating more descriptions."
|
|
132
|
+
});
|
|
133
|
+
}
|
|
134
|
+
const template = this.config.promptTemplate ?? DEFAULT_PROMPT;
|
|
135
|
+
const prompt = buildPrompt(template, context);
|
|
136
|
+
const model = this.config.model ?? this.defaultModel();
|
|
137
|
+
const maxLen = this.config.maxDescriptionLength ?? DEFAULT_MAX_DESCRIPTION_LENGTH;
|
|
138
|
+
const result = this.config.provider === "anthropic" ? await this.callClaude(model, prompt) : await this.callOpenAI(model, prompt);
|
|
139
|
+
return result.slice(0, maxLen);
|
|
140
|
+
}
|
|
141
|
+
// ── Provider calls ───────────────────────────────────────────────────
|
|
142
|
+
async callClaude(model, prompt) {
|
|
143
|
+
let response;
|
|
144
|
+
try {
|
|
145
|
+
response = await this._fetch("https://api.anthropic.com/v1/messages", {
|
|
146
|
+
method: "POST",
|
|
147
|
+
headers: {
|
|
148
|
+
"Content-Type": "application/json",
|
|
149
|
+
"x-api-key": this.config.apiKey,
|
|
150
|
+
"anthropic-version": "2023-06-01"
|
|
151
|
+
},
|
|
152
|
+
body: JSON.stringify({
|
|
153
|
+
model,
|
|
154
|
+
max_tokens: 256,
|
|
155
|
+
messages: [{ role: "user", content: prompt }]
|
|
156
|
+
})
|
|
157
|
+
});
|
|
158
|
+
} catch {
|
|
159
|
+
throw new AiProviderError({
|
|
160
|
+
type: "network_error",
|
|
161
|
+
message: "Failed to connect to Anthropic API."
|
|
162
|
+
});
|
|
163
|
+
}
|
|
164
|
+
if (!response.ok) {
|
|
165
|
+
const body2 = await response.json().catch(() => ({}));
|
|
166
|
+
throw new AiProviderError(classifyAiError(response.status, body2));
|
|
167
|
+
}
|
|
168
|
+
const body = await response.json();
|
|
169
|
+
const text = body?.content?.[0]?.text;
|
|
170
|
+
if (typeof text !== "string") {
|
|
171
|
+
throw new AiProviderError({
|
|
172
|
+
type: "unknown",
|
|
173
|
+
message: "Unexpected Claude API response format."
|
|
174
|
+
});
|
|
175
|
+
}
|
|
176
|
+
return text.trim();
|
|
177
|
+
}
|
|
178
|
+
async callOpenAI(model, prompt) {
|
|
179
|
+
let response;
|
|
180
|
+
try {
|
|
181
|
+
response = await this._fetch(
|
|
182
|
+
"https://api.openai.com/v1/chat/completions",
|
|
183
|
+
{
|
|
184
|
+
method: "POST",
|
|
185
|
+
headers: {
|
|
186
|
+
"Content-Type": "application/json",
|
|
187
|
+
Authorization: `Bearer ${this.config.apiKey}`
|
|
188
|
+
},
|
|
189
|
+
body: JSON.stringify({
|
|
190
|
+
model,
|
|
191
|
+
max_tokens: 256,
|
|
192
|
+
messages: [{ role: "user", content: prompt }]
|
|
193
|
+
})
|
|
194
|
+
}
|
|
195
|
+
);
|
|
196
|
+
} catch {
|
|
197
|
+
throw new AiProviderError({
|
|
198
|
+
type: "network_error",
|
|
199
|
+
message: "Failed to connect to OpenAI API."
|
|
200
|
+
});
|
|
201
|
+
}
|
|
202
|
+
if (!response.ok) {
|
|
203
|
+
const body2 = await response.json().catch(() => ({}));
|
|
204
|
+
throw new AiProviderError(classifyAiError(response.status, body2));
|
|
205
|
+
}
|
|
206
|
+
const body = await response.json();
|
|
207
|
+
const text = body?.choices?.[0]?.message?.content;
|
|
208
|
+
if (typeof text !== "string") {
|
|
209
|
+
throw new AiProviderError({
|
|
210
|
+
type: "unknown",
|
|
211
|
+
message: "Unexpected OpenAI API response format."
|
|
212
|
+
});
|
|
213
|
+
}
|
|
214
|
+
return text.trim();
|
|
215
|
+
}
|
|
216
|
+
// ── Bulk generation ────────────────────────────────────────────────────
|
|
217
|
+
/**
|
|
218
|
+
* Bulk-generate AI descriptions for multiple contexts.
|
|
219
|
+
* Processes in batches with progress callback.
|
|
220
|
+
*/
|
|
221
|
+
async bulkGenerate(contexts, options) {
|
|
222
|
+
const batchSize = options?.batchSize ?? DEFAULT_BATCH_SIZE;
|
|
223
|
+
const maxItems = options?.maxItems ?? DEFAULT_MAX_ITEMS;
|
|
224
|
+
const items = contexts.slice(0, maxItems);
|
|
225
|
+
const results = [];
|
|
226
|
+
for (let i = 0; i < items.length; i += batchSize) {
|
|
227
|
+
const batch = items.slice(i, i + batchSize);
|
|
228
|
+
for (const ctx of batch) {
|
|
229
|
+
try {
|
|
230
|
+
const result = await this.generate(ctx);
|
|
231
|
+
results.push({ context: ctx, result });
|
|
232
|
+
} catch (err) {
|
|
233
|
+
const aiErr = err instanceof AiProviderError ? { type: err.type, message: err.message, statusCode: err.statusCode } : { type: "unknown", message: String(err) };
|
|
234
|
+
results.push({ context: ctx, result: null, error: aiErr });
|
|
235
|
+
}
|
|
236
|
+
if (options?.onProgress) {
|
|
237
|
+
options.onProgress(results.length, items.length, ctx);
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
return results;
|
|
242
|
+
}
|
|
243
|
+
};
|
|
244
|
+
// Annotate the CommonJS export names for ESM import in node:
// (`0 &&` short-circuits, so this assignment never runs at runtime — the
// literal exists only so the export names can be detected statically.)
0 && (module.exports = {
  AiGenerator,
  AiProviderError,
  RateLimiter,
  buildPrompt,
  classifyAiError
});
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../../src/ai/index.ts","../../src/constants.ts"],"sourcesContent":["/**\n * AiGenerator — AI description generation via Anthropic Claude or OpenAI.\n *\n * Separate entry point: `geo-ai-core/ai`\n * Supports single and bulk generation with rate limiting,\n * prompt template placeholders, and error classification.\n *\n * Requirements: 5.1–5.10\n */\n\nimport type {\n AiContext,\n AiBulkConfig,\n AiError,\n AiGeneratorConfig,\n AiProvider,\n Resource,\n} from '../types';\n\nimport {\n DEFAULT_RATE_LIMIT,\n DEFAULT_BATCH_SIZE,\n DEFAULT_MAX_ITEMS,\n DEFAULT_MAX_DESCRIPTION_LENGTH,\n DEFAULT_PROMPT,\n} from '../constants';\n\n// Re-export types for consumers of geo-ai-core/ai\nexport type { AiContext, AiBulkConfig, AiError, AiGeneratorConfig, AiProvider };\n\n// ── Rate limiter (sliding window) ────────────────────────────────────────\n\nconst RATE_WINDOW_MS = 60_000; // 1 minute\n\ninterface RateLimiterState {\n count: number;\n windowStart: number;\n}\n\n/**\n * Simple sliding-window rate limiter for AI API calls.\n * In-memory, per-process — not shared across instances.\n */\nexport class RateLimiter {\n private state: RateLimiterState;\n private limit: number;\n private _now: () => number;\n\n constructor(limit = DEFAULT_RATE_LIMIT, nowFn?: () => number) {\n this.limit = limit;\n this._now = nowFn ?? (() => Date.now());\n this.state = { count: 0, windowStart: this._now() };\n }\n\n /** Returns true if a request is allowed, false if rate-limited. */\n tryAcquire(): boolean {\n const now = this._now();\n if (now - this.state.windowStart >= RATE_WINDOW_MS) {\n this.state = { count: 0, windowStart: now };\n }\n if (this.state.count >= this.limit) {\n return false;\n }\n this.state.count++;\n return true;\n }\n\n /** Reset the limiter state. 
*/\n reset(): void {\n this.state = { count: 0, windowStart: this._now() };\n }\n}\n\n// ── Prompt template ──────────────────────────────────────────────────────\n\n/**\n * Replaces placeholders {title}, {content}, {type}, {price}, {category}\n * in the prompt template with actual values from the context.\n *\n * Uses function replacer to avoid issues with special replacement patterns\n * (e.g. `$` characters in values).\n */\nexport function buildPrompt(template: string, context: AiContext): string {\n return template\n .replace(/\\{title\\}/g, () => context.title ?? '')\n .replace(/\\{content\\}/g, () => context.content ?? '')\n .replace(/\\{type\\}/g, () => context.type ?? '')\n .replace(/\\{price\\}/g, () => context.price ?? '')\n .replace(/\\{category\\}/g, () => context.category ?? '');\n}\n\n// ── Error classification ─────────────────────────────────────────────────\n\n/**\n * Classifies an AI provider HTTP error into a user-friendly category.\n */\nexport function classifyAiError(\n statusCode: number,\n body?: unknown,\n): AiError {\n if (statusCode === 401 || statusCode === 403) {\n return {\n type: 'auth_error',\n message: 'Invalid API key. Please check your key in settings.',\n statusCode,\n };\n }\n if (statusCode === 429) {\n return {\n type: 'rate_limit',\n message: 'AI provider rate limit exceeded. Please wait and try again.',\n statusCode,\n };\n }\n if (statusCode >= 500) {\n return {\n type: 'server_error',\n message: 'AI service is temporarily unavailable. 
Please try later.',\n statusCode,\n };\n }\n return {\n type: 'unknown',\n message:\n (body as Record<string, Record<string, string>>)?.error?.message ??\n 'Unknown AI provider error.',\n statusCode,\n };\n}\n\n// ── AiProviderError ──────────────────────────────────────────────────────\n\nexport class AiProviderError extends Error {\n readonly type: AiError['type'];\n readonly statusCode?: number;\n\n constructor(error: AiError) {\n super(error.message);\n this.name = 'AiProviderError';\n this.type = error.type;\n this.statusCode = error.statusCode;\n }\n}\n\n// ── Fetch type ───────────────────────────────────────────────────────────\n\ntype FetchFn = typeof globalThis.fetch;\n\n// ── AiGenerator ──────────────────────────────────────────────────────────\n\nexport class AiGenerator {\n private config: AiGeneratorConfig;\n private rateLimiter: RateLimiter;\n private _fetch: FetchFn;\n\n constructor(config: AiGeneratorConfig, fetchFn?: FetchFn) {\n this.config = config;\n this.rateLimiter = new RateLimiter(config.rateLimit ?? DEFAULT_RATE_LIMIT);\n this._fetch = fetchFn ?? globalThis.fetch.bind(globalThis);\n }\n\n private defaultModel(): string {\n return this.config.provider === 'anthropic'\n ? 'claude-sonnet-4-5-20250514'\n : 'gpt-4o-mini';\n }\n\n /** Generate a single AI description. */\n async generate(context: AiContext): Promise<string> {\n if (!this.rateLimiter.tryAcquire()) {\n throw new AiProviderError({\n type: 'rate_limit',\n message: 'Internal rate limit exceeded. Please wait before generating more descriptions.',\n });\n }\n\n const template = this.config.promptTemplate ?? DEFAULT_PROMPT;\n const prompt = buildPrompt(template, context);\n const model = this.config.model ?? this.defaultModel();\n const maxLen = this.config.maxDescriptionLength ?? DEFAULT_MAX_DESCRIPTION_LENGTH;\n\n const result =\n this.config.provider === 'anthropic'\n ? 
await this.callClaude(model, prompt)\n : await this.callOpenAI(model, prompt);\n\n return result.slice(0, maxLen);\n }\n\n // ── Provider calls ───────────────────────────────────────────────────\n\n private async callClaude(model: string, prompt: string): Promise<string> {\n let response: Response;\n try {\n response = await this._fetch('https://api.anthropic.com/v1/messages', {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'x-api-key': this.config.apiKey,\n 'anthropic-version': '2023-06-01',\n },\n body: JSON.stringify({\n model,\n max_tokens: 256,\n messages: [{ role: 'user', content: prompt }],\n }),\n });\n } catch {\n throw new AiProviderError({\n type: 'network_error',\n message: 'Failed to connect to Anthropic API.',\n });\n }\n\n if (!response.ok) {\n const body = await response.json().catch(() => ({}));\n throw new AiProviderError(classifyAiError(response.status, body));\n }\n\n const body = await response.json();\n const text = body?.content?.[0]?.text;\n if (typeof text !== 'string') {\n throw new AiProviderError({\n type: 'unknown',\n message: 'Unexpected Claude API response format.',\n });\n }\n return text.trim();\n }\n\n private async callOpenAI(model: string, prompt: string): Promise<string> {\n let response: Response;\n try {\n response = await this._fetch(\n 'https://api.openai.com/v1/chat/completions',\n {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this.config.apiKey}`,\n },\n body: JSON.stringify({\n model,\n max_tokens: 256,\n messages: [{ role: 'user', content: prompt }],\n }),\n },\n );\n } catch {\n throw new AiProviderError({\n type: 'network_error',\n message: 'Failed to connect to OpenAI API.',\n });\n }\n\n if (!response.ok) {\n const body = await response.json().catch(() => ({}));\n throw new AiProviderError(classifyAiError(response.status, body));\n }\n\n const body = await response.json();\n const text = body?.choices?.[0]?.message?.content;\n if (typeof 
text !== 'string') {\n throw new AiProviderError({\n type: 'unknown',\n message: 'Unexpected OpenAI API response format.',\n });\n }\n return text.trim();\n }\n\n // ── Bulk generation ────────────────────────────────────────────────────\n\n /**\n * Bulk-generate AI descriptions for multiple contexts.\n * Processes in batches with progress callback.\n */\n async bulkGenerate(\n contexts: AiContext[],\n options?: AiBulkConfig,\n ): Promise<Array<{ context: AiContext; result: string | null; error?: AiError }>> {\n const batchSize = options?.batchSize ?? DEFAULT_BATCH_SIZE;\n const maxItems = options?.maxItems ?? DEFAULT_MAX_ITEMS;\n const items = contexts.slice(0, maxItems);\n const results: Array<{ context: AiContext; result: string | null; error?: AiError }> = [];\n\n for (let i = 0; i < items.length; i += batchSize) {\n const batch = items.slice(i, i + batchSize);\n\n for (const ctx of batch) {\n try {\n const result = await this.generate(ctx);\n results.push({ context: ctx, result });\n } catch (err) {\n const aiErr: AiError =\n err instanceof AiProviderError\n ? 
{ type: err.type, message: err.message, statusCode: err.statusCode }\n : { type: 'unknown', message: String(err) };\n results.push({ context: ctx, result: null, error: aiErr });\n }\n\n if (options?.onProgress) {\n options.onProgress(results.length, items.length, ctx as unknown as Resource);\n }\n }\n }\n\n return results;\n }\n}\n","/**\n * Supported AI crawlers: bot User-Agent identifier → human-readable name.\n * Minimum 13 bots per spec requirement 3.1.\n */\nexport const AI_BOTS: Record<string, string> = {\n GPTBot: 'OpenAI / ChatGPT',\n 'OAI-SearchBot': 'OpenAI / Copilot Search',\n ClaudeBot: 'Anthropic / Claude',\n 'Google-Extended': 'Google / Gemini',\n PerplexityBot: 'Perplexity AI',\n DeepSeekBot: 'DeepSeek',\n GrokBot: 'xAI / Grok',\n 'meta-externalagent': 'Meta / LLaMA',\n PanguBot: 'Alibaba / Qwen',\n YandexBot: 'Yandex / YandexGPT',\n SputnikBot: 'Sber / GigaChat',\n Bytespider: 'ByteDance / Douyin',\n Baiduspider: 'Baidu / ERNIE',\n 'claude-web': 'Anthropic / Claude Web',\n Amazonbot: 'Amazon / Alexa',\n Applebot: 'Apple / Siri & Spotlight',\n};\n\n/** Default maximum words for content in llms-full.txt */\nexport const DEFAULT_MAX_CONTENT_WORDS = 200;\n\n/** Default AI API requests per minute */\nexport const DEFAULT_RATE_LIMIT = 10;\n\n/** Default items per batch in bulk AI generation */\nexport const DEFAULT_BATCH_SIZE = 5;\n\n/** Default maximum items for bulk AI generation */\nexport const DEFAULT_MAX_ITEMS = 50;\n\n/** Default maximum AI description length (characters) */\nexport const DEFAULT_MAX_DESCRIPTION_LENGTH = 200;\n\n/** Default prompt template for AI description generation */\nexport const DEFAULT_PROMPT = `Write a concise AI-optimized description (max 200 characters) for the following {type}.\n\nTitle: {title}\nContent: {content}\nPrice: {price}\nCategory: {category}\n\nThe description should be informative, keyword-rich, and suitable for AI search engines. 
Focus on the key features and value proposition.`;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AC2BO,IAAM,qBAAqB;AAG3B,IAAM,qBAAqB;AAG3B,IAAM,oBAAoB;AAG1B,IAAM,iCAAiC;AAGvC,IAAM,iBAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ADP9B,IAAM,iBAAiB;AAWhB,IAAM,cAAN,MAAkB;AAAA,EACf;AAAA,EACA;AAAA,EACA;AAAA,EAER,YAAY,QAAQ,oBAAoB,OAAsB;AAC5D,SAAK,QAAQ;AACb,SAAK,OAAO,UAAU,MAAM,KAAK,IAAI;AACrC,SAAK,QAAQ,EAAE,OAAO,GAAG,aAAa,KAAK,KAAK,EAAE;AAAA,EACpD;AAAA;AAAA,EAGA,aAAsB;AACpB,UAAM,MAAM,KAAK,KAAK;AACtB,QAAI,MAAM,KAAK,MAAM,eAAe,gBAAgB;AAClD,WAAK,QAAQ,EAAE,OAAO,GAAG,aAAa,IAAI;AAAA,IAC5C;AACA,QAAI,KAAK,MAAM,SAAS,KAAK,OAAO;AAClC,aAAO;AAAA,IACT;AACA,SAAK,MAAM;AACX,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,QAAc;AACZ,SAAK,QAAQ,EAAE,OAAO,GAAG,aAAa,KAAK,KAAK,EAAE;AAAA,EACpD;AACF;AAWO,SAAS,YAAY,UAAkB,SAA4B;AACxE,SAAO,SACJ,QAAQ,cAAc,MAAM,QAAQ,SAAS,EAAE,EAC/C,QAAQ,gBAAgB,MAAM,QAAQ,WAAW,EAAE,EACnD,QAAQ,aAAa,MAAM,QAAQ,QAAQ,EAAE,EAC7C,QAAQ,cAAc,MAAM,QAAQ,SAAS,EAAE,EAC/C,QAAQ,iBAAiB,MAAM,QAAQ,YAAY,EAAE;AAC1D;AAOO,SAAS,gBACd,YACA,MACS;AACT,MAAI,eAAe,OAAO,eAAe,KAAK;AAC5C,WAAO;AAAA,MACL,MAAM;AAAA,MACN,SAAS;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,MAAI,eAAe,KAAK;AACtB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,SAAS;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,MAAI,cAAc,KAAK;AACrB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,SAAS;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,SAAO;AAAA,IACL,MAAM;AAAA,IACN,SACG,MAAiD,OAAO,WACzD;AAAA,IACF;AAAA,EACF;AACF;AAIO,IAAM,kBAAN,cAA8B,MAAM;AAAA,EAChC;AAAA,EACA;AAAA,EAET,YAAY,OAAgB;AAC1B,UAAM,MAAM,OAAO;AACnB,SAAK,OAAO;AACZ,SAAK,OAAO,MAAM;AAClB,SAAK,aAAa,MAAM;AAAA,EAC1B;AACF;AAQO,IAAM,cAAN,MAAkB;AAAA,EACf;AAAA,EACA;AAAA,EACA;AAAA,EAER,YAAY,QAA2B,SAAmB;AACxD,SAAK,SAAS;AACd,SAAK,cAAc,IAAI,YAAY,OAAO,aAAa,kBAAkB;AACzE,SAAK,SAAS,WAAW,WAAW,MAAM,KAAK,UAAU;AAAA,EAC3D;AAAA,EAEQ,eAAuB;AAC7B,WAAO,KAAK,OAAO,aAAa,cAC5B,+BACA;AAAA,EACN;AAAA;AAAA,EAGA,MAAM,SAAS,SAAqC;AAClD,QAAI,CAAC,KAAK,YAAY,WAAW,GAAG;AAClC,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,UAAM,WAAW,KAAK,OAAO,kBAAk
B;AAC/C,UAAM,SAAS,YAAY,UAAU,OAAO;AAC5C,UAAM,QAAQ,KAAK,OAAO,SAAS,KAAK,aAAa;AACrD,UAAM,SAAS,KAAK,OAAO,wBAAwB;AAEnD,UAAM,SACJ,KAAK,OAAO,aAAa,cACrB,MAAM,KAAK,WAAW,OAAO,MAAM,IACnC,MAAM,KAAK,WAAW,OAAO,MAAM;AAEzC,WAAO,OAAO,MAAM,GAAG,MAAM;AAAA,EAC/B;AAAA;AAAA,EAIA,MAAc,WAAW,OAAe,QAAiC;AACvE,QAAI;AACJ,QAAI;AACF,iBAAW,MAAM,KAAK,OAAO,yCAAyC;AAAA,QACpE,QAAQ;AAAA,QACR,SAAS;AAAA,UACP,gBAAgB;AAAA,UAChB,aAAa,KAAK,OAAO;AAAA,UACzB,qBAAqB;AAAA,QACvB;AAAA,QACA,MAAM,KAAK,UAAU;AAAA,UACnB;AAAA,UACA,YAAY;AAAA,UACZ,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,QAC9C,CAAC;AAAA,MACH,CAAC;AAAA,IACH,QAAQ;AACN,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,CAAC,SAAS,IAAI;AAChB,YAAMA,QAAO,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,CAAC,EAAE;AACnD,YAAM,IAAI,gBAAgB,gBAAgB,SAAS,QAAQA,KAAI,CAAC;AAAA,IAClE;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,UAAM,OAAO,MAAM,UAAU,CAAC,GAAG;AACjC,QAAI,OAAO,SAAS,UAAU;AAC5B,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AACA,WAAO,KAAK,KAAK;AAAA,EACnB;AAAA,EAEA,MAAc,WAAW,OAAe,QAAiC;AACvE,QAAI;AACJ,QAAI;AACF,iBAAW,MAAM,KAAK;AAAA,QACpB;AAAA,QACA;AAAA,UACE,QAAQ;AAAA,UACR,SAAS;AAAA,YACP,gBAAgB;AAAA,YAChB,eAAe,UAAU,KAAK,OAAO,MAAM;AAAA,UAC7C;AAAA,UACA,MAAM,KAAK,UAAU;AAAA,YACnB;AAAA,YACA,YAAY;AAAA,YACZ,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,OAAO,CAAC;AAAA,UAC9C,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF,QAAQ;AACN,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,QAAI,CAAC,SAAS,IAAI;AAChB,YAAMA,QAAO,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,CAAC,EAAE;AACnD,YAAM,IAAI,gBAAgB,gBAAgB,SAAS,QAAQA,KAAI,CAAC;AAAA,IAClE;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,UAAM,OAAO,MAAM,UAAU,CAAC,GAAG,SAAS;AAC1C,QAAI,OAAO,SAAS,UAAU;AAC5B,YAAM,IAAI,gBAAgB;AAAA,QACxB,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AACA,WAAO,KAAK,KAAK;AAAA,EACnB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,aACJ,UACA,SACgF;AAChF,UAAM,YAAY,SAAS,aAAa;AACxC,UAAM,WAAW,SAAS,YAAY;AACtC,UAAM,QAAQ,SAAS,MAAM,GAAG,QAAQ;AACxC,UAAM,UAAiF,CAAC;AAExF,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AAChD,YAAM,QA
AQ,MAAM,MAAM,GAAG,IAAI,SAAS;AAE1C,iBAAW,OAAO,OAAO;AACvB,YAAI;AACF,gBAAM,SAAS,MAAM,KAAK,SAAS,GAAG;AACtC,kBAAQ,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;AAAA,QACvC,SAAS,KAAK;AACZ,gBAAM,QACJ,eAAe,kBACX,EAAE,MAAM,IAAI,MAAM,SAAS,IAAI,SAAS,YAAY,IAAI,WAAW,IACnE,EAAE,MAAM,WAAW,SAAS,OAAO,GAAG,EAAE;AAC9C,kBAAQ,KAAK,EAAE,SAAS,KAAK,QAAQ,MAAM,OAAO,MAAM,CAAC;AAAA,QAC3D;AAEA,YAAI,SAAS,YAAY;AACvB,kBAAQ,WAAW,QAAQ,QAAQ,MAAM,QAAQ,GAA0B;AAAA,QAC7E;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AACF;","names":["body"]}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { g as AiGeneratorConfig, e as AiContext, A as AiBulkConfig, f as AiError } from '../types-B3qT6_qa.cjs';
export { h as AiProvider } from '../types-B3qT6_qa.cjs';

/**
 * AiGenerator — AI description generation via Anthropic Claude or OpenAI.
 *
 * Separate entry point: `geo-ai-core/ai`
 * Supports single and bulk generation with rate limiting,
 * prompt template placeholders, and error classification.
 *
 * Requirements: 5.1–5.10
 */

/**
 * Simple sliding-window rate limiter for AI API calls.
 * In-memory, per-process — not shared across instances.
 */
declare class RateLimiter {
  private state;
  private limit;
  private _now;
  // `nowFn` is an injectable clock (defaults to Date.now) for testability.
  constructor(limit?: number, nowFn?: () => number);
  /** Returns true if a request is allowed, false if rate-limited. */
  tryAcquire(): boolean;
  /** Reset the limiter state. */
  reset(): void;
}
/**
 * Replaces placeholders {title}, {content}, {type}, {price}, {category}
 * in the prompt template with actual values from the context.
 *
 * Uses function replacer to avoid issues with special replacement patterns
 * (e.g. `$` characters in values).
 */
declare function buildPrompt(template: string, context: AiContext): string;
/**
 * Classifies an AI provider HTTP error into a user-friendly category.
 */
declare function classifyAiError(statusCode: number, body?: unknown): AiError;
/** Error thrown by AiGenerator; carries the classified category and, when known, the HTTP status. */
declare class AiProviderError extends Error {
  readonly type: AiError['type'];
  readonly statusCode?: number;
  constructor(error: AiError);
}
/** Shape of the injectable fetch implementation (the global Fetch API). */
type FetchFn = typeof globalThis.fetch;
declare class AiGenerator {
  private config;
  private rateLimiter;
  private _fetch;
  // `fetchFn` overrides the global fetch (useful for testing/proxying).
  constructor(config: AiGeneratorConfig, fetchFn?: FetchFn);
  private defaultModel;
  /** Generate a single AI description. */
  generate(context: AiContext): Promise<string>;
  private callClaude;
  private callOpenAI;
  /**
   * Bulk-generate AI descriptions for multiple contexts.
   * Processes in batches with progress callback.
   */
  bulkGenerate(contexts: AiContext[], options?: AiBulkConfig): Promise<Array<{
    context: AiContext;
    result: string | null;
    error?: AiError;
  }>>;
}

export { AiBulkConfig, AiContext, AiError, AiGenerator, AiGeneratorConfig, AiProviderError, RateLimiter, buildPrompt, classifyAiError };
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { g as AiGeneratorConfig, e as AiContext, A as AiBulkConfig, f as AiError } from '../types-B3qT6_qa.js';
|
|
2
|
+
export { h as AiProvider } from '../types-B3qT6_qa.js';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* AiGenerator — AI description generation via Anthropic Claude or OpenAI.
|
|
6
|
+
*
|
|
7
|
+
* Separate entry point: `geo-ai-core/ai`
|
|
8
|
+
* Supports single and bulk generation with rate limiting,
|
|
9
|
+
* prompt template placeholders, and error classification.
|
|
10
|
+
*
|
|
11
|
+
* Requirements: 5.1–5.10
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Simple sliding-window rate limiter for AI API calls.
|
|
16
|
+
* In-memory, per-process — not shared across instances.
|
|
17
|
+
*/
|
|
18
|
+
declare class RateLimiter {
|
|
19
|
+
private state;
|
|
20
|
+
private limit;
|
|
21
|
+
private _now;
|
|
22
|
+
constructor(limit?: number, nowFn?: () => number);
|
|
23
|
+
/** Returns true if a request is allowed, false if rate-limited. */
|
|
24
|
+
tryAcquire(): boolean;
|
|
25
|
+
/** Reset the limiter state. */
|
|
26
|
+
reset(): void;
|
|
27
|
+
}
|
|
28
|
+
/**
|
|
29
|
+
* Replaces placeholders {title}, {content}, {type}, {price}, {category}
|
|
30
|
+
* in the prompt template with actual values from the context.
|
|
31
|
+
*
|
|
32
|
+
* Uses function replacer to avoid issues with special replacement patterns
|
|
33
|
+
* (e.g. `$` characters in values).
|
|
34
|
+
*/
|
|
35
|
+
declare function buildPrompt(template: string, context: AiContext): string;
|
|
36
|
+
/**
|
|
37
|
+
* Classifies an AI provider HTTP error into a user-friendly category.
|
|
38
|
+
*/
|
|
39
|
+
declare function classifyAiError(statusCode: number, body?: unknown): AiError;
|
|
40
|
+
declare class AiProviderError extends Error {
|
|
41
|
+
readonly type: AiError['type'];
|
|
42
|
+
readonly statusCode?: number;
|
|
43
|
+
constructor(error: AiError);
|
|
44
|
+
}
|
|
45
|
+
type FetchFn = typeof globalThis.fetch;
|
|
46
|
+
declare class AiGenerator {
|
|
47
|
+
private config;
|
|
48
|
+
private rateLimiter;
|
|
49
|
+
private _fetch;
|
|
50
|
+
constructor(config: AiGeneratorConfig, fetchFn?: FetchFn);
|
|
51
|
+
private defaultModel;
|
|
52
|
+
/** Generate a single AI description. */
|
|
53
|
+
generate(context: AiContext): Promise<string>;
|
|
54
|
+
private callClaude;
|
|
55
|
+
private callOpenAI;
|
|
56
|
+
/**
|
|
57
|
+
* Bulk-generate AI descriptions for multiple contexts.
|
|
58
|
+
* Processes in batches with progress callback.
|
|
59
|
+
*/
|
|
60
|
+
bulkGenerate(contexts: AiContext[], options?: AiBulkConfig): Promise<Array<{
|
|
61
|
+
context: AiContext;
|
|
62
|
+
result: string | null;
|
|
63
|
+
error?: AiError;
|
|
64
|
+
}>>;
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
export { AiBulkConfig, AiContext, AiError, AiGenerator, AiGeneratorConfig, AiProviderError, RateLimiter, buildPrompt, classifyAiError };
|