scrapex 1.0.0-alpha.1 → 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +164 -5
  2. package/dist/enhancer-ByjRD-t5.mjs +769 -0
  3. package/dist/enhancer-ByjRD-t5.mjs.map +1 -0
  4. package/dist/enhancer-j0xqKDJm.cjs +847 -0
  5. package/dist/enhancer-j0xqKDJm.cjs.map +1 -0
  6. package/dist/index-CDgcRnig.d.cts +268 -0
  7. package/dist/index-CDgcRnig.d.cts.map +1 -0
  8. package/dist/index-piS5wtki.d.mts +268 -0
  9. package/dist/index-piS5wtki.d.mts.map +1 -0
  10. package/dist/index.cjs +1192 -37
  11. package/dist/index.cjs.map +1 -1
  12. package/dist/index.d.cts +318 -2
  13. package/dist/index.d.cts.map +1 -1
  14. package/dist/index.d.mts +318 -2
  15. package/dist/index.d.mts.map +1 -1
  16. package/dist/index.mjs +1164 -6
  17. package/dist/index.mjs.map +1 -1
  18. package/dist/llm/index.cjs +250 -232
  19. package/dist/llm/index.cjs.map +1 -1
  20. package/dist/llm/index.d.cts +132 -85
  21. package/dist/llm/index.d.cts.map +1 -1
  22. package/dist/llm/index.d.mts +132 -85
  23. package/dist/llm/index.d.mts.map +1 -1
  24. package/dist/llm/index.mjs +243 -236
  25. package/dist/llm/index.mjs.map +1 -1
  26. package/dist/parsers/index.cjs +10 -199
  27. package/dist/parsers/index.d.cts +2 -133
  28. package/dist/parsers/index.d.mts +2 -133
  29. package/dist/parsers/index.mjs +2 -191
  30. package/dist/parsers-Bneuws8x.cjs +569 -0
  31. package/dist/parsers-Bneuws8x.cjs.map +1 -0
  32. package/dist/parsers-CwkYnyWY.mjs +482 -0
  33. package/dist/parsers-CwkYnyWY.mjs.map +1 -0
  34. package/dist/types-CadAXrme.d.mts +674 -0
  35. package/dist/types-CadAXrme.d.mts.map +1 -0
  36. package/dist/types-DPEtPihB.d.cts +674 -0
  37. package/dist/types-DPEtPihB.d.cts.map +1 -0
  38. package/package.json +15 -16
  39. package/dist/enhancer-Q6CSc1gA.mjs +0 -220
  40. package/dist/enhancer-Q6CSc1gA.mjs.map +0 -1
  41. package/dist/enhancer-oM4BhYYS.cjs +0 -268
  42. package/dist/enhancer-oM4BhYYS.cjs.map +0 -1
  43. package/dist/parsers/index.cjs.map +0 -1
  44. package/dist/parsers/index.d.cts.map +0 -1
  45. package/dist/parsers/index.d.mts.map +0 -1
  46. package/dist/parsers/index.mjs.map +0 -1
  47. package/dist/types-CNQZVW36.d.mts +0 -150
  48. package/dist/types-CNQZVW36.d.mts.map +0 -1
  49. package/dist/types-D0HYR95H.d.cts +0 -150
  50. package/dist/types-D0HYR95H.d.cts.map +0 -1
@@ -1,70 +1,99 @@
1
- const require_enhancer = require('../enhancer-oM4BhYYS.cjs');
1
+ const require_parsers = require('../parsers-Bneuws8x.cjs');
2
+ const require_enhancer = require('../enhancer-j0xqKDJm.cjs');
3
+ let zod = require("zod");
2
4
 
3
- //#region src/llm/anthropic.ts
4
- const DEFAULT_MODEL$1 = "claude-3-5-haiku-20241022";
5
- const DEFAULT_MAX_TOKENS$1 = 1024;
5
+ //#region src/llm/http.ts
6
6
  /**
7
- * Anthropic Claude provider
8
- *
9
- * Requires @anthropic-ai/sdk as a peer dependency.
10
- *
11
- * @example
12
- * ```ts
13
- * const provider = new AnthropicProvider({ apiKey: 'sk-...' });
14
- * const result = await scrape(url, { llm: provider, enhance: ['summarize'] });
15
- * ```
7
+ * HTTP-based LLM Provider using native fetch.
8
+ * Provides a unified interface for any REST-based LLM API.
16
9
  */
17
- var AnthropicProvider = class {
18
- name = "anthropic";
19
- client;
20
- model;
21
- constructor(config = {}) {
22
- const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY;
23
- if (!apiKey) throw new require_enhancer.ScrapeError("Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass apiKey in config.", "LLM_ERROR");
24
- this.model = config.model ?? DEFAULT_MODEL$1;
25
- try {
26
- const { Anthropic } = require("@anthropic-ai/sdk");
27
- this.client = new Anthropic({
28
- apiKey,
29
- baseURL: config.baseUrl
10
+ /**
11
+ * HTTP-based LLM provider.
12
+ * Works with any REST API using native fetch.
13
+ */
14
+ var HttpLLMProvider = class extends require_enhancer.BaseHttpProvider {
15
+ name;
16
+ requestBuilder;
17
+ responseMapper;
18
+ jsonMode;
19
+ constructor(config) {
20
+ super(config);
21
+ this.name = "http-llm";
22
+ this.jsonMode = config.jsonMode ?? false;
23
+ this.requestBuilder = config.requestBuilder ?? ((prompt, opts) => {
24
+ const messages = [];
25
+ if (opts.systemPrompt) messages.push({
26
+ role: "system",
27
+ content: opts.systemPrompt
30
28
  });
31
- } catch {
32
- throw new require_enhancer.ScrapeError("@anthropic-ai/sdk is required for Anthropic provider. Install with: npm install @anthropic-ai/sdk", "LLM_ERROR");
33
- }
29
+ messages.push({
30
+ role: "user",
31
+ content: prompt
32
+ });
33
+ const request = {
34
+ model: this.model,
35
+ messages
36
+ };
37
+ if (opts.temperature !== void 0) request.temperature = opts.temperature;
38
+ if (opts.maxTokens !== void 0) request.max_tokens = opts.maxTokens;
39
+ return request;
40
+ });
41
+ this.responseMapper = config.responseMapper ?? ((response) => {
42
+ const resp = response;
43
+ if (Array.isArray(resp.choices) && resp.choices.length > 0) {
44
+ const choice = resp.choices[0];
45
+ if (choice.message?.content) return choice.message.content;
46
+ }
47
+ if (Array.isArray(resp.content)) {
48
+ const textBlock = resp.content.find((c) => c.type === "text");
49
+ if (textBlock?.text) return textBlock.text;
50
+ }
51
+ throw new require_enhancer.ScrapeError("Unable to parse LLM response. Provide a custom responseMapper.", "VALIDATION_ERROR");
52
+ });
34
53
  }
54
+ /**
55
+ * Generate a text completion.
56
+ */
35
57
  async complete(prompt, options = {}) {
36
- try {
37
- const content = (await this.client.messages.create({
38
- model: this.model,
39
- max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS$1,
40
- messages: [{
41
- role: "user",
42
- content: prompt
43
- }],
44
- system: options.systemPrompt,
45
- temperature: options.temperature
46
- })).content[0];
47
- if (content?.type === "text" && content.text) return content.text;
48
- throw new require_enhancer.ScrapeError("Unexpected or empty response from Anthropic", "LLM_ERROR");
49
- } catch (error) {
50
- if (error instanceof require_enhancer.ScrapeError) throw error;
51
- throw new require_enhancer.ScrapeError(`Anthropic API error: ${error instanceof Error ? error.message : String(error)}`, "LLM_ERROR", void 0, error instanceof Error ? error : void 0);
52
- }
58
+ let body = this.requestBuilder(prompt, options);
59
+ if (this.jsonMode && typeof body === "object" && body !== null) body = {
60
+ ...body,
61
+ response_format: { type: "json_object" }
62
+ };
63
+ const { data } = await this.fetch(this.baseUrl, { body });
64
+ const content = this.responseMapper(data);
65
+ if (!content) throw new require_enhancer.ScrapeError("Empty response from LLM", "LLM_ERROR");
66
+ return content;
53
67
  }
68
+ /**
69
+ * Generate a structured JSON completion with Zod validation.
70
+ */
54
71
  async completeJSON(prompt, schema, options = {}) {
55
72
  const jsonPrompt = `${prompt}
56
73
 
57
74
  Respond ONLY with valid JSON matching this schema:
58
- ${JSON.stringify(zodToJsonSchema$1(schema), null, 2)}
75
+ ${JSON.stringify(zodToJsonSchema(schema), null, 2)}
59
76
 
60
77
  Do not include any explanation or markdown formatting. Just the JSON object.`;
61
- const response = await this.complete(jsonPrompt, {
78
+ const useJsonMode = this.jsonMode;
79
+ let body = this.requestBuilder(jsonPrompt, {
62
80
  ...options,
63
81
  systemPrompt: options.systemPrompt ?? "You are a helpful assistant that responds only with valid JSON."
64
82
  });
83
+ if (useJsonMode && typeof body === "object" && body !== null) body = {
84
+ ...body,
85
+ response_format: { type: "json_object" }
86
+ };
87
+ const { data } = await this.fetch(this.baseUrl, { body });
88
+ const content = this.responseMapper(data);
89
+ if (!content) throw new require_enhancer.ScrapeError("Empty response from LLM", "LLM_ERROR");
65
90
  try {
66
- const jsonMatch = response.match(/\{[\s\S]*\}/);
67
- if (!jsonMatch) throw new Error("No JSON object found in response");
91
+ const trimmed = content.trim();
92
+ try {
93
+ return schema.parse(JSON.parse(trimmed));
94
+ } catch {}
95
+ const jsonMatch = content.match(/[[{][\s\S]*[\]}]/);
96
+ if (!jsonMatch) throw new Error("No JSON found in response");
68
97
  const parsed = JSON.parse(jsonMatch[0]);
69
98
  return schema.parse(parsed);
70
99
  } catch (error) {
@@ -73,244 +102,233 @@ Do not include any explanation or markdown formatting. Just the JSON object.`;
73
102
  }
74
103
  };
75
104
  /**
76
- * Convert a Zod schema to a simple JSON Schema representation
77
- * (simplified version for prompt engineering)
105
+ * Convert a Zod schema to a JSON Schema representation.
106
+ * Uses Zod's built-in toJSONSchema method (Zod 4+).
107
+ * Used for prompting LLMs to return structured data.
78
108
  */
79
- function zodToJsonSchema$1(schema) {
109
+ function zodToJsonSchema(schema) {
110
+ if (typeof zod.z.toJSONSchema === "function") {
111
+ const { $schema, ...rest } = zod.z.toJSONSchema(schema);
112
+ return rest;
113
+ }
80
114
  const def = schema._def;
81
- switch (def.typeName) {
82
- case "ZodObject": {
115
+ switch (def.type) {
116
+ case "object": {
83
117
  const shape = schema.shape;
84
118
  const properties = {};
85
- for (const [key, value] of Object.entries(shape)) properties[key] = zodToJsonSchema$1(value);
119
+ const required = [];
120
+ for (const [key, value] of Object.entries(shape)) {
121
+ properties[key] = zodToJsonSchema(value);
122
+ if (value._def.type !== "optional") required.push(key);
123
+ }
86
124
  return {
87
125
  type: "object",
88
- properties
126
+ properties,
127
+ required
89
128
  };
90
129
  }
91
- case "ZodArray": return {
130
+ case "array": return {
92
131
  type: "array",
93
- items: zodToJsonSchema$1(def.type)
94
- };
95
- case "ZodString": return { type: "string" };
96
- case "ZodNumber": return { type: "number" };
97
- case "ZodBoolean": return { type: "boolean" };
98
- case "ZodEnum": return {
99
- type: "string",
100
- enum: def.values
132
+ items: zodToJsonSchema(def.element)
101
133
  };
134
+ case "string": return { type: "string" };
135
+ case "number": return { type: "number" };
136
+ case "boolean": return { type: "boolean" };
137
+ case "enum": {
138
+ const enumDef = def;
139
+ return {
140
+ type: "string",
141
+ enum: Object.values(enumDef.entries)
142
+ };
143
+ }
144
+ case "optional": return zodToJsonSchema(def.innerType);
102
145
  default: return { type: "string" };
103
146
  }
104
147
  }
105
148
 
106
149
  //#endregion
107
- //#region src/llm/openai.ts
108
- const DEFAULT_MODEL = "gpt-4o-mini";
109
- const DEFAULT_MAX_TOKENS = 1024;
110
- const DEFAULT_BASE_URL = "https://api.openai.com/v1";
150
+ //#region src/llm/presets.ts
111
151
  /**
112
- * OpenAI-compatible provider
113
- *
114
- * Works with:
115
- * - OpenAI API
116
- * - Ollama (http://localhost:11434/v1)
117
- * - LM Studio (http://localhost:1234/v1)
118
- * - LocalAI
119
- * - vLLM
120
- * - Any OpenAI-compatible API
121
- *
122
- * Requires `openai` as a peer dependency.
152
+ * Preset factory functions for common LLM providers.
153
+ * All presets use the HttpLLMProvider with appropriate configuration.
154
+ */
155
+ /**
156
+ * Create an OpenAI LLM provider.
123
157
  *
124
158
  * @example
125
159
  * ```ts
126
- * // OpenAI
127
- * const provider = new OpenAIProvider({ apiKey: 'sk-...' });
128
- *
129
- * // Ollama
130
- * const provider = new OpenAIProvider({
131
- * baseUrl: 'http://localhost:11434/v1',
132
- * model: 'llama3.2',
133
- * apiKey: 'ollama' // Ollama doesn't require a real key
134
- * });
160
+ * const provider = createOpenAI({ apiKey: 'sk-...' });
161
+ * const result = await scrape(url, { llm: provider, enhance: ['summarize'] });
162
+ * ```
163
+ */
164
+ function createOpenAI(options) {
165
+ const apiKey = options?.apiKey ?? process.env.OPENAI_API_KEY;
166
+ if (!apiKey) throw new Error("OpenAI API key required. Set OPENAI_API_KEY env var or pass apiKey option.");
167
+ return new HttpLLMProvider({
168
+ baseUrl: options?.baseUrl ?? "https://api.openai.com/v1/chat/completions",
169
+ model: options?.model ?? "gpt-4o-mini",
170
+ headers: { Authorization: `Bearer ${apiKey}` },
171
+ jsonMode: true
172
+ });
173
+ }
174
+ /**
175
+ * Create an Anthropic Claude LLM provider.
135
176
  *
136
- * // LM Studio
137
- * const provider = new OpenAIProvider({
138
- * baseUrl: 'http://localhost:1234/v1',
139
- * model: 'local-model',
140
- * apiKey: 'lm-studio'
141
- * });
177
+ * @example
178
+ * ```ts
179
+ * const provider = createAnthropic({ apiKey: 'sk-...' });
180
+ * const result = await scrape(url, { llm: provider, enhance: ['summarize'] });
142
181
  * ```
143
182
  */
144
- var OpenAIProvider = class {
145
- name = "openai";
146
- client;
147
- model;
148
- constructor(config = {}) {
149
- const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY;
150
- const baseUrl = config.baseUrl ?? DEFAULT_BASE_URL;
151
- if (!apiKey && baseUrl === DEFAULT_BASE_URL) throw new require_enhancer.ScrapeError("OpenAI API key required. Set OPENAI_API_KEY env var or pass apiKey in config.", "LLM_ERROR");
152
- this.model = config.model ?? DEFAULT_MODEL;
153
- try {
154
- const { OpenAI } = require("openai");
155
- this.client = new OpenAI({
156
- apiKey: apiKey ?? "local",
157
- baseURL: baseUrl
158
- });
159
- } catch {
160
- throw new require_enhancer.ScrapeError("openai package is required for OpenAI provider. Install with: npm install openai", "LLM_ERROR");
161
- }
162
- }
163
- async complete(prompt, options = {}) {
164
- try {
165
- const client = this.client;
166
- const messages = [];
167
- if (options.systemPrompt) messages.push({
168
- role: "system",
169
- content: options.systemPrompt
170
- });
171
- messages.push({
172
- role: "user",
173
- content: prompt
174
- });
175
- const content = (await client.chat.completions.create({
176
- model: this.model,
177
- max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
178
- messages,
179
- temperature: options.temperature
180
- })).choices[0]?.message?.content;
181
- if (!content) throw new require_enhancer.ScrapeError("Empty response from OpenAI", "LLM_ERROR");
182
- return content;
183
- } catch (error) {
184
- if (error instanceof require_enhancer.ScrapeError) throw error;
185
- throw new require_enhancer.ScrapeError(`OpenAI API error: ${error instanceof Error ? error.message : String(error)}`, "LLM_ERROR", void 0, error instanceof Error ? error : void 0);
186
- }
187
- }
188
- async completeJSON(prompt, schema, options = {}) {
189
- const client = this.client;
190
- try {
191
- const messages = [{
192
- role: "system",
193
- content: options.systemPrompt ?? "You are a helpful assistant that extracts information from content."
194
- }, {
183
+ function createAnthropic(options) {
184
+ const apiKey = options?.apiKey ?? process.env.ANTHROPIC_API_KEY;
185
+ if (!apiKey) throw new Error("Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass apiKey option.");
186
+ const model = options?.model ?? "claude-3-5-haiku-20241022";
187
+ return new HttpLLMProvider({
188
+ baseUrl: "https://api.anthropic.com/v1/messages",
189
+ model,
190
+ headers: {
191
+ "x-api-key": apiKey,
192
+ "anthropic-version": "2023-06-01"
193
+ },
194
+ requestBuilder: (prompt, opts) => ({
195
+ model,
196
+ max_tokens: opts.maxTokens ?? 1024,
197
+ messages: [{
195
198
  role: "user",
196
199
  content: prompt
197
- }];
198
- const content = (await client.chat.completions.create({
199
- model: this.model,
200
- max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
201
- messages,
202
- temperature: options.temperature,
203
- response_format: { type: "json_object" }
204
- })).choices[0]?.message?.content;
205
- if (!content) throw new require_enhancer.ScrapeError("Empty response from OpenAI", "LLM_ERROR");
206
- const parsed = JSON.parse(content);
207
- return schema.parse(parsed);
208
- } catch (error) {
209
- if (error instanceof require_enhancer.ScrapeError) throw error;
210
- const jsonPrompt = `${prompt}
211
-
212
- Respond ONLY with valid JSON matching this schema:
213
- ${JSON.stringify(zodToJsonSchema(schema), null, 2)}
214
-
215
- Do not include any explanation or markdown formatting. Just the JSON object.`;
216
- const response = await this.complete(jsonPrompt, {
217
- ...options,
218
- systemPrompt: "You respond only with valid JSON."
219
- });
220
- try {
221
- const jsonMatch = response.match(/\{[\s\S]*\}/);
222
- if (!jsonMatch) throw new Error("No JSON object found in response");
223
- const parsed = JSON.parse(jsonMatch[0]);
224
- return schema.parse(parsed);
225
- } catch (parseError) {
226
- throw new require_enhancer.ScrapeError(`Failed to parse LLM response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`, "VALIDATION_ERROR", void 0, parseError instanceof Error ? parseError : void 0);
227
- }
228
- }
229
- }
230
- };
231
- /**
232
- * Convert a Zod schema to JSON Schema for structured outputs
233
- */
234
- function zodToJsonSchema(schema) {
235
- const def = schema._def;
236
- switch (def.typeName) {
237
- case "ZodObject": {
238
- const shape = schema.shape;
239
- const properties = {};
240
- const required = [];
241
- for (const [key, value] of Object.entries(shape)) {
242
- properties[key] = zodToJsonSchema(value);
243
- if (value._def.typeName !== "ZodOptional") required.push(key);
244
- }
245
- return {
246
- type: "object",
247
- properties,
248
- required
249
- };
250
- }
251
- case "ZodArray": return {
252
- type: "array",
253
- items: zodToJsonSchema(def.type)
254
- };
255
- case "ZodString": return { type: "string" };
256
- case "ZodNumber": return { type: "number" };
257
- case "ZodBoolean": return { type: "boolean" };
258
- case "ZodEnum": return {
259
- type: "string",
260
- enum: def.values
261
- };
262
- case "ZodOptional": return zodToJsonSchema(def.innerType);
263
- default: return { type: "string" };
264
- }
200
+ }],
201
+ ...opts.systemPrompt && { system: opts.systemPrompt },
202
+ ...opts.temperature !== void 0 && { temperature: opts.temperature }
203
+ }),
204
+ responseMapper: (res) => res.content.find((item) => item.type === "text")?.text ?? ""
205
+ });
265
206
  }
266
207
  /**
267
- * Create an OpenAI provider with default settings
208
+ * Create a Groq LLM provider.
209
+ * Groq provides fast inference for open-source models.
210
+ *
211
+ * @example
212
+ * ```ts
213
+ * const provider = createGroq({ model: 'llama-3.1-70b-versatile' });
214
+ * ```
268
215
  */
269
- function createOpenAI(config) {
270
- return new OpenAIProvider(config);
216
+ function createGroq(options) {
217
+ const apiKey = options?.apiKey ?? process.env.GROQ_API_KEY;
218
+ if (!apiKey) throw new Error("Groq API key required. Set GROQ_API_KEY env var or pass apiKey option.");
219
+ return new HttpLLMProvider({
220
+ baseUrl: "https://api.groq.com/openai/v1/chat/completions",
221
+ model: options?.model ?? "llama-3.1-70b-versatile",
222
+ headers: { Authorization: `Bearer ${apiKey}` },
223
+ jsonMode: true
224
+ });
271
225
  }
272
226
  /**
273
- * Create an Ollama provider
227
+ * Create an Ollama LLM provider for local models.
274
228
  *
275
229
  * @example
276
230
  * ```ts
277
231
  * const provider = createOllama({ model: 'llama3.2' });
278
232
  * ```
279
233
  */
280
- function createOllama(config = { model: "llama3.2" }) {
281
- return new OpenAIProvider({
282
- baseUrl: `http://localhost:${config.port ?? 11434}/v1`,
283
- model: config.model,
284
- apiKey: "ollama"
234
+ function createOllama(options) {
235
+ return new HttpLLMProvider({
236
+ baseUrl: options.baseUrl ?? "http://localhost:11434/v1/chat/completions",
237
+ model: options.model,
238
+ requireHttps: false,
239
+ allowPrivate: true
285
240
  });
286
241
  }
287
242
  /**
288
- * Create an LM Studio provider
243
+ * Create an LM Studio LLM provider for local models.
289
244
  *
290
245
  * @example
291
246
  * ```ts
292
247
  * const provider = createLMStudio({ model: 'local-model' });
293
248
  * ```
294
249
  */
295
- function createLMStudio(config = { model: "local-model" }) {
296
- return new OpenAIProvider({
297
- baseUrl: `http://localhost:${config.port ?? 1234}/v1`,
298
- model: config.model,
299
- apiKey: "lm-studio"
250
+ function createLMStudio(options) {
251
+ return new HttpLLMProvider({
252
+ baseUrl: options.baseUrl ?? "http://localhost:1234/v1/chat/completions",
253
+ model: options.model,
254
+ requireHttps: false,
255
+ allowPrivate: true
256
+ });
257
+ }
258
+ /**
259
+ * Create a Together AI LLM provider.
260
+ *
261
+ * @example
262
+ * ```ts
263
+ * const provider = createTogether({ model: 'meta-llama/Llama-3.2-3B-Instruct-Turbo' });
264
+ * ```
265
+ */
266
+ function createTogether(options) {
267
+ const apiKey = options?.apiKey ?? process.env.TOGETHER_API_KEY;
268
+ if (!apiKey) throw new Error("Together API key required. Set TOGETHER_API_KEY env var or pass apiKey option.");
269
+ return new HttpLLMProvider({
270
+ baseUrl: "https://api.together.xyz/v1/chat/completions",
271
+ model: options?.model ?? "meta-llama/Llama-3.2-3B-Instruct-Turbo",
272
+ headers: { Authorization: `Bearer ${apiKey}` },
273
+ jsonMode: true
274
+ });
275
+ }
276
+ /**
277
+ * Create an OpenRouter LLM provider.
278
+ * OpenRouter provides access to many models through a unified API.
279
+ *
280
+ * @example
281
+ * ```ts
282
+ * const provider = createOpenRouter({
283
+ * model: 'anthropic/claude-3.5-sonnet',
284
+ * });
285
+ * ```
286
+ */
287
+ function createOpenRouter(options) {
288
+ const apiKey = options.apiKey ?? process.env.OPENROUTER_API_KEY;
289
+ if (!apiKey) throw new Error("OpenRouter API key required. Set OPENROUTER_API_KEY env var or pass apiKey option.");
290
+ const headers = { Authorization: `Bearer ${apiKey}` };
291
+ if (options.siteUrl) headers["HTTP-Referer"] = options.siteUrl;
292
+ if (options.siteName) headers["X-Title"] = options.siteName;
293
+ return new HttpLLMProvider({
294
+ baseUrl: "https://openrouter.ai/api/v1/chat/completions",
295
+ model: options.model,
296
+ headers
300
297
  });
301
298
  }
299
+ /**
300
+ * Create a generic HTTP LLM provider.
301
+ * Use this for any OpenAI-compatible API.
302
+ *
303
+ * @example
304
+ * ```ts
305
+ * const provider = createHttpLLM({
306
+ * baseUrl: 'https://my-api.com/v1/chat/completions',
307
+ * model: 'my-model',
308
+ * headers: { Authorization: 'Bearer ...' },
309
+ * });
310
+ * ```
311
+ */
312
+ function createHttpLLM(config) {
313
+ return new HttpLLMProvider(config);
314
+ }
302
315
 
303
316
  //#endregion
304
- exports.AnthropicProvider = AnthropicProvider;
305
317
  exports.ClassifySchema = require_enhancer.ClassifySchema;
306
318
  exports.EntitiesSchema = require_enhancer.EntitiesSchema;
307
- exports.OpenAIProvider = OpenAIProvider;
319
+ exports.HttpLLMProvider = HttpLLMProvider;
308
320
  exports.SummarySchema = require_enhancer.SummarySchema;
309
321
  exports.TagsSchema = require_enhancer.TagsSchema;
310
322
  exports.ask = require_enhancer.ask;
323
+ exports.createAnthropic = createAnthropic;
324
+ exports.createGroq = createGroq;
325
+ exports.createHttpLLM = createHttpLLM;
311
326
  exports.createLMStudio = createLMStudio;
312
327
  exports.createOllama = createOllama;
313
328
  exports.createOpenAI = createOpenAI;
329
+ exports.createOpenRouter = createOpenRouter;
330
+ exports.createTogether = createTogether;
314
331
  exports.enhance = require_enhancer.enhance;
315
332
  exports.extract = require_enhancer.extract;
333
+ exports.zodToJsonSchema = zodToJsonSchema;
316
334
  //# sourceMappingURL=index.cjs.map