@ooneex/ai 1.1.14 → 1.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -29,11 +29,6 @@ class AiException extends Exception {
  this.name = "AiException";
  }
  }
- // src/AnthropicAi.ts
- import { AppEnv } from "@ooneex/app-env";
- import { inject } from "@ooneex/container";
- import { createAnthropicChat } from "@tanstack/ai-anthropic";
-
  // src/BaseAi.ts
  import { jsonSchemaToTypeString } from "@ooneex/validation";
  import { chat } from "@tanstack/ai";
@@ -8269,8 +8264,8 @@ ${config3.context}`);
  toMessages(messages) {
  return messages.map((msg) => ({ role: msg.role, content: msg.content }));
  }
- async executeChat(content, systemPrompt, config3) {
- const adapter = this.createChatAdapter(config3);
+ async executeChat(content, systemPrompt, config3, task) {
+ const adapter = this.createChatAdapter(config3, task);
  const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
  const userMessage = { role: "user", content: `${systemPrompt}

@@ -8286,70 +8281,70 @@ ${content}` };
  }
  async makeShorter(content, config3) {
  const prompt = this.buildPrompt("Condense the following text while preserving its core meaning and key information. Remove redundancies and unnecessary details.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "makeShorter");
  }
  async makeLonger(content, config3) {
  const prompt = this.buildPrompt("Expand the following text by adding relevant details, examples, and explanations while maintaining coherence and the original message.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "makeLonger");
  }
  async summarize(content, config3) {
  const prompt = this.buildPrompt("Provide a clear and comprehensive summary of the following text, capturing all essential points and main ideas.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "summarize");
  }
  async concise(content, config3) {
  const prompt = this.buildPrompt("Rewrite the following text in the most concise form possible without losing essential meaning.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "concise");
  }
  async paragraph(content, config3) {
  const prompt = this.buildPrompt("Transform the following text into well-structured paragraph format with clear topic sentences and logical flow.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "paragraph");
  }
  async bulletPoints(content, config3) {
  const prompt = this.buildPrompt("Convert the following text into a clear, organized list of bullet points highlighting the key information.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "bulletPoints");
  }
  async rephrase(content, config3) {
  const prompt = this.buildPrompt("Rephrase the following text using different words and sentence structures while preserving the original meaning.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "rephrase");
  }
  async simplify(content, config3) {
  const prompt = this.buildPrompt("Simplify the following text by using plain language, shorter sentences, and avoiding jargon. Make it accessible to a general audience.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "simplify");
  }
  async changeTone(content, tone, config3) {
  const prompt = this.buildPrompt(`Rewrite the following text in a ${tone} tone while maintaining clarity.`, config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "changeTone");
  }
  async proofread(content, config3) {
  const prompt = this.buildPrompt("Proofread and correct the following text for grammar, spelling, punctuation, and clarity issues. Return the corrected version.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "proofread");
  }
  async translate(content, config3) {
  const targetLanguage = config3?.language ?? "en";
  const prompt = this.buildPrompt(`Translate the following text accurately into ${targetLanguage}, preserving the original meaning, tone, and nuance.`, config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "translate");
  }
  async explain(content, config3) {
  const prompt = this.buildPrompt("Provide a clear explanation of the following text, breaking down complex concepts and clarifying the meaning.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "explain");
  }
  async expandIdeas(content, config3) {
  const prompt = this.buildPrompt("Expand on the ideas presented in the following text by exploring related concepts, implications, and additional perspectives.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "expandIdeas");
  }
  async fixGrammar(content, config3) {
  const prompt = this.buildPrompt("Fix all grammatical errors in the following text, including subject-verb agreement, tense consistency, and sentence structure.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "fixGrammar");
  }
  async generateTitle(content, config3) {
  const prompt = this.buildPrompt("Generate a compelling, descriptive title for the following text that captures its main theme and engages readers.", config3);
- return this.executeChat(content, prompt, config3);
+ return this.executeChat(content, prompt, config3, "generateTitle");
  }
  async extractKeywords(content, config3) {
  const count = config3?.count;
  const countInstruction = count ? ` Return exactly ${count} keywords.` : "";
  const prompt = this.buildPrompt(`Extract the most important keywords and key phrases from the following text. Return only the keywords as a comma-separated list without numbering, brackets, or additional formatting.${countInstruction}`, config3);
- const result = await this.executeChat(content, prompt, config3);
+ const result = await this.executeChat(content, prompt, config3, "extractKeywords");
  const items = result.split(",").map((keyword) => keyword.trim()).filter((keyword) => keyword.length > 0);
  return count ? items.slice(0, count) : items;
  }
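
Note: executeChat and the adapter factories now take a task name, and each text helper above passes its own method name through, so OpenRouterAi (introduced later in this diff) can pick a per-task default model from its DEFAULT_MODELS table; an explicit config.model still wins. A minimal TypeScript sketch; `ai` is assumed to be an OpenRouterAi instance resolved from the application container, and the config fields are inferred from the dist code above rather than from documented types:

  // Hypothetical usage; verify names against the package's type definitions.
  const summary = await ai.summarize(article); // routed to deepseek/deepseek-v3.2 by default
  const keywords = await ai.extractKeywords(article, { count: 5 }); // at most 5 trimmed strings
  const fixed = await ai.proofread(draft, { model: "anthropic/claude-sonnet-4.6" }); // explicit model overrides the task default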
@@ -8357,7 +8352,7 @@ ${content}` };
  const count = config3?.count;
  const countInstruction = count ? ` Return exactly ${count} categories.` : "";
  const prompt = this.buildPrompt(`Identify the most relevant categories or topics that best describe the following text. Return only the categories as a comma-separated list without numbering, brackets, or additional formatting.${countInstruction}`, config3);
- const result = await this.executeChat(content, prompt, config3);
+ const result = await this.executeChat(content, prompt, config3, "extractCategories");
  const items = result.split(",").map((category) => category.trim()).filter((category) => category.length > 0);
  return count ? items.slice(0, count) : items;
  }
@@ -8365,7 +8360,7 @@ ${content}` };
  const count = config3?.count;
  const countInstruction = count ? ` Return at most ${count} topics.` : "";
  const prompt = this.buildPrompt(`Extract the main topics discussed in the following text. Return only the topics as a comma-separated list without numbering, brackets, or additional formatting.${countInstruction}`, config3);
- const result = await this.executeChat(content, prompt, config3);
+ const result = await this.executeChat(content, prompt, config3, "extractTopics");
  const items = result.split(",").map((topic) => topic.trim()).filter((topic) => topic.length > 0);
  return count ? items.slice(0, count) : items;
  }
@@ -8393,7 +8388,7 @@ ${content}` };
  jsonFormat
  ].join(`
  `), config3);
- const result = await this.executeChat(subject, prompt, config3);
+ const result = await this.executeChat(subject, prompt, config3, "generateCaseQuestion");
  const cleaned = result.replace(/```json\n?|\n?```/g, "").trim();
  const parsed2 = JSON.parse(cleaned);
  return parsed2;
@@ -8413,7 +8408,7 @@ ${content}` };
  `{"front":"...","back":"...","explanation":"..."}`
  ].join(`
  `), config3);
- const result = await this.executeChat(subject, prompt, config3);
+ const result = await this.executeChat(subject, prompt, config3, "generateFlashcard");
  const cleaned = result.replace(/```json\n?|\n?```/g, "").trim();
  const parsed2 = JSON.parse(cleaned);
  return parsed2;
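
Note: generateFlashcard strips any Markdown code fences from the reply and parses it as JSON, so the return value should follow the {"front":"...","back":"...","explanation":"..."} shape requested in the prompt. A hedged sketch; the return type is inferred from that format string, not from published types:

  // Hypothetical usage; `ai` as in the sketch above.
  const card = await ai.generateFlashcard("photosynthesis");
  console.log(card.front); // question side
  console.log(card.back); // answer side
  console.log(card.explanation); // reasoning behind the answer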
@@ -8435,13 +8430,32 @@ ${content}` };
  `{"question":"...","choices":[{"text":"...","isCorrect":true/false,"explanation":"..."},...]}`
  ].join(`
  `), config3);
- const result = await this.executeChat(subject, prompt, config3);
+ const result = await this.executeChat(subject, prompt, config3, "generateQuestion");
  const cleaned = result.replace(/```json\n?|\n?```/g, "").trim();
  const parsed2 = JSON.parse(cleaned);
  return parsed2;
  }
+ async describeImage(source, config3) {
+ const adapter = this.createChatAdapter(config3, "describeImage");
+ const systemPrompt = this.buildPrompt("Describe the provided image in detail. Include the main subject, visual elements, colors, composition, and any notable features. Provide a clear, objective description without personal opinions or interpretations.", config3);
+ const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
+ const userMessage = {
+ role: "user",
+ content: [
+ { type: "text", content: systemPrompt },
+ { type: "image", source }
+ ]
+ };
+ const messages = [...baseMessages, userMessage];
+ const result = await chat({
+ adapter,
+ messages,
+ stream: false
+ });
+ return result.trim();
+ }
  async imageToMarkdown(source, config3) {
- const adapter = this.createChatAdapter(config3);
+ const adapter = this.createChatAdapter(config3, "imageToMarkdown");
  const systemPrompt = this.buildPrompt("Convert the content of the provided image into well-structured Markdown. Preserve the document structure including headings, lists, tables, code blocks, and formatting. Transcribe all visible text accurately.", config3);
  const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
  const userMessage = {
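
Note: describeImage is new in 1.2.x. Like imageToMarkdown just above, it sends a two-part user message (a text part carrying the prompt plus an { type: "image", source } part) through the task-aware chat adapter, defaulting to google/gemini-2.5-flash. A sketch; the diff only shows `source` being forwarded verbatim, so its shape here is an assumption:

  // Hypothetical usage; imageSource's shape must match the package's types.
  const description = await ai.describeImage(imageSource);
  const markdown = await ai.imageToMarkdown(imageSource); // same call pattern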
@@ -8459,8 +8473,65 @@ ${content}` };
  });
  return result.trim();
  }
+ async imageToText(source, config3) {
+ const adapter = this.createChatAdapter(config3, "imageToText");
+ const systemPrompt = this.buildPrompt("Extract and return all visible text from the provided image. Preserve the reading order and paragraph structure. Do not add any interpretation, commentary, or formatting beyond what is visible in the image.", config3);
+ const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
+ const userMessage = {
+ role: "user",
+ content: [
+ { type: "text", content: systemPrompt },
+ { type: "image", source }
+ ]
+ };
+ const messages = [...baseMessages, userMessage];
+ const result = await chat({
+ adapter,
+ messages,
+ stream: false
+ });
+ return result.trim();
+ }
+ async speechToText(source, config3) {
+ const adapter = this.createChatAdapter(config3, "speechToText");
+ const systemPrompt = this.buildPrompt("Transcribe the provided audio into text. Preserve the original speech content accurately, including punctuation and paragraph breaks where appropriate. Do not add any interpretation, commentary, or summary.", config3);
+ const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
+ const userMessage = {
+ role: "user",
+ content: [
+ { type: "text", content: systemPrompt },
+ { type: "audio", source }
+ ]
+ };
+ const messages = [...baseMessages, userMessage];
+ const result = await chat({
+ adapter,
+ messages,
+ stream: false
+ });
+ return result.trim();
+ }
+ async videoToText(source, config3) {
+ const adapter = this.createChatAdapter(config3, "videoToText");
+ const systemPrompt = this.buildPrompt("Describe the content of the provided video in detail. Include the main events, actions, visual elements, and any spoken dialogue or text visible in the video. Provide a clear, chronological description.", config3);
+ const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
+ const userMessage = {
+ role: "user",
+ content: [
+ { type: "text", content: systemPrompt },
+ { type: "video", source }
+ ]
+ };
+ const messages = [...baseMessages, userMessage];
+ const result = await chat({
+ adapter,
+ messages,
+ stream: false
+ });
+ return result.trim();
+ }
  async run(prompt, config3) {
- const adapter = this.createRunAdapter(config3);
+ const adapter = this.createRunAdapter(config3, "run");
  let defaultPrompt = "Process the following request and respond appropriately. If the request asks for structured data, return valid JSON.";
  if (config3?.output) {
  const schema2 = config3.output.toJsonSchema();
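
Note: imageToText, speechToText, and videoToText are also new and reuse the describeImage pattern, differing only in the task prompt and the content part type ("image", "audio", or "video"). In particular, speech transcription now goes through a chat adapter (google/gemini-2.5-flash by default) rather than the removed OpenAI transcription path shown later in this diff. A hedged sketch; the source shapes are again assumptions:

  // Hypothetical usage sketch.
  const ocr = await ai.imageToText(imageSource); // visible text in reading order
  const transcript = await ai.speechToText(audioSource); // verbatim transcription
  const sceneLog = await ai.videoToText(videoSource); // chronological description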
@@ -8498,7 +8569,7 @@ ${prompt}` };
  return parsed2;
  }
  async* runStream(prompt, config3) {
- const adapter = this.createRunAdapter(config3);
+ const adapter = this.createRunAdapter(config3, "runStream");
  const defaultPrompt = "Process the following request and respond appropriately.";
  const systemPrompt = this.buildPrompt(defaultPrompt, config3);
  const baseMessages = config3?.messages ? this.toMessages(config3.messages) : [];
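
Note: runStream remains an async generator; only its adapter creation gained the "runStream" task name (deepseek/deepseek-v3.2 by default). A minimal consumption sketch; the chunk type is not visible in this diff, so the string coercion is an assumption:

  // Hypothetical usage sketch.
  for await (const chunk of ai.runStream("List three uses for OpenRouter")) {
    process.stdout.write(String(chunk));
  }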
@@ -8519,7 +8590,6 @@ ${prompt}` };
  }
  }
  }
-
  // src/decorators.ts
  import { container, EContainerScope } from "@ooneex/container";
  var decorator = {
@@ -8529,360 +8599,226 @@ var decorator = {
  };
  }
  };
-
- // src/AnthropicAi.ts
- class AnthropicAi extends BaseAi {
+ // src/OpenRouterAi.ts
+ import { AppEnv } from "@ooneex/app-env";
+ import { inject } from "@ooneex/container";
+ import { createOpenRouterText } from "@tanstack/ai-openrouter";
+ class OpenRouterAi extends BaseAi {
  env;
+ static DEFAULT_MODELS = {
+ makeShorter: "qwen/qwen3.6-plus",
+ makeLonger: "qwen/qwen3.6-plus",
+ concise: "qwen/qwen3.6-plus",
+ paragraph: "qwen/qwen3.6-plus",
+ bulletPoints: "qwen/qwen3.6-plus",
+ rephrase: "qwen/qwen3.6-plus",
+ simplify: "qwen/qwen3.6-plus",
+ changeTone: "qwen/qwen3.6-plus",
+ proofread: "anthropic/claude-sonnet-4.6",
+ fixGrammar: "anthropic/claude-sonnet-4.6",
+ translate: "qwen/qwen3.6-plus",
+ summarize: "deepseek/deepseek-v3.2",
+ explain: "deepseek/deepseek-v3.2",
+ expandIdeas: "deepseek/deepseek-v3.2",
+ generateTitle: "qwen/qwen3.6-plus",
+ extractKeywords: "qwen/qwen3.6-plus",
+ extractCategories: "qwen/qwen3.6-plus",
+ extractTopics: "qwen/qwen3.6-plus",
+ generateCaseQuestion: "anthropic/claude-sonnet-4.6",
+ generateFlashcard: "anthropic/claude-sonnet-4.6",
+ generateQuestion: "anthropic/claude-sonnet-4.6",
+ describeImage: "google/gemini-2.5-flash",
+ imageToMarkdown: "google/gemini-2.5-flash",
+ imageToText: "google/gemini-2.5-flash",
+ textToImage: "openai/dall-e-3",
+ imageToImage: "openai/dall-e-2",
+ speechToText: "google/gemini-2.5-flash",
+ textToSpeech: "openai/tts-1",
+ videoToText: "google/gemini-2.5-flash",
+ textToVideo: "google/veo-3",
+ run: "deepseek/deepseek-v3.2",
+ runStream: "deepseek/deepseek-v3.2"
+ };
  constructor(env2) {
  super();
  this.env = env2;
  }
  getApiKey(config3) {
- const apiKey = config3?.apiKey || this.env.ANTHROPIC_API_KEY;
+ const apiKey = config3?.apiKey || this.env.OPENROUTER_API_KEY;
  if (!apiKey) {
- throw new AiException("Anthropic API key is required. Provide an API key through config options or set the ANTHROPIC_API_KEY environment variable.", "API_KEY_REQUIRED");
+ throw new AiException("OpenRouter API key is required. Provide an API key through config options or set the OPENROUTER_API_KEY environment variable.", "API_KEY_REQUIRED");
  }
  return apiKey;
  }
- createChatAdapter(config3) {
- const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "claude-sonnet-4-5";
- return createAnthropicChat(model, apiKey);
- }
- createRunAdapter(config3) {
- return this.createChatAdapter(config3);
- }
- }
- AnthropicAi = __legacyDecorateClassTS([
- decorator.ai(),
- __legacyDecorateParamTS(0, inject(AppEnv)),
- __legacyMetadataTS("design:paramtypes", [
- typeof AppEnv === "undefined" ? Object : AppEnv
- ])
- ], AnthropicAi);
- // src/GeminiAi.ts
- import { AppEnv as AppEnv2 } from "@ooneex/app-env";
- import { inject as inject2 } from "@ooneex/container";
- import { generateImage, generateSpeech } from "@tanstack/ai";
- import { createGeminiChat, createGeminiImage, createGeminiSpeech } from "@tanstack/ai-gemini";
- class GeminiAi extends BaseAi {
- env;
- constructor(env2) {
- super();
- this.env = env2;
- }
- getApiKey(config3) {
- const apiKey = config3?.apiKey || this.env.GEMINI_API_KEY;
- if (!apiKey) {
- throw new AiException("Gemini API key is required. Provide an API key through config options or set the GEMINI_API_KEY environment variable.", "API_KEY_REQUIRED");
+ async fetchWithRetry(url2, options, retries = 3, delayMs = 1000) {
+ for (let attempt = 0;attempt <= retries; attempt++) {
+ const response = await fetch(url2, options);
+ if (response.ok || attempt === retries)
+ return response;
+ if ([429, 500, 502, 503, 504].includes(response.status)) {
+ await new Promise((resolve) => setTimeout(resolve, delayMs * 2 ** attempt));
+ continue;
+ }
+ return response;
  }
- return apiKey;
- }
- createChatAdapter(config3) {
- const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "gemini-2.0-flash";
- return createGeminiChat(model, apiKey);
+ throw new AiException("Max retries exceeded", "MAX_RETRIES_EXCEEDED");
  }
- createRunAdapter(config3) {
- const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "gemini-2.5-pro";
- return createGeminiChat(model, apiKey);
- }
- async textToSpeech(text, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "gemini-2.5-flash-preview-tts";
- const adapter = createGeminiSpeech(model, apiKey);
- const modelOptions = {};
- if (options?.voice) {
- modelOptions.voiceConfig = {
- prebuiltVoiceConfig: {
- voiceName: options.voice
- }
- };
- }
- if (options?.instructions) {
- modelOptions.systemInstruction = options.instructions;
- }
- if (options?.language) {
- modelOptions.languageCode = options.language;
- }
- const speechOptions = { adapter, text };
- if (options?.format) {
- speechOptions.format = options.format;
- }
- if (options?.speed) {
- speechOptions.speed = options.speed;
+ getDefaultModel(task) {
+ if (task && task in OpenRouterAi.DEFAULT_MODELS) {
+ return OpenRouterAi.DEFAULT_MODELS[task];
  }
- if (Object.keys(modelOptions).length > 0) {
- speechOptions.modelOptions = modelOptions;
- }
- return generateSpeech(speechOptions);
+ return "google/gemini-2.5-flash";
  }
- async generateImage(prompt, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "imagen-4.0-generate-001";
- const adapter = createGeminiImage(model, apiKey);
- const imageOptions = { adapter, prompt };
- if (options?.numberOfImages) {
- imageOptions.numberOfImages = options.numberOfImages;
- }
- if (options?.size) {
- imageOptions.size = options.size;
- }
- const modelOptions = {};
- if (options?.aspectRatio) {
- modelOptions.aspectRatio = options.aspectRatio;
- }
- if (options?.personGeneration) {
- modelOptions.personGeneration = options.personGeneration;
- }
- if (options?.negativePrompt) {
- modelOptions.negativePrompt = options.negativePrompt;
- }
- if (options?.addWatermark !== undefined) {
- modelOptions.addWatermark = options.addWatermark;
- }
- if (options?.outputMimeType) {
- modelOptions.outputMimeType = options.outputMimeType;
- }
- if (Object.keys(modelOptions).length > 0) {
- imageOptions.modelOptions = modelOptions;
+ async textToImage(prompt, config3) {
+ const apiKey = this.getApiKey(config3);
+ const model = config3?.model ?? OpenRouterAi.DEFAULT_MODELS.textToImage ?? "openai/dall-e-3";
+ const response = await this.fetchWithRetry("https://openrouter.ai/api/v1/images/generations", {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model,
+ prompt,
+ size: config3?.size ?? "1024x1024",
+ quality: config3?.quality ?? "standard"
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new AiException(`OpenRouter image generation failed: ${error}`, "IMAGE_GENERATION_FAILED");
  }
- return generateImage(imageOptions);
- }
- }
- GeminiAi = __legacyDecorateClassTS([
- decorator.ai(),
- __legacyDecorateParamTS(0, inject2(AppEnv2)),
- __legacyMetadataTS("design:paramtypes", [
- typeof AppEnv2 === "undefined" ? Object : AppEnv2
- ])
- ], GeminiAi);
- // src/GroqAi.ts
- import { AppEnv as AppEnv3 } from "@ooneex/app-env";
- import { inject as inject3 } from "@ooneex/container";
- import { random } from "@ooneex/utils";
- import { createGroqText } from "@tanstack/ai-groq";
- class GroqAi extends BaseAi {
- env;
- constructor(env2) {
- super();
- this.env = env2;
+ const result = await response.json();
+ const image = result.data[0];
+ return {
+ url: image.url,
+ revisedPrompt: image.revised_prompt
+ };
  }
- getApiKey(config3) {
- const apiKey = config3?.apiKey || this.env.GROQ_API_KEY;
- if (!apiKey) {
- throw new AiException("Groq API key is required. Provide an API key through config options or set the GROQ_API_KEY environment variable.", "API_KEY_REQUIRED");
+ async imageToImage(source, prompt, config3) {
+ const apiKey = this.getApiKey(config3);
+ const model = config3?.model ?? OpenRouterAi.DEFAULT_MODELS.imageToImage ?? "openai/dall-e-2";
+ const response = await this.fetchWithRetry("https://openrouter.ai/api/v1/images/edits", {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model,
+ prompt,
+ image: source.value,
+ size: config3?.size ?? "1024x1024"
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new AiException(`OpenRouter image editing failed: ${error}`, "IMAGE_EDITING_FAILED");
  }
- return apiKey;
+ const result = await response.json();
+ const image = result.data[0];
+ return {
+ url: image.url,
+ revisedPrompt: image.revised_prompt
+ };
  }
- createChatAdapter(config3) {
+ async textToSpeech(text, config3) {
  const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "llama-3.3-70b-versatile";
- return createGroqText(model, apiKey);
- }
- createRunAdapter(config3) {
- return this.createChatAdapter(config3);
- }
- async textToSpeech(text, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "canopylabs/orpheus-v1-english";
- const voice = options?.voice ?? "autumn";
- const format = options?.format ?? "wav";
- const body = {
- model,
- input: text,
- voice,
- response_format: format
- };
- if (options?.sampleRate) {
- body.sample_rate = options.sampleRate;
- }
- const response = await fetch("https://api.groq.com/openai/v1/audio/speech", {
+ const model = config3?.model ?? OpenRouterAi.DEFAULT_MODELS.textToSpeech ?? "openai/tts-1";
+ const format = config3?.format ?? "mp3";
+ const response = await this.fetchWithRetry("https://openrouter.ai/api/v1/audio/speech", {
  method: "POST",
  headers: {
  Authorization: `Bearer ${apiKey}`,
  "Content-Type": "application/json"
  },
- body: JSON.stringify(body)
+ body: JSON.stringify({
+ model,
+ input: text,
+ voice: config3?.voice ?? "alloy",
+ response_format: format,
+ speed: config3?.speed ?? 1
+ })
  });
  if (!response.ok) {
  const error = await response.text();
- throw new AiException(`Groq TTS request failed (${response.status}): ${error}`, "TTS_FAILED");
+ throw new AiException(`OpenRouter speech generation failed: ${error}`, "SPEECH_GENERATION_FAILED");
  }
- const arrayBuffer = await response.arrayBuffer();
- const audio = Buffer.from(arrayBuffer).toString("base64");
+ const buffer = await response.arrayBuffer();
+ const audio = Buffer.from(buffer).toString("base64");
  return {
- id: response.headers.get("x-request-id") ?? random.nanoid(),
- model,
  audio,
- format
+ format,
+ contentType: `audio/${format}`
  };
  }
- }
- GroqAi = __legacyDecorateClassTS([
- decorator.ai(),
- __legacyDecorateParamTS(0, inject3(AppEnv3)),
- __legacyMetadataTS("design:paramtypes", [
- typeof AppEnv3 === "undefined" ? Object : AppEnv3
- ])
- ], GroqAi);
- // src/OllamaAi.ts
- import { AppEnv as AppEnv4 } from "@ooneex/app-env";
- import { inject as inject4 } from "@ooneex/container";
- import { createOllamaChat } from "@tanstack/ai-ollama";
- class OllamaAi extends BaseAi {
- env;
- constructor(env2) {
- super();
- this.env = env2;
- }
- getHost(config3) {
- return config3?.host || this.env.OLLAMA_HOST || "http://localhost:11434";
- }
- createChatAdapter(config3) {
- const host = this.getHost(config3);
- const model = config3?.model ?? "llama3";
- return createOllamaChat(model, host);
- }
- createRunAdapter(config3) {
- return this.createChatAdapter(config3);
- }
- }
- OllamaAi = __legacyDecorateClassTS([
- decorator.ai(),
- __legacyDecorateParamTS(0, inject4(AppEnv4)),
- __legacyMetadataTS("design:paramtypes", [
- typeof AppEnv4 === "undefined" ? Object : AppEnv4
- ])
- ], OllamaAi);
- // src/OpenAi.ts
- import { AppEnv as AppEnv5 } from "@ooneex/app-env";
- import { inject as inject5 } from "@ooneex/container";
- import { generateImage as generateImage2, generateSpeech as generateSpeech2, generateTranscription } from "@tanstack/ai";
- import {
- createOpenaiChat,
- createOpenaiImage,
- createOpenaiSpeech,
- createOpenaiTranscription
- } from "@tanstack/ai-openai";
- class OpenAi extends BaseAi {
- env;
- constructor(env2) {
- super();
- this.env = env2;
- }
- getApiKey(config3) {
- const apiKey = config3?.apiKey || this.env.OPENAI_API_KEY;
- if (!apiKey) {
- throw new AiException("OpenAI API key is required. Provide an API key through config options or set the OPENAI_API_KEY environment variable.", "API_KEY_REQUIRED");
- }
- return apiKey;
- }
- createChatAdapter(config3) {
+ async textToVideo(prompt, config3) {
  const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "gpt-4o-mini";
- return createOpenaiChat(model, apiKey);
+ const model = config3?.model ?? OpenRouterAi.DEFAULT_MODELS.textToVideo ?? "google/veo-3";
+ const response = await this.fetchWithRetry("https://openrouter.ai/api/v1/video/generations", {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({ model, prompt })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new AiException(`OpenRouter video generation failed: ${error}`, "VIDEO_GENERATION_FAILED");
+ }
+ const result = await response.json();
+ return {
+ jobId: result.id,
+ url: result.url,
+ status: result.status,
+ error: result.error
+ };
  }
- createRunAdapter(config3) {
+ async getVideoStatus(jobId, config3) {
  const apiKey = this.getApiKey(config3);
- const model = config3?.model ?? "gpt-4o";
- return createOpenaiChat(model, apiKey);
- }
- async textToSpeech(text, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "tts-1";
- const adapter = createOpenaiSpeech(model, apiKey);
- const speechOptions = { adapter, text };
- if (options?.voice) {
- speechOptions.voice = options.voice;
- }
- if (options?.format) {
- speechOptions.format = options.format;
- }
- if (options?.speed) {
- speechOptions.speed = options.speed;
- }
- const instructionParts = [];
- if (options?.language) {
- instructionParts.push(`Speak in ${options.language}.`);
- }
- if (options?.instructions) {
- instructionParts.push(options.instructions);
- }
- if (instructionParts.length > 0) {
- speechOptions.modelOptions = { instructions: instructionParts.join(" ") };
+ const response = await this.fetchWithRetry(`https://openrouter.ai/api/v1/video/generations/${jobId}`, {
+ method: "GET",
+ headers: {
+ Authorization: `Bearer ${apiKey}`
+ }
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new AiException(`OpenRouter video status check failed: ${error}`, "VIDEO_STATUS_FAILED");
  }
- return generateSpeech2(speechOptions);
+ const result = await response.json();
+ return {
+ jobId: result.id,
+ url: result.url,
+ status: result.status,
+ error: result.error
+ };
  }
- async speechToText(audio, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "gpt-4o-transcribe";
- const adapter = createOpenaiTranscription(model, apiKey);
- const transcriptionOptions = { adapter, audio };
- if (options?.language) {
- transcriptionOptions.language = options.language;
- }
- if (options?.prompt) {
- transcriptionOptions.prompt = options.prompt;
- }
- if (options?.responseFormat) {
- transcriptionOptions.responseFormat = options.responseFormat;
- }
- if (options?.modelOptions) {
- transcriptionOptions.modelOptions = options.modelOptions;
- }
- return generateTranscription(transcriptionOptions);
+ createChatAdapter(config3, task) {
+ const apiKey = this.getApiKey(config3);
+ const model = config3?.model ?? this.getDefaultModel(task);
+ return createOpenRouterText(model, apiKey);
  }
- async generateImage(prompt, options) {
- const apiKey = this.getApiKey(options);
- const model = options?.model ?? "dall-e-3";
- const adapter = createOpenaiImage(model, apiKey);
- const imageOptions = { adapter, prompt };
- if (options?.numberOfImages) {
- imageOptions.numberOfImages = options.numberOfImages;
- }
- if (options?.size) {
- imageOptions.size = options.size;
- }
- const modelOptions = {};
- if (options?.quality) {
- modelOptions.quality = options.quality;
- }
- if (options?.background) {
- modelOptions.background = options.background;
- }
- if (options?.outputFormat) {
- modelOptions.output_format = options.outputFormat;
- }
- if (options?.moderation) {
- modelOptions.moderation = options.moderation;
- }
- if (options?.style) {
- modelOptions.style = options.style;
- }
- if (Object.keys(modelOptions).length > 0) {
- imageOptions.modelOptions = modelOptions;
- }
- return generateImage2(imageOptions);
+ createRunAdapter(config3, task) {
+ const apiKey = this.getApiKey(config3);
+ const model = config3?.model ?? this.getDefaultModel(task);
+ return createOpenRouterText(model, apiKey);
  }
  }
- OpenAi = __legacyDecorateClassTS([
+ OpenRouterAi = __legacyDecorateClassTS([
  decorator.ai(),
- __legacyDecorateParamTS(0, inject5(AppEnv5)),
+ __legacyDecorateParamTS(0, inject(AppEnv)),
  __legacyMetadataTS("design:paramtypes", [
- typeof AppEnv5 === "undefined" ? Object : AppEnv5
+ typeof AppEnv === "undefined" ? Object : AppEnv
  ])
- ], OpenAi);
+ ], OpenRouterAi);
  export {
  decorator,
- OpenAi,
- OllamaAi,
- GroqAi,
- GeminiAi,
+ OpenRouterAi,
  BaseAi,
- AnthropicAi,
  AiException
  };

- //# debugId=D319B9DE62941E0C64756E2164756E21
+ //# debugId=16C4BADB200DA36B64756E2164756E21
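
Summary of the breaking change: 1.2.x drops AnthropicAi, GeminiAi, GroqAi, OllamaAi, and OpenAi in favor of a single OpenRouterAi that fronts all providers through OpenRouter, picks a per-task default model from DEFAULT_MODELS, and authenticates with OPENROUTER_API_KEY. The REST-based helpers (textToImage, imageToImage, textToSpeech, textToVideo, getVideoStatus) go through fetchWithRetry, which retries 429/500/502/503/504 responses up to three times with exponential backoff (1 s, 2 s, then 4 s at the default delayMs of 1000). A migration sketch in TypeScript; constructing the class directly assumes you have an AppEnv instance, since it is normally injected via the container decorator:

  // Before (1.1.x): provider-specific classes, each with its own env var.
  //   const ai = new OpenAi(env);      // OPENAI_API_KEY
  //   const ai = new AnthropicAi(env); // ANTHROPIC_API_KEY
  // After (1.2.x): one OpenRouter-backed class.
  import { OpenRouterAi } from "@ooneex/ai";

  const ai = new OpenRouterAi(env); // env must expose OPENROUTER_API_KEY
  const title = await ai.generateTitle(post); // qwen/qwen3.6-plus unless config.model overrides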