tekimax-ts 0.1.8 → 0.1.9

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,7 +1,8 @@
  <div align="center">
  <img src="https://raw.githubusercontent.com/TEKIMAX/tekimax-ts/main/apps/docs/public/tekimax-logo.png" alt="Tekimax SDK Logo" width="200" />
  <h1>Tekimax TS</h1>
- <p><strong>A type-safe, framework-agnostic AI SDK for building AI-powered apps.</strong></p>
+ <p><strong>Universal AI Adapter Layer.</strong></p>
+ <p>A type-safe, framework-agnostic AI SDK for building AI-powered apps.</p>

  [![npm version](https://img.shields.io/npm/v/tekimax-ts.svg)](https://www.npmjs.com/package/tekimax-ts)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
@@ -12,7 +13,15 @@

  ```bash
  npm install tekimax-ts
- ```
+
+ ## 🌟 Features
+
+ - **Universal API**: One interface for all providers. Switch from OpenAI to Ollama with a single config change.
+ - **Type Safety**: End-to-end TypeScript support. Zod schemas for runtime validation.
+ - **Zero Latency**: Lightweight adapter pattern with zero runtime overhead.
+ - **Zero CVEs**: Hardened supply chain using Chainguard images.
+
+ ## 💻 Usage

  ## 💻 Usage

@@ -21,11 +30,28 @@ npm install tekimax-ts
  The `Tekimax` client is the unified entry point. It wraps any provider (OpenAI, Anthropic, Ollama, etc.) and exposes a consistent multi-modal interface.

  ```typescript
- import { Tekimax, OpenAIProvider } from 'tekimax-ts'
-
+ import {
+ Tekimax,
+ OpenAIProvider,
+ AnthropicProvider,
+ OllamaProvider,
+ GeminiProvider
+ } from 'tekimax-ts'
+
+ // OpenAI
  const client = new Tekimax({
  provider: new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY })
  })
+
+ // Anthropic
+ const claude = new Tekimax({
+ provider: new AnthropicProvider({ apiKey: process.env.ANTHROPIC_API_KEY })
+ })
+
+ // Ollama (Local)
+ const local = new Tekimax({
+ provider: new OllamaProvider({ baseUrl: 'http://localhost:11434' })
+ })
  ```

  ### 2. Multi-Modal Interfaces
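For context on the README hunks above: a minimal sketch of the "single config change" provider switch the new Features section describes, using only the constructors shown in this diff. The `PROVIDER` environment variable and the `pickProvider` helper are illustrative assumptions, not part of tekimax-ts.

```typescript
import { Tekimax, OpenAIProvider, OllamaProvider } from 'tekimax-ts'

// Hypothetical helper: choose the backend from an env var (not part of the SDK).
const pickProvider = () =>
  process.env.PROVIDER === 'ollama'
    ? new OllamaProvider({ baseUrl: 'http://localhost:11434' })
    : new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY! })

// Application code keeps using `client` unchanged whichever provider is picked.
const client = new Tekimax({ provider: pickProvider() })
```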
package/dist/index.cjs CHANGED
@@ -455,6 +455,52 @@ var GeminiProvider = class {
  return { text: "" };
  });
  }
+ async analyzeVideo(options) {
+ let videoPart;
+ const videoInput = options.video;
+ if (typeof videoInput === "string") {
+ if (videoInput.startsWith("http")) {
+ const resp = await fetch(videoInput);
+ const arrayBuffer = await resp.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString("base64");
+ videoPart = {
+ inlineData: {
+ data: base64,
+ mimeType: "video/mp4"
+ }
+ };
+ } else {
+ videoPart = {
+ inlineData: {
+ data: videoInput,
+ mimeType: "video/mp4"
+ }
+ };
+ }
+ } else if (typeof Buffer !== "undefined" && videoInput instanceof Buffer) {
+ videoPart = {
+ inlineData: {
+ data: videoInput.toString("base64"),
+ mimeType: "video/mp4"
+ }
+ };
+ } else {
+ throw new Error("Unsupported video format");
+ }
+ const model = this.client.getGenerativeModel({ model: options.model });
+ const result = await model.generateContent([
+ options.prompt || "Analyze this video",
+ videoPart
+ ]);
+ return {
+ content: result.response.text(),
+ usage: {
+ inputTokens: result.response.usageMetadata?.promptTokenCount,
+ outputTokens: result.response.usageMetadata?.candidatesTokenCount,
+ totalTokens: result.response.usageMetadata?.totalTokenCount
+ }
+ };
+ }
  async chat(options) {
  const model = this.client.getGenerativeModel({
  model: options.model || "gemini-pro",
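A minimal usage sketch for the `analyzeVideo` method added to `GeminiProvider` above, inferred from the option fields it reads (`video`, `model`, `prompt`) and the shape it returns (`content`, `usage`). The `GEMINI_API_KEY` variable, the model name, and the file path are placeholders.

```typescript
import { readFileSync } from 'node:fs'
import { GeminiProvider } from 'tekimax-ts'

const gemini = new GeminiProvider({ apiKey: process.env.GEMINI_API_KEY! })

// `video` may be an http(s) URL, a base64 string, or a Buffer, per the branches above.
const result = await gemini.analyzeVideo({
  video: readFileSync('clip.mp4'),   // Buffer branch: encoded to base64 inline data
  model: 'gemini-1.5-flash',         // placeholder model name
  prompt: 'Summarize what happens in this clip'
})

console.log(result.content, result.usage?.totalTokens)
```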
@@ -740,6 +786,70 @@ var OpenAIProvider = class {
  dangerouslyAllowBrowser: options.dangerouslyAllowBrowser
  });
  }
+ async generateSpeech(options) {
+ const response = await this.client.audio.speech.create({
+ model: options.model || "tts-1",
+ voice: options.voice,
+ input: options.input,
+ response_format: options.response_format,
+ speed: options.speed
+ });
+ const buffer = await response.arrayBuffer();
+ return {
+ buffer,
+ headers: {
+ "content-type": response.type
+ }
+ };
+ }
+ async generateImage(options) {
+ const response = await this.client.images.generate({
+ model: options.model || "dall-e-3",
+ prompt: options.prompt,
+ n: options.n,
+ size: options.size,
+ quality: options.quality,
+ style: options.style,
+ response_format: options.response_format,
+ user: options.user
+ });
+ return {
+ created: response.created,
+ data: response.data
+ };
+ }
+ async analyzeImage(options) {
+ const messages = [
+ ...options.messages ? this.mapMessages(options.messages) : [],
+ {
+ role: "user",
+ content: [
+ { type: "text", text: options.prompt || "Describe this image" },
+ {
+ type: "image_url",
+ image_url: {
+ url: options.image instanceof Buffer ? `data:image/png;base64,${options.image.toString("base64")}` : options.image
+ }
+ }
+ ]
+ }
+ ];
+ const response = await this.client.chat.completions.create({
+ model: options.model || "gpt-4o",
+ messages,
+ max_tokens: 1e3
+ });
+ const choice = response.choices[0];
+ if (!choice) throw new Error("No choice returned from OpenAI");
+ return {
+ content: choice.message.content || "",
+ usage: response.usage ? {
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens,
+ totalTokens: response.usage.total_tokens
+ } : void 0
+ };
+ }
  async chat(options) {
  const response = await this.client.chat.completions.create({
  model: options.model,
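A usage sketch for the three `OpenAIProvider` methods added above, using only the option fields the new code reads; the voice, size, and file names are placeholder values.

```typescript
import { readFileSync, writeFileSync } from 'node:fs'
import { OpenAIProvider } from 'tekimax-ts'

const openai = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY! })

// Text-to-speech: defaults to the "tts-1" model and returns { buffer, headers }.
const speech = await openai.generateSpeech({ input: 'Hello from Tekimax', voice: 'alloy' })
writeFileSync('hello.mp3', Buffer.from(speech.buffer))

// Image generation: defaults to "dall-e-3" when no model is given.
const image = await openai.generateImage({ prompt: 'a lighthouse at dusk', size: '1024x1024' })
console.log(image.data?.[0])

// Vision: accepts a URL string or a Buffer (sent as a base64 data URL).
const analysis = await openai.analyzeImage({
  image: readFileSync('photo.png'),
  prompt: 'What is in this photo?'
})
console.log(analysis.content)
```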
@@ -1416,22 +1526,24 @@ var fileToBase64 = (file) => {
  });
  };
  var parseThinking = (content) => {
- const thinkRegex = /<think>([\s\S]*?)<\/think>/;
- const match = content.match(thinkRegex);
- if (match) {
- return {
- think: match[1],
- rest: content.replace(thinkRegex, "").trim()
- };
+ const startTag = "<think>";
+ const endTag = "</think>";
+ const startIndex = content.indexOf(startTag);
+ if (startIndex === -1) {
+ return { think: null, rest: content };
  }
- if (content.startsWith("<think>")) {
+ const endIndex = content.indexOf(endTag, startIndex);
+ if (endIndex === -1) {
  return {
- think: content.replace("<think>", ""),
+ think: content.substring(startIndex + startTag.length),
  rest: ""
- // Still thinking
+ // Still thinking, no rest content yet
  };
  }
- return { think: null, rest: content };
+ return {
+ think: content.substring(startIndex + startTag.length, endIndex),
+ rest: (content.substring(0, startIndex) + content.substring(endIndex + endTag.length)).trim()
+ };
  };

  // src/namespaces/text.ts
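A short worked example of the rewritten `parseThinking`, with return values read directly off the branches above: a closed tag yields both parts, an unclosed tag means the model is still thinking, and content without a tag passes through.

```typescript
import { parseThinking } from 'tekimax-ts'

// Closed tag: the tag body becomes `think`, the remainder becomes `rest`.
parseThinking('<think>step by step reasoning</think>Final answer')
// => { think: 'step by step reasoning', rest: 'Final answer' }

// Unclosed tag (still streaming): everything after <think> is `think`, `rest` is empty.
parseThinking('<think>partial reasoning so far')
// => { think: 'partial reasoning so far', rest: '' }

// No tag: content passes through untouched.
parseThinking('Plain answer')
// => { think: null, rest: 'Plain answer' }
```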
package/dist/index.d.cts CHANGED
@@ -1,5 +1,5 @@
- import { A as AIProvider, C as ChatOptions, a as ChatResult, S as StreamChunk, T as Tool$1, M as Message$1, G as GenerateTextResult } from './tekimax-C-DJ0z27.cjs';
- export { b as ContentPart, I as ImageAnalysisOptions, c as ImageAnalysisResult, d as ImageContent, e as ImageContentPart, f as ImageEditOptions, g as ImageGenerationOptions, h as ImageResult, i as MessageRole, j as SpeechGenerationOptions, k as SpeechResult, l as Tekimax, m as TekimaxOptions, n as TextContentPart, o as ToolCall, p as ToolDefinition, V as VideoAnalysisOptions, q as VideoAnalysisResult, r as VideoContent, s as VideoGenerationOptions, t as VideoResult } from './tekimax-C-DJ0z27.cjs';
+ import { A as AIProvider, C as ChatOptions, a as ChatResult, S as StreamChunk, T as Tool$1, M as Message$1, G as GenerateTextResult, V as VideoAnalysisOptions, b as VideoAnalysisResult, c as SpeechGenerationOptions, d as SpeechResult, I as ImageGenerationOptions, e as ImageResult, f as ImageAnalysisOptions, g as ImageAnalysisResult } from './tekimax-BaM7MkG5.cjs';
+ export { h as ContentPart, i as ImageContent, j as ImageContentPart, k as ImageEditOptions, l as MessageRole, m as Tekimax, n as TekimaxOptions, o as TextContentPart, p as ToolCall, q as ToolDefinition, r as VideoContent, s as VideoGenerationOptions, t as VideoResult } from './tekimax-BaM7MkG5.cjs';

  /**
  * Generated by Kubb (https://kubb.dev/).
@@ -2741,10 +2741,10 @@ declare function generateText({ adapter, ...options }: GenerateTextOptions & {

  declare const fileToBase64: (file: File) => Promise<string>;
  declare const parseThinking: (content: string) => {
- think: string | undefined;
+ think: null;
  rest: string;
  } | {
- think: null;
+ think: string;
  rest: string;
  };

@@ -2767,6 +2767,7 @@ declare class GeminiProvider implements AIProvider {
  apiKey: string;
  });
  private convertContent;
+ analyzeVideo(options: VideoAnalysisOptions): Promise<VideoAnalysisResult>;
  chat(options: ChatOptions): Promise<ChatResult>;
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  private mapHistory;
@@ -2793,6 +2794,9 @@ declare class OpenAIProvider implements AIProvider {
  apiKey: string;
  dangerouslyAllowBrowser?: boolean;
  });
+ generateSpeech(options: SpeechGenerationOptions): Promise<SpeechResult>;
+ generateImage(options: ImageGenerationOptions): Promise<ImageResult>;
+ analyzeImage(options: ImageAnalysisOptions): Promise<ImageAnalysisResult>;
  chat(options: ChatOptions): Promise<ChatResult>;
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  private mapMessages;
@@ -2820,4 +2824,4 @@ declare class OpenRouterProvider implements AIProvider {
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  }

- export { AIProvider, AnthropicProvider, types as ApiTypes, ChatOptions, ChatResult, GeminiProvider, type GenerateTextOptions, GenerateTextResult, GrokProvider, Message$1 as Message, OllamaProvider, OpenAIProvider, OpenRouterProvider, StreamChunk, TekimaxClient, TekimaxProvider, Tool$1 as Tool, fileToBase64, generateText, parseThinking, tool };
+ export { AIProvider, AnthropicProvider, types as ApiTypes, ChatOptions, ChatResult, GeminiProvider, type GenerateTextOptions, GenerateTextResult, GrokProvider, ImageAnalysisOptions, ImageAnalysisResult, ImageGenerationOptions, ImageResult, Message$1 as Message, OllamaProvider, OpenAIProvider, OpenRouterProvider, SpeechGenerationOptions, SpeechResult, StreamChunk, TekimaxClient, TekimaxProvider, Tool$1 as Tool, VideoAnalysisOptions, VideoAnalysisResult, fileToBase64, generateText, parseThinking, tool };
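The export change above promotes the multi-modal option and result types to named exports, so they can be imported directly from the package root; a small sketch (the `narrate` wrapper is illustrative, not part of the SDK).

```typescript
import { OpenAIProvider, type SpeechGenerationOptions, type SpeechResult } from 'tekimax-ts'

// Illustrative helper typed against the newly exported declarations.
async function narrate(provider: OpenAIProvider, options: SpeechGenerationOptions): Promise<SpeechResult> {
  return provider.generateSpeech(options)
}
```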
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
- import { A as AIProvider, C as ChatOptions, a as ChatResult, S as StreamChunk, T as Tool$1, M as Message$1, G as GenerateTextResult } from './tekimax-C-DJ0z27.js';
- export { b as ContentPart, I as ImageAnalysisOptions, c as ImageAnalysisResult, d as ImageContent, e as ImageContentPart, f as ImageEditOptions, g as ImageGenerationOptions, h as ImageResult, i as MessageRole, j as SpeechGenerationOptions, k as SpeechResult, l as Tekimax, m as TekimaxOptions, n as TextContentPart, o as ToolCall, p as ToolDefinition, V as VideoAnalysisOptions, q as VideoAnalysisResult, r as VideoContent, s as VideoGenerationOptions, t as VideoResult } from './tekimax-C-DJ0z27.js';
+ import { A as AIProvider, C as ChatOptions, a as ChatResult, S as StreamChunk, T as Tool$1, M as Message$1, G as GenerateTextResult, V as VideoAnalysisOptions, b as VideoAnalysisResult, c as SpeechGenerationOptions, d as SpeechResult, I as ImageGenerationOptions, e as ImageResult, f as ImageAnalysisOptions, g as ImageAnalysisResult } from './tekimax-BaM7MkG5.js';
+ export { h as ContentPart, i as ImageContent, j as ImageContentPart, k as ImageEditOptions, l as MessageRole, m as Tekimax, n as TekimaxOptions, o as TextContentPart, p as ToolCall, q as ToolDefinition, r as VideoContent, s as VideoGenerationOptions, t as VideoResult } from './tekimax-BaM7MkG5.js';

  /**
  * Generated by Kubb (https://kubb.dev/).
@@ -2741,10 +2741,10 @@ declare function generateText({ adapter, ...options }: GenerateTextOptions & {

  declare const fileToBase64: (file: File) => Promise<string>;
  declare const parseThinking: (content: string) => {
- think: string | undefined;
+ think: null;
  rest: string;
  } | {
- think: null;
+ think: string;
  rest: string;
  };

@@ -2767,6 +2767,7 @@ declare class GeminiProvider implements AIProvider {
  apiKey: string;
  });
  private convertContent;
+ analyzeVideo(options: VideoAnalysisOptions): Promise<VideoAnalysisResult>;
  chat(options: ChatOptions): Promise<ChatResult>;
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  private mapHistory;
@@ -2793,6 +2794,9 @@ declare class OpenAIProvider implements AIProvider {
  apiKey: string;
  dangerouslyAllowBrowser?: boolean;
  });
+ generateSpeech(options: SpeechGenerationOptions): Promise<SpeechResult>;
+ generateImage(options: ImageGenerationOptions): Promise<ImageResult>;
+ analyzeImage(options: ImageAnalysisOptions): Promise<ImageAnalysisResult>;
  chat(options: ChatOptions): Promise<ChatResult>;
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  private mapMessages;
@@ -2820,4 +2824,4 @@ declare class OpenRouterProvider implements AIProvider {
  chatStream(options: ChatOptions): AsyncIterable<StreamChunk>;
  }

- export { AIProvider, AnthropicProvider, types as ApiTypes, ChatOptions, ChatResult, GeminiProvider, type GenerateTextOptions, GenerateTextResult, GrokProvider, Message$1 as Message, OllamaProvider, OpenAIProvider, OpenRouterProvider, StreamChunk, TekimaxClient, TekimaxProvider, Tool$1 as Tool, fileToBase64, generateText, parseThinking, tool };
+ export { AIProvider, AnthropicProvider, types as ApiTypes, ChatOptions, ChatResult, GeminiProvider, type GenerateTextOptions, GenerateTextResult, GrokProvider, ImageAnalysisOptions, ImageAnalysisResult, ImageGenerationOptions, ImageResult, Message$1 as Message, OllamaProvider, OpenAIProvider, OpenRouterProvider, SpeechGenerationOptions, SpeechResult, StreamChunk, TekimaxClient, TekimaxProvider, Tool$1 as Tool, VideoAnalysisOptions, VideoAnalysisResult, fileToBase64, generateText, parseThinking, tool };
package/dist/index.js CHANGED
@@ -410,6 +410,52 @@ var GeminiProvider = class {
  return { text: "" };
  });
  }
+ async analyzeVideo(options) {
+ let videoPart;
+ const videoInput = options.video;
+ if (typeof videoInput === "string") {
+ if (videoInput.startsWith("http")) {
+ const resp = await fetch(videoInput);
+ const arrayBuffer = await resp.arrayBuffer();
+ const base64 = Buffer.from(arrayBuffer).toString("base64");
+ videoPart = {
+ inlineData: {
+ data: base64,
+ mimeType: "video/mp4"
+ }
+ };
+ } else {
+ videoPart = {
+ inlineData: {
+ data: videoInput,
+ mimeType: "video/mp4"
+ }
+ };
+ }
+ } else if (typeof Buffer !== "undefined" && videoInput instanceof Buffer) {
+ videoPart = {
+ inlineData: {
+ data: videoInput.toString("base64"),
+ mimeType: "video/mp4"
+ }
+ };
+ } else {
+ throw new Error("Unsupported video format");
+ }
+ const model = this.client.getGenerativeModel({ model: options.model });
+ const result = await model.generateContent([
+ options.prompt || "Analyze this video",
+ videoPart
+ ]);
+ return {
+ content: result.response.text(),
+ usage: {
+ inputTokens: result.response.usageMetadata?.promptTokenCount,
+ outputTokens: result.response.usageMetadata?.candidatesTokenCount,
+ totalTokens: result.response.usageMetadata?.totalTokenCount
+ }
+ };
+ }
  async chat(options) {
  const model = this.client.getGenerativeModel({
  model: options.model || "gemini-pro",
@@ -695,6 +741,70 @@ var OpenAIProvider = class {
  dangerouslyAllowBrowser: options.dangerouslyAllowBrowser
  });
  }
+ async generateSpeech(options) {
+ const response = await this.client.audio.speech.create({
+ model: options.model || "tts-1",
+ voice: options.voice,
+ input: options.input,
+ response_format: options.response_format,
+ speed: options.speed
+ });
+ const buffer = await response.arrayBuffer();
+ return {
+ buffer,
+ headers: {
+ "content-type": response.type
+ }
+ };
+ }
+ async generateImage(options) {
+ const response = await this.client.images.generate({
+ model: options.model || "dall-e-3",
+ prompt: options.prompt,
+ n: options.n,
+ size: options.size,
+ quality: options.quality,
+ style: options.style,
+ response_format: options.response_format,
+ user: options.user
+ });
+ return {
+ created: response.created,
+ data: response.data
+ };
+ }
+ async analyzeImage(options) {
+ const messages = [
+ ...options.messages ? this.mapMessages(options.messages) : [],
+ {
+ role: "user",
+ content: [
+ { type: "text", text: options.prompt || "Describe this image" },
+ {
+ type: "image_url",
+ image_url: {
+ url: options.image instanceof Buffer ? `data:image/png;base64,${options.image.toString("base64")}` : options.image
+ }
+ }
+ ]
+ }
+ ];
+ const response = await this.client.chat.completions.create({
+ model: options.model || "gpt-4o",
+ messages,
+ max_tokens: 1e3
+ });
+ const choice = response.choices[0];
+ if (!choice) throw new Error("No choice returned from OpenAI");
+ return {
+ content: choice.message.content || "",
+ usage: response.usage ? {
+ inputTokens: response.usage.prompt_tokens,
+ outputTokens: response.usage.completion_tokens,
+ totalTokens: response.usage.total_tokens
+ } : void 0
+ };
+ }
  async chat(options) {
  const response = await this.client.chat.completions.create({
  model: options.model,
@@ -1371,22 +1481,24 @@ var fileToBase64 = (file) => {
  });
  };
  var parseThinking = (content) => {
- const thinkRegex = /<think>([\s\S]*?)<\/think>/;
- const match = content.match(thinkRegex);
- if (match) {
- return {
- think: match[1],
- rest: content.replace(thinkRegex, "").trim()
- };
+ const startTag = "<think>";
+ const endTag = "</think>";
+ const startIndex = content.indexOf(startTag);
+ if (startIndex === -1) {
+ return { think: null, rest: content };
  }
- if (content.startsWith("<think>")) {
+ const endIndex = content.indexOf(endTag, startIndex);
+ if (endIndex === -1) {
  return {
- think: content.replace("<think>", ""),
+ think: content.substring(startIndex + startTag.length),
  rest: ""
- // Still thinking
+ // Still thinking, no rest content yet
  };
  }
- return { think: null, rest: content };
+ return {
+ think: content.substring(startIndex + startTag.length, endIndex),
+ rest: (content.substring(0, startIndex) + content.substring(endIndex + endTag.length)).trim()
+ };
  };

  // src/namespaces/text.ts
@@ -1,4 +1,4 @@
- import { M as Message, A as AIProvider, l as Tekimax, T as Tool } from '../tekimax-C-DJ0z27.cjs';
+ import { M as Message, A as AIProvider, m as Tekimax, T as Tool } from '../tekimax-BaM7MkG5.cjs';

  interface UseChatOptions {
  adapter?: AIProvider;
@@ -1,4 +1,4 @@
- import { M as Message, A as AIProvider, l as Tekimax, T as Tool } from '../tekimax-C-DJ0z27.js';
+ import { M as Message, A as AIProvider, m as Tekimax, T as Tool } from '../tekimax-BaM7MkG5.js';

  interface UseChatOptions {
  adapter?: AIProvider;