@cencori/ai-sdk 0.2.1 → 0.3.0

package/README.md CHANGED
@@ -1,21 +1,21 @@
  # @cencori/ai-sdk

- The Cencori AI SDK — the infrastructure layer for AI applications. Works with [Vercel AI SDK](https://github.com/vercel/ai), TanStack AI, and more.
+ The Cencori AI SDK — the infrastructure layer for AI applications.

  ## Installation

  ```bash
- npm install @cencori/ai-sdk ai
+ npm install @cencori/ai-sdk
  ```

- ## Quick Start
+ ## Vercel AI SDK Integration

  ```typescript
- import { cencori } from '@cencori/ai-sdk';
+ import { cencori } from '@cencori/ai-sdk/vercel';
  import { streamText } from 'ai';

  const result = await streamText({
-   model: cencori('gemini-2.5-flash'),
+   model: cencori('gpt-4o'),
    messages: [{ role: 'user', content: 'Hello!' }]
  });

@@ -24,31 +24,23 @@ for await (const chunk of result.textStream) {
  }
  ```

- ## Usage with Next.js App Router
+ ## TanStack AI Integration (Coming Soon)

  ```typescript
- // app/api/chat/route.ts
- import { cencori } from '@cencori/ai-sdk';
- import { streamText } from 'ai';
-
- export async function POST(req: Request) {
-   const { messages } = await req.json();
+ import { cencori } from '@cencori/ai-sdk/tanstack';
+ import { chat } from '@tanstack/ai';

-   const result = await streamText({
-     model: cencori('gemini-2.5-flash'),
-     messages
-   });
-
-   return result.toUIMessageStreamResponse();
- }
+ const result = await chat({
+   adapter: cencori,
+   model: 'gpt-4o',
+   messages: [{ role: 'user', content: 'Hello!' }]
+ });
  ```

  ## Configuration

  ### Environment Variable

- Set the `CENCORI_API_KEY` environment variable:
-
  ```bash
  CENCORI_API_KEY=csk_your_key_here
  ```
@@ -56,50 +48,32 @@ CENCORI_API_KEY=csk_your_key_here
  ### Custom Configuration

  ```typescript
- import { createCencori } from '@cencori/ai-sdk';
+ import { createCencori } from '@cencori/ai-sdk/vercel';

  const cencori = createCencori({
    apiKey: 'csk_your_key_here',
-   baseUrl: 'https://cencori.com', // optional
- });
-
- const result = await streamText({
-   model: cencori('gpt-4o'),
-   messages: [{ role: 'user', content: 'Hello!' }]
+   baseUrl: 'https://cencori.com',
  });
  ```

  ## Supported Models

- Use any model supported by Cencori:
-
  | Provider | Models |
  |----------|--------|
  | OpenAI | `gpt-4o`, `gpt-4o-mini`, `o1` |
- | Anthropic | `claude-3-opus`, `claude-3-5-sonnet`, `claude-3-haiku` |
+ | Anthropic | `claude-3-5-sonnet`, `claude-3-opus`, `claude-3-haiku` |
  | Google | `gemini-2.5-flash`, `gemini-2.0-flash`, `gemini-3-pro` |
  | xAI | `grok-4`, `grok-3` |
  | Mistral | `mistral-large`, `codestral` |
  | DeepSeek | `deepseek-v3.2`, `deepseek-reasoner` |
- | + More | Groq, Cohere, Perplexity, Together, Meta, Qwen, HuggingFace |
+ | + More | Groq, Cohere, Perplexity, Together |

  ## Why Cencori?

- Unlike raw AI SDKs, Cencori gives you:
-
  - 🔒 **Security** — PII filtering, jailbreak detection, content moderation
  - 📊 **Observability** — Request logs, latency metrics, cost tracking
  - 💰 **Cost Control** — Budgets, alerts, per-route analytics
- - 🔌 **Multi-Provider** — One API key for OpenAI, Claude, Gemini, and more
-
- ## Features
-
- - ✅ Drop-in Vercel AI SDK compatibility
- - ✅ Works with `streamText()`, `generateText()`, `useChat()`
- - ✅ Built-in content safety filtering
- - ✅ Rate limiting protection
- - ✅ Full analytics in Cencori dashboard
- - ✅ Multi-provider support with one API key
+ - 🔌 **Multi-Provider** — One API key for all AI providers

  ## License

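The README changes above reroute every example through the new `@cencori/ai-sdk/vercel` subpath and trim the model table. The multi-provider claim is easiest to see in code; a minimal sketch, assuming `CENCORI_API_KEY` is set, the `ai` package is installed alongside, and using model IDs taken from the table above:

```typescript
import { cencori } from '@cencori/ai-sdk/vercel';
import { generateText } from 'ai';

// One Cencori key, two upstream providers: the model ID alone
// selects the provider (see the Supported Models table above).
const fromOpenAI = await generateText({
  model: cencori('gpt-4o'),
  messages: [{ role: 'user', content: 'Hello!' }]
});

const fromAnthropic = await generateText({
  model: cencori('claude-3-5-sonnet'),
  messages: [{ role: 'user', content: 'Hello!' }]
});
```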
package/dist/index.d.mts CHANGED
@@ -1,118 +1,2 @@
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult } from '@ai-sdk/provider';
-
- /**
-  * Cencori Chat Language Model
-  *
-  * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)
-  */
-
- interface CencoriChatModelSettings {
-     apiKey: string;
-     baseUrl: string;
-     headers?: Record<string, string>;
-     userId?: string;
- }
- declare class CencoriChatLanguageModel implements LanguageModelV3 {
-     readonly specificationVersion: "v3";
-     readonly provider = "cencori";
-     readonly modelId: string;
-     readonly supportedUrls: Record<string, RegExp[]>;
-     private readonly settings;
-     constructor(modelId: string, settings: CencoriChatModelSettings);
-     private getHeaders;
-     private convertMessages;
-     private mapFinishReason;
-     private buildUsage;
-     doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
-     doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
- }
-
- /**
-  * Types for Cencori AI Provider
-  */
- interface CencoriProviderSettings {
-     /**
-      * Cencori API key (csk_ or cpk_ prefix)
-      */
-     apiKey?: string;
-     /**
-      * Base URL for the Cencori API
-      * @default 'https://cencori.com'
-      */
-     baseUrl?: string;
-     /**
-      * Custom headers to include in requests
-      */
-     headers?: Record<string, string>;
- }
- interface CencoriChatSettings {
-     /**
-      * Optional user ID for rate limiting and analytics
-      */
-     userId?: string;
- }
-
- /**
-  * Cencori AI Provider for Vercel AI SDK
-  *
-  * Use Cencori with streamText(), generateText(), and useChat()
-  */
-
- interface CencoriProvider {
-     /**
-      * Create a Cencori chat model for use with Vercel AI SDK
-      *
-      * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')
-      * @param settings - Optional model-specific settings
-      * @returns A LanguageModelV1 compatible model
-      *
-      * @example
-      * import { cencori } from '@cencori/ai-sdk';
-      * import { streamText } from 'ai';
-      *
-      * const result = await streamText({
-      *   model: cencori('gemini-2.5-flash'),
-      *   messages: [{ role: 'user', content: 'Hello!' }]
-      * });
-      */
-     (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;
-     /**
-      * Create a chat model (alias for the provider function)
-      */
-     chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;
- }
- /**
-  * Create a Cencori provider instance
-  *
-  * @param options - Provider configuration options
-  * @returns A Cencori provider
-  *
-  * @example
-  * import { createCencori } from '@cencori/ai-sdk';
-  *
-  * const cencori = createCencori({
-  *   apiKey: process.env.CENCORI_API_KEY
-  * });
-  *
-  * const result = await streamText({
-  *   model: cencori('gemini-2.5-flash'),
-  *   messages: [{ role: 'user', content: 'Hello!' }]
-  * });
-  */
- declare function createCencori(options?: CencoriProviderSettings): CencoriProvider;
- /**
-  * Default Cencori provider instance
-  * Uses CENCORI_API_KEY environment variable (lazy initialization)
-  *
-  * @example
-  * import { cencori } from '@cencori/ai-sdk';
-  * import { streamText } from 'ai';
-  *
-  * const result = await streamText({
-  *   model: cencori('gemini-2.5-flash'),
-  *   messages: [{ role: 'user', content: 'Hello!' }]
-  * });
-  */
- declare const cencori: CencoriProvider;
-
- export { CencoriChatLanguageModel, type CencoriChatSettings, type CencoriProvider, type CencoriProviderSettings, cencori, createCencori };
+ export { CencoriChatLanguageModel, CencoriChatSettings, CencoriProvider, CencoriProviderSettings, cencori, createCencori } from './vercel/index.mjs';
+ import '@ai-sdk/provider';
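The declarations removed here are not gone; they move behind the `./vercel` subpath and are re-exported from the root entry. A hedged sketch of how the declared types compose (the header name and user ID below are illustrative values, not part of the package):

```typescript
import { createCencori } from '@cencori/ai-sdk/vercel';

// CencoriProviderSettings: apiKey, baseUrl, and headers are all optional;
// apiKey falls back to the CENCORI_API_KEY environment variable.
const cencori = createCencori({
  apiKey: process.env.CENCORI_API_KEY,
  headers: { 'x-example-trace': 'docs' } // hypothetical custom header
});

// CencoriChatSettings: per the JSDoc above, userId feeds rate limiting
// and analytics; 'user_123' is a made-up value.
const model = cencori.chat('gemini-2.5-flash', { userId: 'user_123' });
```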
package/dist/index.d.ts CHANGED
@@ -1,118 +1,2 @@
- import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult } from '@ai-sdk/provider';
-
- /**
-  * Cencori Chat Language Model
-  *
-  * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)
-  */
-
- interface CencoriChatModelSettings {
-     apiKey: string;
-     baseUrl: string;
-     headers?: Record<string, string>;
-     userId?: string;
- }
- declare class CencoriChatLanguageModel implements LanguageModelV3 {
-     readonly specificationVersion: "v3";
-     readonly provider = "cencori";
-     readonly modelId: string;
-     readonly supportedUrls: Record<string, RegExp[]>;
-     private readonly settings;
-     constructor(modelId: string, settings: CencoriChatModelSettings);
-     private getHeaders;
-     private convertMessages;
-     private mapFinishReason;
-     private buildUsage;
-     doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
-     doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
- }
-
- /**
-  * Types for Cencori AI Provider
-  */
- interface CencoriProviderSettings {
-     /**
-      * Cencori API key (csk_ or cpk_ prefix)
-      */
-     apiKey?: string;
-     /**
-      * Base URL for the Cencori API
-      * @default 'https://cencori.com'
-      */
-     baseUrl?: string;
-     /**
-      * Custom headers to include in requests
-      */
-     headers?: Record<string, string>;
- }
- interface CencoriChatSettings {
-     /**
-      * Optional user ID for rate limiting and analytics
-      */
-     userId?: string;
- }
-
- /**
-  * Cencori AI Provider for Vercel AI SDK
-  *
-  * Use Cencori with streamText(), generateText(), and useChat()
-  */
-
- interface CencoriProvider {
-     /**
-      * Create a Cencori chat model for use with Vercel AI SDK
-      *
-      * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')
-      * @param settings - Optional model-specific settings
-      * @returns A LanguageModelV1 compatible model
-      *
-      * @example
-      * import { cencori } from '@cencori/ai-sdk';
-      * import { streamText } from 'ai';
-      *
-      * const result = await streamText({
-      *   model: cencori('gemini-2.5-flash'),
-      *   messages: [{ role: 'user', content: 'Hello!' }]
-      * });
-      */
-     (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;
-     /**
-      * Create a chat model (alias for the provider function)
-      */
-     chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;
- }
- /**
-  * Create a Cencori provider instance
-  *
-  * @param options - Provider configuration options
-  * @returns A Cencori provider
-  *
-  * @example
-  * import { createCencori } from '@cencori/ai-sdk';
-  *
-  * const cencori = createCencori({
-  *   apiKey: process.env.CENCORI_API_KEY
-  * });
-  *
-  * const result = await streamText({
-  *   model: cencori('gemini-2.5-flash'),
-  *   messages: [{ role: 'user', content: 'Hello!' }]
-  * });
-  */
- declare function createCencori(options?: CencoriProviderSettings): CencoriProvider;
- /**
-  * Default Cencori provider instance
-  * Uses CENCORI_API_KEY environment variable (lazy initialization)
-  *
-  * @example
-  * import { cencori } from '@cencori/ai-sdk';
-  * import { streamText } from 'ai';
-  *
-  * const result = await streamText({
-  *   model: cencori('gemini-2.5-flash'),
-  *   messages: [{ role: 'user', content: 'Hello!' }]
-  * });
-  */
- declare const cencori: CencoriProvider;
-
- export { CencoriChatLanguageModel, type CencoriChatSettings, type CencoriProvider, type CencoriProviderSettings, cencori, createCencori };
+ export { CencoriChatLanguageModel, CencoriChatSettings, CencoriProvider, CencoriProviderSettings, cencori, createCencori } from './vercel/index.js';
+ import '@ai-sdk/provider';
package/dist/index.js CHANGED
@@ -18,15 +18,15 @@ var __copyProps = (to, from, except, desc) => {
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // src/index.ts
- var index_exports = {};
- __export(index_exports, {
+ var src_exports = {};
+ __export(src_exports, {
    CencoriChatLanguageModel: () => CencoriChatLanguageModel,
    cencori: () => cencori,
    createCencori: () => createCencori
  });
- module.exports = __toCommonJS(index_exports);
+ module.exports = __toCommonJS(src_exports);

- // src/cencori-chat-model.ts
+ // src/vercel/cencori-chat-model.ts
  var CencoriChatLanguageModel = class {
    constructor(modelId, settings) {
      this.specificationVersion = "v3";
@@ -263,7 +263,7 @@ var CencoriChatLanguageModel = class {
    }
  };

- // src/cencori-provider.ts
+ // src/vercel/cencori-provider.ts
  function createCencori(options = {}) {
    const baseUrl = options.baseUrl ?? "https://cencori.com";
    const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
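The bundle changes are purely internal: the bundler's export-namespace object was renamed (`index_exports` to `src_exports`) and the source comments now point under `src/vercel/`, while the exported names are unchanged. A small sketch of the unchanged public surface, assuming `CENCORI_API_KEY` is set (the default provider throws without it):

```typescript
import { cencori, CencoriChatLanguageModel } from '@cencori/ai-sdk';

// The root entry still works in 0.3.0 because src/index.ts re-exports
// the Vercel integration for backwards compatibility.
const model = cencori('gpt-4o');
console.log(model instanceof CencoriChatLanguageModel); // true
console.log(model.provider, model.modelId); // "cencori" "gpt-4o"
```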
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/cencori-chat-model.ts","../src/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nexport { cencori, createCencori } from './cencori-provider';\nexport type { CencoriProvider } from './cencori-provider';\nexport type { CencoriProviderSettings, CencoriChatSettings } from './types';\nexport { CencoriChatLanguageModel } from './cencori-chat-model';\n","/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 
'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n total: inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if 
(!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. 
Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACgDO,IAAM,2BAAN,MAA0D;AAAA,EAQ7D,YAAY,SAAiB,UAAoC;AAPjE,SAAS,uBAAuB;AAChC,SAAS,WAAW;AAGpB,SAAS,gBAA0C,CAAC;AAIhD,SAAK,UAAU;AACf,SAAK,WAAW;AAAA,EACpB;AAAA,EAEQ,aAAqC;AACzC,WAAO;AAAA,MACH,gBAAgB;AAAA,MAChB,mBAAmB,KAAK,SAAS;AAAA,MACjC,GAAG,KAAK,SAAS;AAAA,IACrB;AAAA,EACJ;AAAA,EAEQ,gBAAgB,SAAuD;AAC3E,UAAM,WAA6B,CAAC;AAGpC,UAAM,iBAAiB,QAAQ;AAE/B,QAAI,CAAC,kBAAkB,CAAC,MAAM,QAAQ,cAAc,GAAG;AACnD,aAAO;AAAA,IACX;AAEA,eAAW,OAAO,gBAAgB;AAC9B,UAAI,UAAU;AAEd,UAAI,IAAI,SAAS,UAAU;AAEvB,kBAAU,IAAI;AAAA,MAClB,WAAW,IAAI,SAAS,UAAU,IAAI,SAAS,aAAa;AAExD,cAAM,aAAa,IAAI;AACvB,YAAI,MAAM,QAAQ,UAAU,GAAG;AAC3B,oBAAU,WACL,OAAO,CAAC,SAA2B,KAAK,SAAS,MAAM,EACvD,IAAI,CAAC,SAA0C,KAAK,QAAQ,EAAE,EAC9D,KAAK,EAAE;AAAA,QAChB,WAAW,OAAO,eAAe,UAAU;AACvC,oBAAU;AAAA,QACd;AAAA,MACJ;AAEA,UAAI,YAAY,IAAI,SAAS,YAAY,IAAI,SAAS,UAAU,IAAI,SAAS,cAAc;AACvF,iBAAS,KAAK;AAAA,UACV,MAAM,IAAI;AAAA,UACV;AAAA,QACJ,CAAC;AAAA,MACL;AAAA,IACJ;AAEA,WAAO;AAAA,EACX;AAAA,EAEQ,gBAAgB,QAA8C;AAClE,QAAI;AAEJ,YAAQ,QAAQ;AAAA,MACZ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ;AACI,kBAAU;AAAA,IAClB;AAEA,WAAO,EAAE,SAAS,KAAK,OAAO;AAAA,EAClC;AAAA,EAEQ,WAAW,aAAqB,cAA4C;AAChF,WAAO;AAAA,MACH,aAAa;AAAA,QACT,OAAO;AAAA,QACP,SAAS;AAAA,QACT,WAAW;AAAA,QACX,YAAY;AAAA,MAChB;AAAA,MACA,cAAc;AAAA,QACV,OAAO;AAAA,QACP,MAAM;AAAA,QACN,WAAW;AAAA,MACf;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,WAAW,SAA6E;AAC1F,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAE7C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,UAAM,UAAoC,CAAC;AAAA,MACvC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,MACX,kBAAkB;AAAA,IACtB,CAAC;AAED,UAAM,WAA8B,CAAC;AAErC,WAAO;AAAA,MACH;AAAA,MACA,cAAc,KAAK,gBAAgB,KAAK,aAAa;AAAA,MACrD,OAAO,KAAK
,WAAW,KAAK,MAAM,eAAe,KAAK,MAAM,iBAAiB;AAAA,MAC7E;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,SAAS,SAA2E;AACtF,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAC7C,UAAM,OAAO;AAEb,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,cAAc;AAClB,QAAI,eAAe;AACnB,UAAM,aAAa;AACnB,QAAI,UAAU;AAEd,UAAM,SAAS,IAAI,eAA0C;AAAA,MACzD,MAAM,KAAK,YAAY;AACnB,YAAI;AACA,gBAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,cAAI,MAAM;AAEN,gBAAI,SAAS;AACT,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,IAAI;AAAA,cACR,CAAC;AAAA,YACL;AACA,uBAAW,QAAQ;AAAA,cACf,MAAM;AAAA,cACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,cACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,YACpD,CAAC;AACD,uBAAW,MAAM;AACjB;AAAA,UACJ;AAEA,oBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,gBAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,mBAAS,MAAM,IAAI,KAAK;AAExB,qBAAW,QAAQ,OAAO;AACtB,gBAAI,KAAK,KAAK,MAAM,GAAI;AACxB,gBAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,kBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,gBAAI,SAAS,UAAU;AACnB,kBAAI,SAAS;AACT,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,gBACR,CAAC;AAAA,cACL;AACA,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,gBACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,cACpD,CAAC;AACD,yBAAW,MAAM;AACjB;AAAA,YACJ;AAEA,gBAAI;AACA,oBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,kBAAI,MAAM,OAAO;AAEb,oBAAI,CAAC,SAAS;AACV,4BAAU;AACV,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AAEA,gCAAgB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAChD,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,kBACJ,OAAO,MAAM;AAAA,gBACjB,CAAC;AAAA,cACL;AAEA,kBAAI,MAAM,eAAe;AACrB,oBAAI,SAAS;AACT,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AACA,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,cAAc,KAAK,gBAAgB,MAAM,aAAa;AAAA,kBACtD,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,gBACpD,CAAC;AACD,2BAAW,MAAM;AACjB;AAAA,cACJ;AAAA,YACJ,QAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ,SAAS,OAAO;AACZ,qBAAW,MAAM,KAAK;AAAA,QAC1B;AAAA,MACJ;AAAA,MACA,SAAS;AACL,eAAO,OAAO;AAAA,MAClB;AAAA,IACJ,CAAC;AAED,WAAO;AAAA,MACH;AAAA,IACJ;AAAA,EACJ;AACJ;;;ACrRO,SAAS,cAAc,UAAmC,CAAC,GAAoB;AAClF,QAAM,UAAU,QAAQ,WAAW;AACnC,QAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,sGAAsG;AAAA,EAC1H;AAEA,QAAM,cAAc,CAAC,SAAiB,WAAgC,CAAC,MAAM;AACzE,WAAO,IAAI,yBAAyB,SAAS;AAAA,MACzC;AAAA,MACA;AAAA,MACA,SAAS,QAAQ;AAAA,MACjB,GAAG;AAAA,IACP,CAAC;AAAA,EACL;AAEA,QAAM,WAAW,SAAU,SAAiB,UAAgC;AACxE,WAAO,YAAY,SAAS,QAAQ;AAAA,EACxC;AAEA,WAAS,OAAO;AAEhB,SAAO;AACX;AAeO,IAAM,UAA2B,SAAU,SAAiB,UAAgC;AAC/F,QAAM,SAAS,QAAQ,IAAI;AAC3B,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,2GAA2G;AAAA,EAC/H;AACA,SAAO,IAAI,yBAAyB,SAAS;AAAA,IACzC;AAAA,IACA,SAAS;AAAA,IACT,GAAG;AAAA,EACP,CAAC;AACL;AAEA,QAAQ,OAAO;","names":[]}
+ {"version":3,"sources":["../src/index.ts","../src/vercel/cencori-chat-model.ts","../src/vercel/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori AI SDK - Main Entry Point\n * \n * This package provides integrations for multiple AI SDK ecosystems:\n * \n * @example Vercel AI SDK\n * import { cencori } from '@cencori/ai-sdk/vercel';\n * \n * @example TanStack AI (coming soon)\n * import { cencori } from '@cencori/ai-sdk/tanstack';\n */\n\n// Re-export Vercel integration as default for backwards compatibility\nexport * from './vercel';\n","/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 
'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n total: inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if (!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 
'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. 
Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACgDO,IAAM,2BAAN,MAA0D;AAAA,EAQ7D,YAAY,SAAiB,UAAoC;AAPjE,SAAS,uBAAuB;AAChC,SAAS,WAAW;AAGpB,SAAS,gBAA0C,CAAC;AAIhD,SAAK,UAAU;AACf,SAAK,WAAW;AAAA,EACpB;AAAA,EAEQ,aAAqC;AACzC,WAAO;AAAA,MACH,gBAAgB;AAAA,MAChB,mBAAmB,KAAK,SAAS;AAAA,MACjC,GAAG,KAAK,SAAS;AAAA,IACrB;AAAA,EACJ;AAAA,EAEQ,gBAAgB,SAAuD;AAC3E,UAAM,WAA6B,CAAC;AAGpC,UAAM,iBAAiB,QAAQ;AAE/B,QAAI,CAAC,kBAAkB,CAAC,MAAM,QAAQ,cAAc,GAAG;AACnD,aAAO;AAAA,IACX;AAEA,eAAW,OAAO,gBAAgB;AAC9B,UAAI,UAAU;AAEd,UAAI,IAAI,SAAS,UAAU;AAEvB,kBAAU,IAAI;AAAA,MAClB,WAAW,IAAI,SAAS,UAAU,IAAI,SAAS,aAAa;AAExD,cAAM,aAAa,IAAI;AACvB,YAAI,MAAM,QAAQ,UAAU,GAAG;AAC3B,oBAAU,WACL,OAAO,CAAC,SAA2B,KAAK,SAAS,MAAM,EACvD,IAAI,CAAC,SAA0C,KAAK,QAAQ,EAAE,EAC9D,KAAK,EAAE;AAAA,QAChB,WAAW,OAAO,eAAe,UAAU;AACvC,oBAAU;AAAA,QACd;AAAA,MACJ;AAEA,UAAI,YAAY,IAAI,SAAS,YAAY,IAAI,SAAS,UAAU,IAAI,SAAS,cAAc;AACvF,iBAAS,KAAK;AAAA,UACV,MAAM,IAAI;AAAA,UACV;AAAA,QACJ,CAAC;AAAA,MACL;AAAA,IACJ;AAEA,WAAO;AAAA,EACX;AAAA,EAEQ,gBAAgB,QAA8C;AAClE,QAAI;AAEJ,YAAQ,QAAQ;AAAA,MACZ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ;AACI,kBAAU;AAAA,IAClB;AAEA,WAAO,EAAE,SAAS,KAAK,OAAO;AAAA,EAClC;AAAA,EAEQ,WAAW,aAAqB,cAA4C;AAChF,WAAO;AAAA,MACH,aAAa;AAAA,QACT,OAAO;AAAA,QACP,SAAS;AAAA,QACT,WAAW;AAAA,QACX,YAAY;AAAA,MAChB;AAAA,MACA,cAAc;AAAA,QACV,OAAO;AAAA,QACP,MAAM;AAAA,QACN,WAAW;AAAA,MACf;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,WAAW,SAA6E;AAC1F,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAE7C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,UAAM,UAAoC,CAAC;AAAA,MACvC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,MACX,kBAAkB;AAAA,IACtB,CAAC;AAED,UAAM,WAA8B,CAAC;AAErC,WAAO;AAAA,MACH;AAAA,MACA,cAAc,KAAK,gBAAgB,KAAK,aAAa;AAAA,MACrD,OAAO,KAAK
,WAAW,KAAK,MAAM,eAAe,KAAK,MAAM,iBAAiB;AAAA,MAC7E;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,SAAS,SAA2E;AACtF,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAC7C,UAAM,OAAO;AAEb,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,cAAc;AAClB,QAAI,eAAe;AACnB,UAAM,aAAa;AACnB,QAAI,UAAU;AAEd,UAAM,SAAS,IAAI,eAA0C;AAAA,MACzD,MAAM,KAAK,YAAY;AACnB,YAAI;AACA,gBAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,cAAI,MAAM;AAEN,gBAAI,SAAS;AACT,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,IAAI;AAAA,cACR,CAAC;AAAA,YACL;AACA,uBAAW,QAAQ;AAAA,cACf,MAAM;AAAA,cACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,cACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,YACpD,CAAC;AACD,uBAAW,MAAM;AACjB;AAAA,UACJ;AAEA,oBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,gBAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,mBAAS,MAAM,IAAI,KAAK;AAExB,qBAAW,QAAQ,OAAO;AACtB,gBAAI,KAAK,KAAK,MAAM,GAAI;AACxB,gBAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,kBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,gBAAI,SAAS,UAAU;AACnB,kBAAI,SAAS;AACT,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,gBACR,CAAC;AAAA,cACL;AACA,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,gBACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,cACpD,CAAC;AACD,yBAAW,MAAM;AACjB;AAAA,YACJ;AAEA,gBAAI;AACA,oBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,kBAAI,MAAM,OAAO;AAEb,oBAAI,CAAC,SAAS;AACV,4BAAU;AACV,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AAEA,gCAAgB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAChD,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,kBACJ,OAAO,MAAM;AAAA,gBACjB,CAAC;AAAA,cACL;AAEA,kBAAI,MAAM,eAAe;AACrB,oBAAI,SAAS;AACT,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AACA,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,cAAc,KAAK,gBAAgB,MAAM,aAAa;AAAA,kBACtD,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,gBACpD,CAAC;AACD,2BAAW,MAAM;AACjB;AAAA,cACJ;AAAA,YACJ,QAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ,SAAS,OAAO;AACZ,qBAAW,MAAM,KAAK;AAAA,QAC1B;AAAA,MACJ;AAAA,MACA,SAAS;AACL,eAAO,OAAO;AAAA,MAClB;AAAA,IACJ,CAAC;AAED,WAAO;AAAA,MACH;AAAA,IACJ;AAAA,EACJ;AACJ;;;ACrRO,SAAS,cAAc,UAAmC,CAAC,GAAoB;AAClF,QAAM,UAAU,QAAQ,WAAW;AACnC,QAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,sGAAsG;AAAA,EAC1H;AAEA,QAAM,cAAc,CAAC,SAAiB,WAAgC,CAAC,MAAM;AACzE,WAAO,IAAI,yBAAyB,SAAS;AAAA,MACzC;AAAA,MACA;AAAA,MACA,SAAS,QAAQ;AAAA,MACjB,GAAG;AAAA,IACP,CAAC;AAAA,EACL;AAEA,QAAM,WAAW,SAAU,SAAiB,UAAgC;AACxE,WAAO,YAAY,SAAS,QAAQ;AAAA,EACxC;AAEA,WAAS,OAAO;AAEhB,SAAO;AACX;AAeO,IAAM,UAA2B,SAAU,SAAiB,UAAgC;AAC/F,QAAM,SAAS,QAAQ,IAAI;AAC3B,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,2GAA2G;AAAA,EAC/H;AACA,SAAO,IAAI,yBAAyB,SAAS;AAAA,IACzC;AAAA,IACA,SAAS;AAAA,IACT,GAAG;AAAA,EACP,CAAC;AACL;AAEA,QAAQ,OAAO;","names":[]}
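The embedded `sourcesContent` above is the clearest record of the wire protocol: the model POSTs to `${baseUrl}/api/ai/chat` with a `CENCORI_API_KEY` header, and `doStream()` parses an SSE body of `data:` lines carrying JSON chunks of the `CencoriStreamChunk` shape (`delta`, optional `finish_reason`), terminated by `data: [DONE]`. A sketch of a body the parser would accept (payload text is illustrative only):

```typescript
// Illustrative SSE stream matching CencoriStreamChunk in the embedded
// source; lines with malformed JSON are silently skipped by the parser.
const exampleBody = [
  'data: {"delta":"Hel"}',
  'data: {"delta":"lo!"}',
  'data: {"delta":"","finish_reason":"stop"}',
  'data: [DONE]',
  ''
].join('\n');
```

Note that output token usage is estimated client-side at roughly four characters per token (`Math.ceil(delta.length / 4)`); it is not read from the stream.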
package/dist/index.mjs CHANGED
@@ -1,4 +1,4 @@
- // src/cencori-chat-model.ts
+ // src/vercel/cencori-chat-model.ts
  var CencoriChatLanguageModel = class {
    constructor(modelId, settings) {
      this.specificationVersion = "v3";
@@ -235,7 +235,7 @@ var CencoriChatLanguageModel = class {
    }
  };

- // src/cencori-provider.ts
+ // src/vercel/cencori-provider.ts
  function createCencori(options = {}) {
    const baseUrl = options.baseUrl ?? "https://cencori.com";
    const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
package/dist/index.mjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/cencori-chat-model.ts","../src/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n total: 
inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if (!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: 
textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. 
Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":"…(generated base64-VLQ source-map mappings omitted; not human-readable)…","names":[]}
1
+ {"version":3,"sources":["../src/vercel/cencori-chat-model.ts","../src/vercel/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n 
total: inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if (!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: 
textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. 
Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":";AAgDO,IAAM,2BAAN,MAA0D;AAAA,EAQ7D,YAAY,SAAiB,UAAoC;AAPjE,SAAS,uBAAuB;AAChC,SAAS,WAAW;AAGpB,SAAS,gBAA0C,CAAC;AAIhD,SAAK,UAAU;AACf,SAAK,WAAW;AAAA,EACpB;AAAA,EAEQ,aAAqC;AACzC,WAAO;AAAA,MACH,gBAAgB;AAAA,MAChB,mBAAmB,KAAK,SAAS;AAAA,MACjC,GAAG,KAAK,SAAS;AAAA,IACrB;AAAA,EACJ;AAAA,EAEQ,gBAAgB,SAAuD;AAC3E,UAAM,WAA6B,CAAC;AAGpC,UAAM,iBAAiB,QAAQ;AAE/B,QAAI,CAAC,kBAAkB,CAAC,MAAM,QAAQ,cAAc,GAAG;AACnD,aAAO;AAAA,IACX;AAEA,eAAW,OAAO,gBAAgB;AAC9B,UAAI,UAAU;AAEd,UAAI,IAAI,SAAS,UAAU;AAEvB,kBAAU,IAAI;AAAA,MAClB,WAAW,IAAI,SAAS,UAAU,IAAI,SAAS,aAAa;AAExD,cAAM,aAAa,IAAI;AACvB,YAAI,MAAM,QAAQ,UAAU,GAAG;AAC3B,oBAAU,WACL,OAAO,CAAC,SAA2B,KAAK,SAAS,MAAM,EACvD,IAAI,CAAC,SAA0C,KAAK,QAAQ,EAAE,EAC9D,KAAK,EAAE;AAAA,QAChB,WAAW,OAAO,eAAe,UAAU;AACvC,oBAAU;AAAA,QACd;AAAA,MACJ;AAEA,UAAI,YAAY,IAAI,SAAS,YAAY,IAAI,SAAS,UAAU,IAAI,SAAS,cAAc;AACvF,iBAAS,KAAK;AAAA,UACV,MAAM,IAAI;AAAA,UACV;AAAA,QACJ,CAAC;AAAA,MACL;AAAA,IACJ;AAEA,WAAO;AAAA,EACX;AAAA,EAEQ,gBAAgB,QAA8C;AAClE,QAAI;AAEJ,YAAQ,QAAQ;AAAA,MACZ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ;AACI,kBAAU;AAAA,IAClB;AAEA,WAAO,EAAE,SAAS,KAAK,OAAO;AAAA,EAClC;AAAA,EAEQ,WAAW,aAAqB,cAA4C;AAChF,WAAO;AAAA,MACH,aAAa;AAAA,QACT,OAAO;AAAA,QACP,SAAS;AAAA,QACT,WAAW;AAAA,QACX,YAAY;AAAA,MAChB;AAAA,MACA,cAAc;AAAA,QACV,OAAO;AAAA,QACP,MAAM;AAAA,QACN,WAAW;AAAA,MACf;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,WAAW,SAA6E;AAC1F,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAE7C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,UAAM,UAAoC,CAAC;AAAA,MACvC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,MACX,kBAAkB;AAAA,IACtB,CAAC;AAED,UAAM,WAA8B,CAAC;AAErC,WAAO;AAAA,MACH;AAAA,MACA,cAAc,KAAK,gBAAgB,KAAK,aAAa;AAAA,MACrD,OAAO,KAAK,WAAW,KAAK,MAAM,eAAe,KAAK,MAAM,iBAAiB;AAAA,MAC7E;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,SAAS,SAA2E;AACtF,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAC7C,UAAM,OAAO;AAEb,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,cAAc;AAClB,QAAI,eAAe;AACnB,UAAM,aAAa;AACnB,QAAI,UAAU;AAEd,UAAM,SAAS,IAAI,eAA0C;AAAA,MACzD,MAAM,KAAK,YAAY;AACnB,YAAI;AACA,gBAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,cAAI,MAAM;AAEN,gBAAI,SAAS;AACT,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,IAAI;AAAA,cACR,CAAC;AAAA,YACL;AACA,uBAAW,QAAQ;AAAA,cACf,MAAM;AAAA,cACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,cACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,YACpD,CAAC;AACD,uBAAW,MAAM;AACjB;AAAA,UACJ;AAEA,
oBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,gBAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,mBAAS,MAAM,IAAI,KAAK;AAExB,qBAAW,QAAQ,OAAO;AACtB,gBAAI,KAAK,KAAK,MAAM,GAAI;AACxB,gBAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,kBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,gBAAI,SAAS,UAAU;AACnB,kBAAI,SAAS;AACT,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,gBACR,CAAC;AAAA,cACL;AACA,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,gBACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,cACpD,CAAC;AACD,yBAAW,MAAM;AACjB;AAAA,YACJ;AAEA,gBAAI;AACA,oBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,kBAAI,MAAM,OAAO;AAEb,oBAAI,CAAC,SAAS;AACV,4BAAU;AACV,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AAEA,gCAAgB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAChD,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,kBACJ,OAAO,MAAM;AAAA,gBACjB,CAAC;AAAA,cACL;AAEA,kBAAI,MAAM,eAAe;AACrB,oBAAI,SAAS;AACT,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AACA,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,cAAc,KAAK,gBAAgB,MAAM,aAAa;AAAA,kBACtD,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,gBACpD,CAAC;AACD,2BAAW,MAAM;AACjB;AAAA,cACJ;AAAA,YACJ,QAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ,SAAS,OAAO;AACZ,qBAAW,MAAM,KAAK;AAAA,QAC1B;AAAA,MACJ;AAAA,MACA,SAAS;AACL,eAAO,OAAO;AAAA,MAClB;AAAA,IACJ,CAAC;AAED,WAAO;AAAA,MACH;AAAA,IACJ;AAAA,EACJ;AACJ;;;ACrRO,SAAS,cAAc,UAAmC,CAAC,GAAoB;AAClF,QAAM,UAAU,QAAQ,WAAW;AACnC,QAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,sGAAsG;AAAA,EAC1H;AAEA,QAAM,cAAc,CAAC,SAAiB,WAAgC,CAAC,MAAM;AACzE,WAAO,IAAI,yBAAyB,SAAS;AAAA,MACzC;AAAA,MACA;AAAA,MACA,SAAS,QAAQ;AAAA,MACjB,GAAG;AAAA,IACP,CAAC;AAAA,EACL;AAEA,QAAM,WAAW,SAAU,SAAiB,UAAgC;AACxE,WAAO,YAAY,SAAS,QAAQ;AAAA,EACxC;AAEA,WAAS,OAAO;AAEhB,SAAO;AACX;AAeO,IAAM,UAA2B,SAAU,SAAiB,UAAgC;AAC/F,QAAM,SAAS,QAAQ,IAAI;AAC3B,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,2GAA2G;AAAA,EAC/H;AACA,SAAO,IAAI,yBAAyB,SAAS;AAAA,IACzC;AAAA,IACA,SAAS;AAAA,IACT,GAAG;AAAA,EACP,CAAC;AACL;AAEA,QAAQ,OAAO;","names":[]}
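
The embedded `cencori-chat-model.ts` above pins down the wire protocol behind both `doGenerate` and `doStream`: a POST to `{baseUrl}/api/ai/chat` with the key sent in a `CENCORI_API_KEY` header and a JSON body of `{ messages, model, temperature, maxTokens, stream, userId }`. In streaming mode the server answers with SSE lines of the form `data: {"delta":"...","finish_reason":"..."}` and a final `data: [DONE]` sentinel. Below is a minimal standalone sketch of that protocol, distilled from the embedded sources; the helper names `streamCencori` and `onDelta` are illustrative only and are not part of the published SDK.

```typescript
// Sketch of the SSE protocol used by CencoriChatLanguageModel.doStream.
// Assumes the chunk shape { delta, finish_reason } from the embedded sources.
interface CencoriStreamChunk {
  delta: string;
  finish_reason?: string;
}

async function streamCencori(
  apiKey: string,
  model: string,
  messages: { role: 'system' | 'user' | 'assistant'; content: string }[],
  onDelta: (text: string) => void,
): Promise<void> {
  const response = await fetch('https://cencori.com/api/ai/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', CENCORI_API_KEY: apiKey },
    body: JSON.stringify({ messages, model, stream: true }),
  });
  if (!response.ok || !response.body) {
    throw new Error(`Cencori API error: ${response.statusText}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  for (;;) {
    const { done, value } = await reader.read();
    if (done) return;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // keep any trailing partial line for the next read
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      const data = line.slice(6);
      if (data === '[DONE]') return; // end-of-stream sentinel
      try {
        const chunk = JSON.parse(data) as CencoriStreamChunk;
        if (chunk.delta) onDelta(chunk.delta);
        if (chunk.finish_reason) return; // server-reported stop
      } catch {
        // skip malformed JSON, as the model implementation does
      }
    }
  }
}

// Example: streamCencori(key, 'gemini-2.5-flash',
//   [{ role: 'user', content: 'Hello!' }], (t) => console.log(t));
```

Two details of the implementation are worth noting. On the streaming path, token usage is only an estimate: output tokens accumulate as `Math.ceil(delta.length / 4)` per chunk (roughly four characters per token), and input tokens are never counted, so the final `finish` part reports an `inputTokens.total` of 0; `doGenerate`, by contrast, passes through the server's `prompt_tokens` and `completion_tokens`. Second, both paths normalize the server's `finish_reason` into the V3 `{ unified, raw }` shape via `mapFinishReason`. The same mapping, restated as a lookup table (behavior-equivalent to the source's `switch`; unknown reasons fall back to `'stop'`):

```typescript
// mapFinishReason from the embedded sources, rewritten as a lookup table.
type UnifiedFinishReason =
  | 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

function mapFinishReason(reason?: string): { unified: UnifiedFinishReason; raw?: string } {
  const table: Record<string, UnifiedFinishReason> = {
    stop: 'stop',
    end_turn: 'stop',
    length: 'length',
    max_tokens: 'length',
    content_filter: 'content-filter',
    tool_calls: 'tool-calls',
    'tool-calls': 'tool-calls',
    error: 'error',
  };
  return { unified: table[reason ?? ''] ?? 'stop', raw: reason };
}
```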