@cencori/ai-sdk 0.2.1

package/README.md ADDED
@@ -0,0 +1,106 @@
+ # @cencori/ai-sdk
+
+ The Cencori AI SDK — the infrastructure layer for AI applications. Works with [Vercel AI SDK](https://github.com/vercel/ai), TanStack AI, and more.
+
+ ## Installation
+
+ ```bash
+ npm install @cencori/ai-sdk ai
+ ```
+
+ ## Quick Start
+
+ ```typescript
+ import { cencori } from '@cencori/ai-sdk';
+ import { streamText } from 'ai';
+
+ const result = await streamText({
+   model: cencori('gemini-2.5-flash'),
+   messages: [{ role: 'user', content: 'Hello!' }]
+ });
+
+ for await (const chunk of result.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
+ ## Usage with Next.js App Router
+
+ ```typescript
+ // app/api/chat/route.ts
+ import { cencori } from '@cencori/ai-sdk';
+ import { streamText } from 'ai';
+
+ export async function POST(req: Request) {
+   const { messages } = await req.json();
+
+   const result = await streamText({
+     model: cencori('gemini-2.5-flash'),
+     messages
+   });
+
+   return result.toUIMessageStreamResponse();
+ }
+ ```
+
+ ## Configuration
+
+ ### Environment Variable
+
+ Set the `CENCORI_API_KEY` environment variable:
+
+ ```bash
+ CENCORI_API_KEY=csk_your_key_here
+ ```
+
+ ### Custom Configuration
+
+ ```typescript
+ import { createCencori } from '@cencori/ai-sdk';
+
+ const cencori = createCencori({
+   apiKey: 'csk_your_key_here',
+   baseUrl: 'https://cencori.com', // optional
+ });
+
+ const result = await streamText({
+   model: cencori('gpt-4o'),
+   messages: [{ role: 'user', content: 'Hello!' }]
+ });
+ ```
+
+ ## Supported Models
+
+ Use any model supported by Cencori:
+
+ | Provider | Models |
+ |----------|--------|
+ | OpenAI | `gpt-4o`, `gpt-4o-mini`, `o1` |
+ | Anthropic | `claude-3-opus`, `claude-3-5-sonnet`, `claude-3-haiku` |
+ | Google | `gemini-2.5-flash`, `gemini-2.0-flash`, `gemini-3-pro` |
+ | xAI | `grok-4`, `grok-3` |
+ | Mistral | `mistral-large`, `codestral` |
+ | DeepSeek | `deepseek-v3.2`, `deepseek-reasoner` |
+ | + More | Groq, Cohere, Perplexity, Together, Meta, Qwen, HuggingFace |
+
+ ## Why Cencori?
+
+ Unlike raw AI SDKs, Cencori gives you:
+
+ - 🔒 **Security** — PII filtering, jailbreak detection, content moderation
+ - 📊 **Observability** — Request logs, latency metrics, cost tracking
+ - 💰 **Cost Control** — Budgets, alerts, per-route analytics
+ - 🔌 **Multi-Provider** — One API key for OpenAI, Claude, Gemini, and more
+
+ ## Features
+
+ - ✅ Drop-in Vercel AI SDK compatibility
+ - ✅ Works with `streamText()`, `generateText()`, `useChat()`
+ - ✅ Built-in content safety filtering
+ - ✅ Rate limiting protection
+ - ✅ Full analytics in Cencori dashboard
+ - ✅ Multi-provider support with one API key
+
+ ## License
+
+ MIT © FohnAI
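
The README only demonstrates streaming, but the declarations below also expose `doGenerate()`, which backs `generateText()`. A minimal non-streaming sketch, assuming the same setup as the Quick Start (`CENCORI_API_KEY` set and the `ai` package installed):

```typescript
// Sketch: a non-streaming call through the same provider.
import { cencori } from '@cencori/ai-sdk';
import { generateText } from 'ai';

const { text, usage } = await generateText({
  model: cencori('gemini-2.5-flash'),
  messages: [{ role: 'user', content: 'Hello!' }]
});

console.log(text);  // full completion in one shot
console.log(usage); // token counts reported by the Cencori API
```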
package/dist/index.d.mts ADDED
@@ -0,0 +1,118 @@
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult } from '@ai-sdk/provider';
+
+ /**
+  * Cencori Chat Language Model
+  *
+  * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)
+  */
+
+ interface CencoriChatModelSettings {
+   apiKey: string;
+   baseUrl: string;
+   headers?: Record<string, string>;
+   userId?: string;
+ }
+ declare class CencoriChatLanguageModel implements LanguageModelV3 {
+   readonly specificationVersion: "v3";
+   readonly provider = "cencori";
+   readonly modelId: string;
+   readonly supportedUrls: Record<string, RegExp[]>;
+   private readonly settings;
+   constructor(modelId: string, settings: CencoriChatModelSettings);
+   private getHeaders;
+   private convertMessages;
+   private mapFinishReason;
+   private buildUsage;
+   doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+   doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
+ }
+
+ /**
+  * Types for Cencori AI Provider
+  */
+ interface CencoriProviderSettings {
+   /**
+    * Cencori API key (csk_ or cpk_ prefix)
+    */
+   apiKey?: string;
+   /**
+    * Base URL for the Cencori API
+    * @default 'https://cencori.com'
+    */
+   baseUrl?: string;
+   /**
+    * Custom headers to include in requests
+    */
+   headers?: Record<string, string>;
+ }
+ interface CencoriChatSettings {
+   /**
+    * Optional user ID for rate limiting and analytics
+    */
+   userId?: string;
+ }
+
+ /**
+  * Cencori AI Provider for Vercel AI SDK
+  *
+  * Use Cencori with streamText(), generateText(), and useChat()
+  */
+
+ interface CencoriProvider {
+   /**
+    * Create a Cencori chat model for use with Vercel AI SDK
+    *
+    * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')
+    * @param settings - Optional model-specific settings
+    * @returns A LanguageModelV3 compatible model
+    *
+    * @example
+    * import { cencori } from '@cencori/ai-sdk';
+    * import { streamText } from 'ai';
+    *
+    * const result = await streamText({
+    *   model: cencori('gemini-2.5-flash'),
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;
+   /**
+    * Create a chat model (alias for the provider function)
+    */
+   chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;
+ }
+ /**
+  * Create a Cencori provider instance
+  *
+  * @param options - Provider configuration options
+  * @returns A Cencori provider
+  *
+  * @example
+  * import { createCencori } from '@cencori/ai-sdk';
+  *
+  * const cencori = createCencori({
+  *   apiKey: process.env.CENCORI_API_KEY
+  * });
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare function createCencori(options?: CencoriProviderSettings): CencoriProvider;
+ /**
+  * Default Cencori provider instance
+  * Uses CENCORI_API_KEY environment variable (lazy initialization)
+  *
+  * @example
+  * import { cencori } from '@cencori/ai-sdk';
+  * import { streamText } from 'ai';
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare const cencori: CencoriProvider;
+
+ export { CencoriChatLanguageModel, type CencoriChatSettings, type CencoriProvider, type CencoriProviderSettings, cencori, createCencori };
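
These declarations are the whole public surface: a callable provider with a `.chat` alias, provider-level `apiKey`/`baseUrl`/`headers`, and a per-model `userId`. A sketch combining them; the `X-Request-Source` header name and the `userId` value are invented for illustration, not part of the SDK. (The next hunk repeats the same declarations; tsup typically emits an identical `.d.mts`/`.d.ts` pair for the ESM and CommonJS entry points.)

```typescript
// Sketch only: provider-level settings plus per-model settings from above.
import { createCencori } from '@cencori/ai-sdk';
import { streamText } from 'ai';

const cencori = createCencori({
  apiKey: process.env.CENCORI_API_KEY,
  headers: { 'X-Request-Source': 'chat-service' } // merged into every request
});

// `userId` is forwarded for rate limiting and per-user analytics.
const result = await streamText({
  model: cencori.chat('claude-3-5-sonnet', { userId: 'user_123' }),
  messages: [{ role: 'user', content: 'Hello!' }]
});
```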
package/dist/index.d.ts ADDED
@@ -0,0 +1,118 @@
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult } from '@ai-sdk/provider';
+
+ /**
+  * Cencori Chat Language Model
+  *
+  * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)
+  */
+
+ interface CencoriChatModelSettings {
+   apiKey: string;
+   baseUrl: string;
+   headers?: Record<string, string>;
+   userId?: string;
+ }
+ declare class CencoriChatLanguageModel implements LanguageModelV3 {
+   readonly specificationVersion: "v3";
+   readonly provider = "cencori";
+   readonly modelId: string;
+   readonly supportedUrls: Record<string, RegExp[]>;
+   private readonly settings;
+   constructor(modelId: string, settings: CencoriChatModelSettings);
+   private getHeaders;
+   private convertMessages;
+   private mapFinishReason;
+   private buildUsage;
+   doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+   doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
+ }
+
+ /**
+  * Types for Cencori AI Provider
+  */
+ interface CencoriProviderSettings {
+   /**
+    * Cencori API key (csk_ or cpk_ prefix)
+    */
+   apiKey?: string;
+   /**
+    * Base URL for the Cencori API
+    * @default 'https://cencori.com'
+    */
+   baseUrl?: string;
+   /**
+    * Custom headers to include in requests
+    */
+   headers?: Record<string, string>;
+ }
+ interface CencoriChatSettings {
+   /**
+    * Optional user ID for rate limiting and analytics
+    */
+   userId?: string;
+ }
+
+ /**
+  * Cencori AI Provider for Vercel AI SDK
+  *
+  * Use Cencori with streamText(), generateText(), and useChat()
+  */
+
+ interface CencoriProvider {
+   /**
+    * Create a Cencori chat model for use with Vercel AI SDK
+    *
+    * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')
+    * @param settings - Optional model-specific settings
+    * @returns A LanguageModelV3 compatible model
+    *
+    * @example
+    * import { cencori } from '@cencori/ai-sdk';
+    * import { streamText } from 'ai';
+    *
+    * const result = await streamText({
+    *   model: cencori('gemini-2.5-flash'),
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;
+   /**
+    * Create a chat model (alias for the provider function)
+    */
+   chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;
+ }
+ /**
+  * Create a Cencori provider instance
+  *
+  * @param options - Provider configuration options
+  * @returns A Cencori provider
+  *
+  * @example
+  * import { createCencori } from '@cencori/ai-sdk';
+  *
+  * const cencori = createCencori({
+  *   apiKey: process.env.CENCORI_API_KEY
+  * });
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare function createCencori(options?: CencoriProviderSettings): CencoriProvider;
+ /**
+  * Default Cencori provider instance
+  * Uses CENCORI_API_KEY environment variable (lazy initialization)
+  *
+  * @example
+  * import { cencori } from '@cencori/ai-sdk';
+  * import { streamText } from 'ai';
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare const cencori: CencoriProvider;
+
+ export { CencoriChatLanguageModel, type CencoriChatSettings, type CencoriProvider, type CencoriProviderSettings, cencori, createCencori };
package/dist/index.js ADDED
@@ -0,0 +1,305 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+   CencoriChatLanguageModel: () => CencoriChatLanguageModel,
+   cencori: () => cencori,
+   createCencori: () => createCencori
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/cencori-chat-model.ts
+ var CencoriChatLanguageModel = class {
+   constructor(modelId, settings) {
+     this.specificationVersion = "v3";
+     this.provider = "cencori";
+     this.supportedUrls = {};
+     this.modelId = modelId;
+     this.settings = settings;
+   }
+   getHeaders() {
+     return {
+       "Content-Type": "application/json",
+       "CENCORI_API_KEY": this.settings.apiKey,
+       ...this.settings.headers
+     };
+   }
+   convertMessages(options) {
+     const messages = [];
+     const promptMessages = options.prompt;
+     if (!promptMessages || !Array.isArray(promptMessages)) {
+       return messages;
+     }
+     for (const msg of promptMessages) {
+       let content = "";
+       if (msg.role === "system") {
+         content = msg.content;
+       } else if (msg.role === "user" || msg.role === "assistant") {
+         const msgContent = msg.content;
+         if (Array.isArray(msgContent)) {
+           content = msgContent.filter((part) => part.type === "text").map((part) => part.text || "").join("");
+         } else if (typeof msgContent === "string") {
+           content = msgContent;
+         }
+       }
+       if (content && (msg.role === "system" || msg.role === "user" || msg.role === "assistant")) {
+         messages.push({
+           role: msg.role,
+           content
+         });
+       }
+     }
+     return messages;
+   }
+   mapFinishReason(reason) {
+     let unified;
+     switch (reason) {
+       case "stop":
+       case "end_turn":
+         unified = "stop";
+         break;
+       case "length":
+       case "max_tokens":
+         unified = "length";
+         break;
+       case "content_filter":
+         unified = "content-filter";
+         break;
+       case "tool_calls":
+       case "tool-calls":
+         unified = "tool-calls";
+         break;
+       case "error":
+         unified = "error";
+         break;
+       default:
+         unified = "stop";
+     }
+     return { unified, raw: reason };
+   }
+   buildUsage(inputTokens, outputTokens) {
+     return {
+       inputTokens: {
+         total: inputTokens,
+         noCache: inputTokens,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: outputTokens,
+         text: outputTokens,
+         reasoning: void 0
+       }
+     };
+   }
+   async doGenerate(options) {
+     const messages = this.convertMessages(options);
+     const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: this.getHeaders(),
+       body: JSON.stringify({
+         messages,
+         model: this.modelId,
+         temperature: options.temperature,
+         maxTokens: options.maxOutputTokens,
+         stream: false,
+         userId: this.settings.userId
+       }),
+       signal: options.abortSignal
+     });
+     if (!response.ok) {
+       const error = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${error.error || response.statusText}`);
+     }
+     const data = await response.json();
+     const content = [{
+       type: "text",
+       text: data.content,
+       providerMetadata: void 0
+     }];
+     const warnings = [];
+     return {
+       content,
+       finishReason: this.mapFinishReason(data.finish_reason),
+       usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),
+       warnings
+     };
+   }
+   async doStream(options) {
+     const messages = this.convertMessages(options);
+     const self = this;
+     const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: this.getHeaders(),
+       body: JSON.stringify({
+         messages,
+         model: this.modelId,
+         temperature: options.temperature,
+         maxTokens: options.maxOutputTokens,
+         stream: true,
+         userId: this.settings.userId
+       }),
+       signal: options.abortSignal
+     });
+     if (!response.ok) {
+       const error = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${error.error || response.statusText}`);
+     }
+     const reader = response.body?.getReader();
+     if (!reader) {
+       throw new Error("Response body is null");
+     }
+     const decoder = new TextDecoder();
+     let buffer = "";
+     let inputTokens = 0;
+     let outputTokens = 0;
+     const textPartId = "text-0";
+     let started = false;
+     const stream = new ReadableStream({
+       async pull(controller) {
+         try {
+           const { done, value } = await reader.read();
+           if (done) {
+             if (started) {
+               controller.enqueue({
+                 type: "text-end",
+                 id: textPartId
+               });
+             }
+             controller.enqueue({
+               type: "finish",
+               finishReason: self.mapFinishReason("stop"),
+               usage: self.buildUsage(inputTokens, outputTokens)
+             });
+             controller.close();
+             return;
+           }
+           buffer += decoder.decode(value, { stream: true });
+           const lines = buffer.split("\n");
+           buffer = lines.pop() || "";
+           for (const line of lines) {
+             if (line.trim() === "") continue;
+             if (!line.startsWith("data: ")) continue;
+             const data = line.slice(6);
+             if (data === "[DONE]") {
+               if (started) {
+                 controller.enqueue({
+                   type: "text-end",
+                   id: textPartId
+                 });
+               }
+               controller.enqueue({
+                 type: "finish",
+                 finishReason: self.mapFinishReason("stop"),
+                 usage: self.buildUsage(inputTokens, outputTokens)
+               });
+               controller.close();
+               return;
+             }
+             try {
+               const chunk = JSON.parse(data);
+               if (chunk.delta) {
+                 if (!started) {
+                   started = true;
+                   controller.enqueue({
+                     type: "text-start",
+                     id: textPartId
+                   });
+                 }
+                 outputTokens += Math.ceil(chunk.delta.length / 4);
+                 controller.enqueue({
+                   type: "text-delta",
+                   id: textPartId,
+                   delta: chunk.delta
+                 });
+               }
+               if (chunk.finish_reason) {
+                 if (started) {
+                   controller.enqueue({
+                     type: "text-end",
+                     id: textPartId
+                   });
+                 }
+                 controller.enqueue({
+                   type: "finish",
+                   finishReason: self.mapFinishReason(chunk.finish_reason),
+                   usage: self.buildUsage(inputTokens, outputTokens)
+                 });
+                 controller.close();
+                 return;
+               }
+             } catch {
+             }
+           }
+         } catch (error) {
+           controller.error(error);
+         }
+       },
+       cancel() {
+         reader.cancel();
+       }
+     });
+     return {
+       stream
+     };
+   }
+ };
+
+ // src/cencori-provider.ts
+ function createCencori(options = {}) {
+   const baseUrl = options.baseUrl ?? "https://cencori.com";
+   const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
+   if (!apiKey) {
+     throw new Error("Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.");
+   }
+   const createModel = (modelId, settings = {}) => {
+     return new CencoriChatLanguageModel(modelId, {
+       apiKey,
+       baseUrl,
+       headers: options.headers,
+       ...settings
+     });
+   };
+   const provider = function(modelId, settings) {
+     return createModel(modelId, settings);
+   };
+   provider.chat = createModel;
+   return provider;
+ }
+ var cencori = function(modelId, settings) {
+   const apiKey = process.env.CENCORI_API_KEY;
+   if (!apiKey) {
+     throw new Error('CENCORI_API_KEY environment variable is required. Set it or use createCencori({ apiKey: "..." }) instead.');
+   }
+   return new CencoriChatLanguageModel(modelId, {
+     apiKey,
+     baseUrl: "https://cencori.com",
+     ...settings
+   });
+ };
+ cencori.chat = cencori;
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   CencoriChatLanguageModel,
+   cencori,
+   createCencori
+ });
+ //# sourceMappingURL=index.js.map
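
The `doStream()` implementation above implies the wire format of `POST /api/ai/chat` when `stream: true`: newline-delimited `data: ` lines carrying JSON objects with a `delta` and an optional `finish_reason`, terminated by a `data: [DONE]` sentinel. A hedged reconstruction of a stream the parser would accept; the exact payloads are inferred from the parsing loop, not documented by the package:

```typescript
// Inferred shape: each "data: " line is JSON with a `delta`;
// `finish_reason` (or "[DONE]") ends the stream.
const sampleStream = [
  'data: {"delta":"Hel"}',
  'data: {"delta":"lo!"}',
  'data: {"finish_reason":"stop"}',
  'data: [DONE]'
].join('\n');

for (const line of sampleStream.split('\n')) {
  const payload = line.slice('data: '.length);
  if (payload === '[DONE]') break;
  console.log(JSON.parse(payload)); // { delta: "Hel" } ... { finish_reason: "stop" }
}
```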
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/index.ts","../src/cencori-chat-model.ts","../src/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nexport { cencori, createCencori } from './cencori-provider';\nexport type { CencoriProvider } from './cencori-provider';\nexport type { CencoriProviderSettings, CencoriChatSettings } from './types';\nexport { CencoriChatLanguageModel } from './cencori-chat-model';\n","/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 
'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n total: inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if 
(!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. 
Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACgDO,IAAM,2BAAN,MAA0D;AAAA,EAQ7D,YAAY,SAAiB,UAAoC;AAPjE,SAAS,uBAAuB;AAChC,SAAS,WAAW;AAGpB,SAAS,gBAA0C,CAAC;AAIhD,SAAK,UAAU;AACf,SAAK,WAAW;AAAA,EACpB;AAAA,EAEQ,aAAqC;AACzC,WAAO;AAAA,MACH,gBAAgB;AAAA,MAChB,mBAAmB,KAAK,SAAS;AAAA,MACjC,GAAG,KAAK,SAAS;AAAA,IACrB;AAAA,EACJ;AAAA,EAEQ,gBAAgB,SAAuD;AAC3E,UAAM,WAA6B,CAAC;AAGpC,UAAM,iBAAiB,QAAQ;AAE/B,QAAI,CAAC,kBAAkB,CAAC,MAAM,QAAQ,cAAc,GAAG;AACnD,aAAO;AAAA,IACX;AAEA,eAAW,OAAO,gBAAgB;AAC9B,UAAI,UAAU;AAEd,UAAI,IAAI,SAAS,UAAU;AAEvB,kBAAU,IAAI;AAAA,MAClB,WAAW,IAAI,SAAS,UAAU,IAAI,SAAS,aAAa;AAExD,cAAM,aAAa,IAAI;AACvB,YAAI,MAAM,QAAQ,UAAU,GAAG;AAC3B,oBAAU,WACL,OAAO,CAAC,SAA2B,KAAK,SAAS,MAAM,EACvD,IAAI,CAAC,SAA0C,KAAK,QAAQ,EAAE,EAC9D,KAAK,EAAE;AAAA,QAChB,WAAW,OAAO,eAAe,UAAU;AACvC,oBAAU;AAAA,QACd;AAAA,MACJ;AAEA,UAAI,YAAY,IAAI,SAAS,YAAY,IAAI,SAAS,UAAU,IAAI,SAAS,cAAc;AACvF,iBAAS,KAAK;AAAA,UACV,MAAM,IAAI;AAAA,UACV;AAAA,QACJ,CAAC;AAAA,MACL;AAAA,IACJ;AAEA,WAAO;AAAA,EACX;AAAA,EAEQ,gBAAgB,QAA8C;AAClE,QAAI;AAEJ,YAAQ,QAAQ;AAAA,MACZ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ;AACI,kBAAU;AAAA,IAClB;AAEA,WAAO,EAAE,SAAS,KAAK,OAAO;AAAA,EAClC;AAAA,EAEQ,WAAW,aAAqB,cAA4C;AAChF,WAAO;AAAA,MACH,aAAa;AAAA,QACT,OAAO;AAAA,QACP,SAAS;AAAA,QACT,WAAW;AAAA,QACX,YAAY;AAAA,MAChB;AAAA,MACA,cAAc;AAAA,QACV,OAAO;AAAA,QACP,MAAM;AAAA,QACN,WAAW;AAAA,MACf;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,WAAW,SAA6E;AAC1F,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAE7C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,UAAM,UAAoC,CAAC;AAAA,MACvC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,MACX,kBAAkB;AAAA,IACtB,CAAC;AAED,UAAM,WAA8B,CAAC;AAErC,WAAO;AAAA,MACH;AAAA,MACA,cAAc,KAAK,gBAAgB,KAAK,aAAa;AAAA,MACrD,OAAO,KAAK
,WAAW,KAAK,MAAM,eAAe,KAAK,MAAM,iBAAiB;AAAA,MAC7E;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,SAAS,SAA2E;AACtF,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAC7C,UAAM,OAAO;AAEb,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,cAAc;AAClB,QAAI,eAAe;AACnB,UAAM,aAAa;AACnB,QAAI,UAAU;AAEd,UAAM,SAAS,IAAI,eAA0C;AAAA,MACzD,MAAM,KAAK,YAAY;AACnB,YAAI;AACA,gBAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,cAAI,MAAM;AAEN,gBAAI,SAAS;AACT,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,IAAI;AAAA,cACR,CAAC;AAAA,YACL;AACA,uBAAW,QAAQ;AAAA,cACf,MAAM;AAAA,cACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,cACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,YACpD,CAAC;AACD,uBAAW,MAAM;AACjB;AAAA,UACJ;AAEA,oBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,gBAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,mBAAS,MAAM,IAAI,KAAK;AAExB,qBAAW,QAAQ,OAAO;AACtB,gBAAI,KAAK,KAAK,MAAM,GAAI;AACxB,gBAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,kBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,gBAAI,SAAS,UAAU;AACnB,kBAAI,SAAS;AACT,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,gBACR,CAAC;AAAA,cACL;AACA,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,gBACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,cACpD,CAAC;AACD,yBAAW,MAAM;AACjB;AAAA,YACJ;AAEA,gBAAI;AACA,oBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,kBAAI,MAAM,OAAO;AAEb,oBAAI,CAAC,SAAS;AACV,4BAAU;AACV,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AAEA,gCAAgB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAChD,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,kBACJ,OAAO,MAAM;AAAA,gBACjB,CAAC;AAAA,cACL;AAEA,kBAAI,MAAM,eAAe;AACrB,oBAAI,SAAS;AACT,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AACA,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,cAAc,KAAK,gBAAgB,MAAM,aAAa;AAAA,kBACtD,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,gBACpD,CAAC;AACD,2BAAW,MAAM;AACjB;AAAA,cACJ;AAAA,YACJ,QAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ,SAAS,OAAO;AACZ,qBAAW,MAAM,KAAK;AAAA,QAC1B;AAAA,MACJ;AAAA,MACA,SAAS;AACL,eAAO,OAAO;AAAA,MAClB;AAAA,IACJ,CAAC;AAED,WAAO;AAAA,MACH;AAAA,IACJ;AAAA,EACJ;AACJ;;;ACrRO,SAAS,cAAc,UAAmC,CAAC,GAAoB;AAClF,QAAM,UAAU,QAAQ,WAAW;AACnC,QAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,sGAAsG;AAAA,EAC1H;AAEA,QAAM,cAAc,CAAC,SAAiB,WAAgC,CAAC,MAAM;AACzE,WAAO,IAAI,yBAAyB,SAAS;AAAA,MACzC;AAAA,MACA;AAAA,MACA,SAAS,QAAQ;AAAA,MACjB,GAAG;AAAA,IACP,CAAC;AAAA,EACL;AAEA,QAAM,WAAW,SAAU,SAAiB,UAAgC;AACxE,WAAO,YAAY,SAAS,QAAQ;AAAA,EACxC;AAEA,WAAS,OAAO;AAEhB,SAAO;AACX;AAeO,IAAM,UAA2B,SAAU,SAAiB,UAAgC;AAC/F,QAAM,SAAS,QAAQ,IAAI;AAC3B,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,2GAA2G;AAAA,EAC/H;AACA,SAAO,IAAI,yBAAyB,SAAS;AAAA,IACzC;AAAA,IACA,SAAS;AAAA,IACT,GAAG;AAAA,EACP,CAAC;AACL;AAEA,QAAQ,OAAO;","names":[]}
package/dist/index.mjs ADDED
@@ -0,0 +1,276 @@
+ // src/cencori-chat-model.ts
+ var CencoriChatLanguageModel = class {
+   constructor(modelId, settings) {
+     this.specificationVersion = "v3";
+     this.provider = "cencori";
+     this.supportedUrls = {};
+     this.modelId = modelId;
+     this.settings = settings;
+   }
+   getHeaders() {
+     return {
+       "Content-Type": "application/json",
+       "CENCORI_API_KEY": this.settings.apiKey,
+       ...this.settings.headers
+     };
+   }
+   convertMessages(options) {
+     const messages = [];
+     const promptMessages = options.prompt;
+     if (!promptMessages || !Array.isArray(promptMessages)) {
+       return messages;
+     }
+     for (const msg of promptMessages) {
+       let content = "";
+       if (msg.role === "system") {
+         content = msg.content;
+       } else if (msg.role === "user" || msg.role === "assistant") {
+         const msgContent = msg.content;
+         if (Array.isArray(msgContent)) {
+           content = msgContent.filter((part) => part.type === "text").map((part) => part.text || "").join("");
+         } else if (typeof msgContent === "string") {
+           content = msgContent;
+         }
+       }
+       if (content && (msg.role === "system" || msg.role === "user" || msg.role === "assistant")) {
+         messages.push({
+           role: msg.role,
+           content
+         });
+       }
+     }
+     return messages;
+   }
+   mapFinishReason(reason) {
+     let unified;
+     switch (reason) {
+       case "stop":
+       case "end_turn":
+         unified = "stop";
+         break;
+       case "length":
+       case "max_tokens":
+         unified = "length";
+         break;
+       case "content_filter":
+         unified = "content-filter";
+         break;
+       case "tool_calls":
+       case "tool-calls":
+         unified = "tool-calls";
+         break;
+       case "error":
+         unified = "error";
+         break;
+       default:
+         unified = "stop";
+     }
+     return { unified, raw: reason };
+   }
+   buildUsage(inputTokens, outputTokens) {
+     return {
+       inputTokens: {
+         total: inputTokens,
+         noCache: inputTokens,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: outputTokens,
+         text: outputTokens,
+         reasoning: void 0
+       }
+     };
+   }
+   async doGenerate(options) {
+     const messages = this.convertMessages(options);
+     const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: this.getHeaders(),
+       body: JSON.stringify({
+         messages,
+         model: this.modelId,
+         temperature: options.temperature,
+         maxTokens: options.maxOutputTokens,
+         stream: false,
+         userId: this.settings.userId
+       }),
+       signal: options.abortSignal
+     });
+     if (!response.ok) {
+       const error = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${error.error || response.statusText}`);
+     }
+     const data = await response.json();
+     const content = [{
+       type: "text",
+       text: data.content,
+       providerMetadata: void 0
+     }];
+     const warnings = [];
+     return {
+       content,
+       finishReason: this.mapFinishReason(data.finish_reason),
+       usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),
+       warnings
+     };
+   }
+   async doStream(options) {
+     const messages = this.convertMessages(options);
+     const self = this;
+     const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: this.getHeaders(),
+       body: JSON.stringify({
+         messages,
+         model: this.modelId,
+         temperature: options.temperature,
+         maxTokens: options.maxOutputTokens,
+         stream: true,
+         userId: this.settings.userId
+       }),
+       signal: options.abortSignal
+     });
+     if (!response.ok) {
+       const error = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${error.error || response.statusText}`);
+     }
+     const reader = response.body?.getReader();
+     if (!reader) {
+       throw new Error("Response body is null");
+     }
+     const decoder = new TextDecoder();
+     let buffer = "";
+     let inputTokens = 0;
+     let outputTokens = 0;
+     const textPartId = "text-0";
+     let started = false;
+     const stream = new ReadableStream({
+       async pull(controller) {
+         try {
+           const { done, value } = await reader.read();
+           if (done) {
+             if (started) {
+               controller.enqueue({
+                 type: "text-end",
+                 id: textPartId
+               });
+             }
+             controller.enqueue({
+               type: "finish",
+               finishReason: self.mapFinishReason("stop"),
+               usage: self.buildUsage(inputTokens, outputTokens)
+             });
+             controller.close();
+             return;
+           }
+           buffer += decoder.decode(value, { stream: true });
+           const lines = buffer.split("\n");
+           buffer = lines.pop() || "";
+           for (const line of lines) {
+             if (line.trim() === "") continue;
+             if (!line.startsWith("data: ")) continue;
+             const data = line.slice(6);
+             if (data === "[DONE]") {
+               if (started) {
+                 controller.enqueue({
+                   type: "text-end",
+                   id: textPartId
+                 });
+               }
+               controller.enqueue({
+                 type: "finish",
+                 finishReason: self.mapFinishReason("stop"),
+                 usage: self.buildUsage(inputTokens, outputTokens)
+               });
+               controller.close();
+               return;
+             }
+             try {
+               const chunk = JSON.parse(data);
+               if (chunk.delta) {
+                 if (!started) {
+                   started = true;
+                   controller.enqueue({
+                     type: "text-start",
+                     id: textPartId
+                   });
+                 }
+                 outputTokens += Math.ceil(chunk.delta.length / 4);
+                 controller.enqueue({
+                   type: "text-delta",
+                   id: textPartId,
+                   delta: chunk.delta
+                 });
+               }
+               if (chunk.finish_reason) {
+                 if (started) {
+                   controller.enqueue({
+                     type: "text-end",
+                     id: textPartId
+                   });
+                 }
+                 controller.enqueue({
+                   type: "finish",
+                   finishReason: self.mapFinishReason(chunk.finish_reason),
+                   usage: self.buildUsage(inputTokens, outputTokens)
+                 });
+                 controller.close();
+                 return;
+               }
+             } catch {
+             }
+           }
+         } catch (error) {
+           controller.error(error);
+         }
+       },
+       cancel() {
+         reader.cancel();
+       }
+     });
+     return {
+       stream
+     };
+   }
+ };
+
+ // src/cencori-provider.ts
+ function createCencori(options = {}) {
+   const baseUrl = options.baseUrl ?? "https://cencori.com";
+   const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
+   if (!apiKey) {
+     throw new Error("Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.");
+   }
+   const createModel = (modelId, settings = {}) => {
+     return new CencoriChatLanguageModel(modelId, {
+       apiKey,
+       baseUrl,
+       headers: options.headers,
+       ...settings
+     });
+   };
+   const provider = function(modelId, settings) {
+     return createModel(modelId, settings);
+   };
+   provider.chat = createModel;
+   return provider;
+ }
+ var cencori = function(modelId, settings) {
+   const apiKey = process.env.CENCORI_API_KEY;
+   if (!apiKey) {
+     throw new Error('CENCORI_API_KEY environment variable is required. Set it or use createCencori({ apiKey: "..." }) instead.');
+   }
+   return new CencoriChatLanguageModel(modelId, {
+     apiKey,
+     baseUrl: "https://cencori.com",
+     ...settings
+   });
+ };
+ cencori.chat = cencori;
+ export {
+   CencoriChatLanguageModel,
+   cencori,
+   createCencori
+ };
+ //# sourceMappingURL=index.mjs.map
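
The ESM bundle carries the same logic as the CommonJS build, so the same caveat applies to streaming usage: `doGenerate()` returns exact provider-reported token counts, while `doStream()` estimates output tokens as `Math.ceil(delta.length / 4)` per chunk and never counts input tokens. A quick illustration of how rough that estimate is:

```typescript
// The streaming path approximates tokens at ~4 characters each and leaves
// inputTokens at 0, so streamed usage is a coarse estimate.
const deltas = ['Hel', 'lo,', ' world!'];
const estimated = deltas.reduce((n, d) => n + Math.ceil(d.length / 4), 0);
console.log(estimated); // 4 "tokens" for 13 characters; doGenerate() reports exact counts
```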
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/cencori-chat-model.ts","../src/cencori-provider.ts"],"sourcesContent":["/**\n * Cencori Chat Language Model\n * \n * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)\n */\n\nimport type {\n LanguageModelV3,\n LanguageModelV3CallOptions,\n LanguageModelV3GenerateResult,\n LanguageModelV3StreamResult,\n LanguageModelV3StreamPart,\n LanguageModelV3Content,\n LanguageModelV3Usage,\n LanguageModelV3FinishReason,\n SharedV3Warning,\n} from '@ai-sdk/provider';\n\nexport interface CencoriChatModelSettings {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n userId?: string;\n}\n\ninterface CencoriMessage {\n role: 'system' | 'user' | 'assistant';\n content: string;\n}\n\ninterface CencoriResponse {\n content: string;\n model: string;\n provider: string;\n usage: {\n prompt_tokens: number;\n completion_tokens: number;\n total_tokens: number;\n };\n cost_usd: number;\n finish_reason?: string;\n}\n\ninterface CencoriStreamChunk {\n delta: string;\n finish_reason?: string;\n}\n\nexport class CencoriChatLanguageModel implements LanguageModelV3 {\n readonly specificationVersion = 'v3' as const;\n readonly provider = 'cencori';\n\n readonly modelId: string;\n readonly supportedUrls: Record<string, RegExp[]> = {};\n private readonly settings: CencoriChatModelSettings;\n\n constructor(modelId: string, settings: CencoriChatModelSettings) {\n this.modelId = modelId;\n this.settings = settings;\n }\n\n private getHeaders(): Record<string, string> {\n return {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.settings.apiKey,\n ...this.settings.headers,\n };\n }\n\n private convertMessages(options: LanguageModelV3CallOptions): CencoriMessage[] {\n const messages: CencoriMessage[] = [];\n\n // In V3, options.prompt is directly an array of LanguageModelV3Message\n const promptMessages = options.prompt;\n\n if (!promptMessages || !Array.isArray(promptMessages)) {\n return messages;\n }\n\n for (const msg of promptMessages) {\n let content = '';\n\n if (msg.role === 'system') {\n // System messages have content as string directly\n content = msg.content as string;\n } else if (msg.role === 'user' || msg.role === 'assistant') {\n // User and assistant messages have content as array of parts\n const msgContent = msg.content;\n if (Array.isArray(msgContent)) {\n content = msgContent\n .filter((part: { type: string }) => part.type === 'text')\n .map((part: { type: string; text?: string }) => part.text || '')\n .join('');\n } else if (typeof msgContent === 'string') {\n content = msgContent;\n }\n }\n\n if (content && (msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant')) {\n messages.push({\n role: msg.role as 'system' | 'user' | 'assistant',\n content,\n });\n }\n }\n\n return messages;\n }\n\n private mapFinishReason(reason?: string): LanguageModelV3FinishReason {\n let unified: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';\n\n switch (reason) {\n case 'stop':\n case 'end_turn':\n unified = 'stop';\n break;\n case 'length':\n case 'max_tokens':\n unified = 'length';\n break;\n case 'content_filter':\n unified = 'content-filter';\n break;\n case 'tool_calls':\n case 'tool-calls':\n unified = 'tool-calls';\n break;\n case 'error':\n unified = 'error';\n break;\n default:\n unified = 'stop';\n }\n\n return { unified, raw: reason };\n }\n\n private buildUsage(inputTokens: number, outputTokens: number): LanguageModelV3Usage {\n return {\n inputTokens: {\n total: 
inputTokens,\n noCache: inputTokens,\n cacheRead: undefined,\n cacheWrite: undefined,\n },\n outputTokens: {\n total: outputTokens,\n text: outputTokens,\n reasoning: undefined,\n },\n };\n }\n\n async doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult> {\n const messages = this.convertMessages(options);\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: false,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const data = await response.json() as CencoriResponse;\n\n const content: LanguageModelV3Content[] = [{\n type: 'text',\n text: data.content,\n providerMetadata: undefined,\n }];\n\n const warnings: SharedV3Warning[] = [];\n\n return {\n content,\n finishReason: this.mapFinishReason(data.finish_reason),\n usage: this.buildUsage(data.usage.prompt_tokens, data.usage.completion_tokens),\n warnings,\n };\n }\n\n async doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult> {\n const messages = this.convertMessages(options);\n const self = this;\n\n const response = await fetch(`${this.settings.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: this.getHeaders(),\n body: JSON.stringify({\n messages,\n model: this.modelId,\n temperature: options.temperature,\n maxTokens: options.maxOutputTokens,\n stream: true,\n userId: this.settings.userId,\n }),\n signal: options.abortSignal,\n });\n\n if (!response.ok) {\n const error = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${error.error || response.statusText}`);\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let buffer = '';\n let inputTokens = 0;\n let outputTokens = 0;\n const textPartId = 'text-0';\n let started = false;\n\n const stream = new ReadableStream<LanguageModelV3StreamPart>({\n async pull(controller) {\n try {\n const { done, value } = await reader.read();\n\n if (done) {\n // End text block and finish\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if (!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason('stop'),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n\n try {\n const chunk = JSON.parse(data) as CencoriStreamChunk;\n\n if (chunk.delta) {\n // Start text if not started\n if (!started) {\n started = true;\n controller.enqueue({\n type: 'text-start',\n id: 
textPartId,\n });\n }\n\n outputTokens += Math.ceil(chunk.delta.length / 4); // Rough estimate\n controller.enqueue({\n type: 'text-delta',\n id: textPartId,\n delta: chunk.delta,\n });\n }\n\n if (chunk.finish_reason) {\n if (started) {\n controller.enqueue({\n type: 'text-end',\n id: textPartId,\n });\n }\n controller.enqueue({\n type: 'finish',\n finishReason: self.mapFinishReason(chunk.finish_reason),\n usage: self.buildUsage(inputTokens, outputTokens),\n });\n controller.close();\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n } catch (error) {\n controller.error(error);\n }\n },\n cancel() {\n reader.cancel();\n },\n });\n\n return {\n stream,\n };\n }\n}\n","/**\n * Cencori AI Provider for Vercel AI SDK\n * \n * Use Cencori with streamText(), generateText(), and useChat()\n */\n\nimport { CencoriChatLanguageModel } from './cencori-chat-model';\nimport type { CencoriProviderSettings, CencoriChatSettings } from './types';\n\nexport interface CencoriProvider {\n /**\n * Create a Cencori chat model for use with Vercel AI SDK\n * \n * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')\n * @param settings - Optional model-specific settings\n * @returns A LanguageModelV1 compatible model\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;\n\n /**\n * Create a chat model (alias for the provider function)\n */\n chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;\n}\n\n/**\n * Create a Cencori provider instance\n * \n * @param options - Provider configuration options\n * @returns A Cencori provider\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk';\n * \n * const cencori = createCencori({\n * apiKey: process.env.CENCORI_API_KEY\n * });\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function createCencori(options: CencoriProviderSettings = {}): CencoriProvider {\n const baseUrl = options.baseUrl ?? 'https://cencori.com';\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n\n if (!apiKey) {\n throw new Error('Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.');\n }\n\n const createModel = (modelId: string, settings: CencoriChatSettings = {}) => {\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl,\n headers: options.headers,\n ...settings,\n });\n };\n\n const provider = function (modelId: string, settings?: CencoriChatSettings) {\n return createModel(modelId, settings);\n } as CencoriProvider;\n\n provider.chat = createModel;\n\n return provider;\n}\n\n/**\n * Default Cencori provider instance\n * Uses CENCORI_API_KEY environment variable (lazy initialization)\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk';\n * import { streamText } from 'ai';\n * \n * const result = await streamText({\n * model: cencori('gemini-2.5-flash'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport const cencori: CencoriProvider = function (modelId: string, settings?: CencoriChatSettings) {\n const apiKey = process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error('CENCORI_API_KEY environment variable is required. 
Set it or use createCencori({ apiKey: \"...\" }) instead.');\n }\n return new CencoriChatLanguageModel(modelId, {\n apiKey,\n baseUrl: 'https://cencori.com',\n ...settings,\n });\n} as CencoriProvider;\n\ncencori.chat = cencori;\n"],"mappings":";AAgDO,IAAM,2BAAN,MAA0D;AAAA,EAQ7D,YAAY,SAAiB,UAAoC;AAPjE,SAAS,uBAAuB;AAChC,SAAS,WAAW;AAGpB,SAAS,gBAA0C,CAAC;AAIhD,SAAK,UAAU;AACf,SAAK,WAAW;AAAA,EACpB;AAAA,EAEQ,aAAqC;AACzC,WAAO;AAAA,MACH,gBAAgB;AAAA,MAChB,mBAAmB,KAAK,SAAS;AAAA,MACjC,GAAG,KAAK,SAAS;AAAA,IACrB;AAAA,EACJ;AAAA,EAEQ,gBAAgB,SAAuD;AAC3E,UAAM,WAA6B,CAAC;AAGpC,UAAM,iBAAiB,QAAQ;AAE/B,QAAI,CAAC,kBAAkB,CAAC,MAAM,QAAQ,cAAc,GAAG;AACnD,aAAO;AAAA,IACX;AAEA,eAAW,OAAO,gBAAgB;AAC9B,UAAI,UAAU;AAEd,UAAI,IAAI,SAAS,UAAU;AAEvB,kBAAU,IAAI;AAAA,MAClB,WAAW,IAAI,SAAS,UAAU,IAAI,SAAS,aAAa;AAExD,cAAM,aAAa,IAAI;AACvB,YAAI,MAAM,QAAQ,UAAU,GAAG;AAC3B,oBAAU,WACL,OAAO,CAAC,SAA2B,KAAK,SAAS,MAAM,EACvD,IAAI,CAAC,SAA0C,KAAK,QAAQ,EAAE,EAC9D,KAAK,EAAE;AAAA,QAChB,WAAW,OAAO,eAAe,UAAU;AACvC,oBAAU;AAAA,QACd;AAAA,MACJ;AAEA,UAAI,YAAY,IAAI,SAAS,YAAY,IAAI,SAAS,UAAU,IAAI,SAAS,cAAc;AACvF,iBAAS,KAAK;AAAA,UACV,MAAM,IAAI;AAAA,UACV;AAAA,QACJ,CAAC;AAAA,MACL;AAAA,IACJ;AAEA,WAAO;AAAA,EACX;AAAA,EAEQ,gBAAgB,QAA8C;AAClE,QAAI;AAEJ,YAAQ,QAAQ;AAAA,MACZ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AAAA,MACL,KAAK;AACD,kBAAU;AACV;AAAA,MACJ,KAAK;AACD,kBAAU;AACV;AAAA,MACJ;AACI,kBAAU;AAAA,IAClB;AAEA,WAAO,EAAE,SAAS,KAAK,OAAO;AAAA,EAClC;AAAA,EAEQ,WAAW,aAAqB,cAA4C;AAChF,WAAO;AAAA,MACH,aAAa;AAAA,QACT,OAAO;AAAA,QACP,SAAS;AAAA,QACT,WAAW;AAAA,QACX,YAAY;AAAA,MAChB;AAAA,MACA,cAAc;AAAA,QACV,OAAO;AAAA,QACP,MAAM;AAAA,QACN,WAAW;AAAA,MACf;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,WAAW,SAA6E;AAC1F,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAE7C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,UAAM,UAAoC,CAAC;AAAA,MACvC,MAAM;AAAA,MACN,MAAM,KAAK;AAAA,MACX,kBAAkB;AAAA,IACtB,CAAC;AAED,UAAM,WAA8B,CAAC;AAErC,WAAO;AAAA,MACH;AAAA,MACA,cAAc,KAAK,gBAAgB,KAAK,aAAa;AAAA,MACrD,OAAO,KAAK,WAAW,KAAK,MAAM,eAAe,KAAK,MAAM,iBAAiB;AAAA,MAC7E;AAAA,IACJ;AAAA,EACJ;AAAA,EAEA,MAAM,SAAS,SAA2E;AACtF,UAAM,WAAW,KAAK,gBAAgB,OAAO;AAC7C,UAAM,OAAO;AAEb,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,SAAS,OAAO,gBAAgB;AAAA,MACjE,QAAQ;AAAA,MACR,SAAS,KAAK,WAAW;AAAA,MACzB,MAAM,KAAK,UAAU;AAAA,QACjB;AAAA,QACA,OAAO,KAAK;AAAA,QACZ,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,KAAK,SAAS;AAAA,MAC1B,CAAC;AAAA,MACD,QAAQ,QAAQ;AAAA,IACpB,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,QAAQ,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAC5E,YAAM,IAAI,MAAM,sBAAsB,MAAM,SAAS,SAAS,UAAU,EAAE;AAAA,IAC9E;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,cAAc;AAClB,QAAI,eAAe;AACnB,UAAM,aAAa;AACnB,QAAI,UAAU;AAEd,UAAM,SAAS,IAAI,eAA0C;AAAA,MACzD,MAAM,KAAK,YAAY;AACnB,YAAI;AACA,gBAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,cAAI,MAAM;AAEN,gBAAI,SAAS;AACT,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,IAAI;AAAA,cACR,CAAC;AAAA,YACL;AACA,uBAAW,QAAQ;AAAA,cACf,MAAM;AAAA,cACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,cACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,YACpD,CAAC;AACD,uBAAW,MAAM;AACjB;AAAA,UACJ;AAEA,
oBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,gBAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,mBAAS,MAAM,IAAI,KAAK;AAExB,qBAAW,QAAQ,OAAO;AACtB,gBAAI,KAAK,KAAK,MAAM,GAAI;AACxB,gBAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,kBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,gBAAI,SAAS,UAAU;AACnB,kBAAI,SAAS;AACT,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,gBACR,CAAC;AAAA,cACL;AACA,yBAAW,QAAQ;AAAA,gBACf,MAAM;AAAA,gBACN,cAAc,KAAK,gBAAgB,MAAM;AAAA,gBACzC,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,cACpD,CAAC;AACD,yBAAW,MAAM;AACjB;AAAA,YACJ;AAEA,gBAAI;AACA,oBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,kBAAI,MAAM,OAAO;AAEb,oBAAI,CAAC,SAAS;AACV,4BAAU;AACV,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AAEA,gCAAgB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAChD,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,IAAI;AAAA,kBACJ,OAAO,MAAM;AAAA,gBACjB,CAAC;AAAA,cACL;AAEA,kBAAI,MAAM,eAAe;AACrB,oBAAI,SAAS;AACT,6BAAW,QAAQ;AAAA,oBACf,MAAM;AAAA,oBACN,IAAI;AAAA,kBACR,CAAC;AAAA,gBACL;AACA,2BAAW,QAAQ;AAAA,kBACf,MAAM;AAAA,kBACN,cAAc,KAAK,gBAAgB,MAAM,aAAa;AAAA,kBACtD,OAAO,KAAK,WAAW,aAAa,YAAY;AAAA,gBACpD,CAAC;AACD,2BAAW,MAAM;AACjB;AAAA,cACJ;AAAA,YACJ,QAAQ;AAAA,YAER;AAAA,UACJ;AAAA,QACJ,SAAS,OAAO;AACZ,qBAAW,MAAM,KAAK;AAAA,QAC1B;AAAA,MACJ;AAAA,MACA,SAAS;AACL,eAAO,OAAO;AAAA,MAClB;AAAA,IACJ,CAAC;AAED,WAAO;AAAA,MACH;AAAA,IACJ;AAAA,EACJ;AACJ;;;ACrRO,SAAS,cAAc,UAAmC,CAAC,GAAoB;AAClF,QAAM,UAAU,QAAQ,WAAW;AACnC,QAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAE7C,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,sGAAsG;AAAA,EAC1H;AAEA,QAAM,cAAc,CAAC,SAAiB,WAAgC,CAAC,MAAM;AACzE,WAAO,IAAI,yBAAyB,SAAS;AAAA,MACzC;AAAA,MACA;AAAA,MACA,SAAS,QAAQ;AAAA,MACjB,GAAG;AAAA,IACP,CAAC;AAAA,EACL;AAEA,QAAM,WAAW,SAAU,SAAiB,UAAgC;AACxE,WAAO,YAAY,SAAS,QAAQ;AAAA,EACxC;AAEA,WAAS,OAAO;AAEhB,SAAO;AACX;AAeO,IAAM,UAA2B,SAAU,SAAiB,UAAgC;AAC/F,QAAM,SAAS,QAAQ,IAAI;AAC3B,MAAI,CAAC,QAAQ;AACT,UAAM,IAAI,MAAM,2GAA2G;AAAA,EAC/H;AACA,SAAO,IAAI,yBAAyB,SAAS;AAAA,IACzC;AAAA,IACA,SAAS;AAAA,IACT,GAAG;AAAA,EACP,CAAC;AACL;AAEA,QAAQ,OAAO;","names":[]}
package/package.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "$schema": "https://json.schemastore.org/package.json",
+   "name": "@cencori/ai-sdk",
+   "version": "0.2.1",
+   "description": "Cencori AI SDK - The infrastructure layer for AI applications. Works with Vercel AI SDK, TanStack AI, and more.",
+   "main": "dist/index.js",
+   "module": "dist/index.mjs",
+   "types": "dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.mjs",
+       "require": "./dist/index.js"
+     }
+   },
+   "files": [
+     "dist",
+     "README.md"
+   ],
+   "keywords": [
+     "ai",
+     "cencori",
+     "ai-sdk",
+     "vercel",
+     "tanstack",
+     "provider",
+     "streaming",
+     "llm",
+     "openai",
+     "anthropic",
+     "gemini",
+     "infrastructure",
+     "security",
+     "observability"
+   ],
+   "author": "FohnAI",
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/cencori/cencori.git"
+   },
+   "homepage": "https://cencori.com",
+   "bugs": {
+     "url": "https://github.com/cencori/cencori/issues"
+   },
+   "scripts": {
+     "build": "tsup",
+     "dev": "tsup --watch",
+     "prepublishOnly": "npm run build"
+   },
+   "peerDependencies": {
+     "@ai-sdk/provider": ">=1.0.0"
+   },
+   "devDependencies": {
+     "@ai-sdk/provider": "^3.0.0",
+     "@ai-sdk/provider-utils": "^3.0.0",
+     "tsup": "^8.0.0",
+     "typescript": "^5.3.0"
+   }
+ }
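
The `exports` map ties the three build artifacts together: the `import` condition resolves to the ESM bundle, `require` to the CommonJS one, and both share the same declaration file. From consuming code that looks like:

```typescript
// ESM consumers resolve to dist/index.mjs via the "import" condition:
import { cencori } from '@cencori/ai-sdk';

// CommonJS consumers resolve to dist/index.js via the "require" condition:
// const { createCencori } = require('@cencori/ai-sdk');
```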