@cencori/ai-sdk 0.2.1 → 0.3.0

@@ -0,0 +1,233 @@
+ // src/tanstack/index.ts
+ var CENCORI_CHAT_MODELS = [
+   // OpenAI
+   "gpt-4o",
+   "gpt-4o-mini",
+   "o1",
+   "o1-mini",
+   // Anthropic
+   "claude-3-5-sonnet",
+   "claude-3-opus",
+   "claude-3-haiku",
+   // Google
+   "gemini-2.5-flash",
+   "gemini-2.0-flash",
+   "gemini-3-pro",
+   // xAI
+   "grok-4",
+   "grok-3",
+   // Mistral
+   "mistral-large",
+   "codestral",
+   // DeepSeek
+   "deepseek-v3.2",
+   "deepseek-reasoner",
+   // Groq
+   "llama-3-70b",
+   "mixtral-8x7b"
+ ];
+ var CencoriTextAdapter = class {
+   constructor(model, options = {}) {
+     this.kind = "text";
+     this.name = "cencori";
+     this["~types"] = {};
+     this.model = model;
+     this.providerOptions = options;
+     const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
+     if (!apiKey) {
+       throw new Error(
+         "Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable."
+       );
+     }
+     this.config = {
+       apiKey,
+       baseUrl: options.baseUrl ?? "https://cencori.com",
+       headers: options.headers
+     };
+   }
+   /**
+    * Stream chat completions from the model
+    */
+   async *chatStream(options) {
+     const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: {
+         "Content-Type": "application/json",
+         "CENCORI_API_KEY": this.config.apiKey,
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: this.model,
+         messages: options.messages,
+         temperature: options.temperature,
+         maxTokens: options.maxTokens,
+         stream: true,
+         userId: options.modelOptions?.userId
+       }),
+       signal: options.abortController?.signal
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       const errorChunk = {
+         type: "error",
+         id: this.generateId(),
+         model: this.model,
+         timestamp: Date.now(),
+         error: {
+           message: errorData.error || response.statusText,
+           code: String(response.status)
+         }
+       };
+       yield errorChunk;
+       return;
+     }
+     const reader = response.body?.getReader();
+     if (!reader) {
+       throw new Error("Response body is null");
+     }
+     const decoder = new TextDecoder();
+     let buffer = "";
+     let content = "";
+     let promptTokens = 0;
+     let completionTokens = 0;
+     try {
+       while (true) {
+         const { done, value } = await reader.read();
+         if (done) {
+           const doneChunk = {
+             type: "done",
+             id: this.generateId(),
+             model: this.model,
+             timestamp: Date.now(),
+             finishReason: "stop",
+             usage: {
+               promptTokens,
+               completionTokens,
+               totalTokens: promptTokens + completionTokens
+             }
+           };
+           yield doneChunk;
+           return;
+         }
+         buffer += decoder.decode(value, { stream: true });
+         const lines = buffer.split("\n");
+         buffer = lines.pop() || "";
+         for (const line of lines) {
+           if (line.trim() === "") continue;
+           if (!line.startsWith("data: ")) continue;
+           const data = line.slice(6);
+           if (data === "[DONE]") {
+             const doneChunk = {
+               type: "done",
+               id: this.generateId(),
+               model: this.model,
+               timestamp: Date.now(),
+               finishReason: "stop",
+               usage: {
+                 promptTokens,
+                 completionTokens,
+                 totalTokens: promptTokens + completionTokens
+               }
+             };
+             yield doneChunk;
+             return;
+           }
+           try {
+             const chunk = JSON.parse(data);
+             if (chunk.delta) {
+               content += chunk.delta;
+               completionTokens += Math.ceil(chunk.delta.length / 4);
+               const contentChunk = {
+                 type: "content",
+                 id: this.generateId(),
+                 model: this.model,
+                 timestamp: Date.now(),
+                 delta: chunk.delta,
+                 content,
+                 role: "assistant"
+               };
+               yield contentChunk;
+             }
+             if (chunk.finish_reason) {
+               const doneChunk = {
+                 type: "done",
+                 id: this.generateId(),
+                 model: this.model,
+                 timestamp: Date.now(),
+                 finishReason: chunk.finish_reason === "stop" ? "stop" : null,
+                 usage: {
+                   promptTokens,
+                   completionTokens,
+                   totalTokens: promptTokens + completionTokens
+                 }
+               };
+               yield doneChunk;
+               return;
+             }
+           } catch {
+           }
+         }
+       }
+     } finally {
+       reader.releaseLock();
+     }
+   }
+   /**
+    * Generate structured output
+    */
+   async structuredOutput(options) {
+     const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+       method: "POST",
+       headers: {
+         "Content-Type": "application/json",
+         "CENCORI_API_KEY": this.config.apiKey,
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: this.model,
+         messages: options.chatOptions.messages,
+         temperature: options.chatOptions.temperature,
+         maxTokens: options.chatOptions.maxTokens,
+         stream: false,
+         responseFormat: {
+           type: "json_schema",
+           json_schema: {
+             name: "structured_output",
+             schema: options.outputSchema,
+             strict: true
+           }
+         }
+       })
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+     }
+     const result = await response.json();
+     const rawText = result.content;
+     try {
+       const data = JSON.parse(rawText);
+       return { data, rawText };
+     } catch {
+       throw new Error(`Failed to parse structured output: ${rawText}`);
+     }
+   }
+   generateId() {
+     return `cencori-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
+   }
+ };
+ function createCencori(options = {}) {
+   return function cencoriProvider(model) {
+     return new CencoriTextAdapter(model, options);
+   };
+ }
+ function cencori(model) {
+   return new CencoriTextAdapter(model, {});
+ }
+ export {
+   CENCORI_CHAT_MODELS,
+   CencoriTextAdapter,
+   cencori,
+   createCencori
+ };
+ //# sourceMappingURL=index.mjs.map
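For orientation between files: the compiled adapter above is obtained through createCencori or the default cencori export. The sketch below is illustrative only and not part of the diff; it is assembled from the chunk shapes visible in chatStream ("content"/"done"/"error" chunks carrying delta, content, and usage) and from the JSDoc this package ships, with a placeholder API key.

// Usage sketch (TypeScript, not part of the package diff).
import { createCencori } from '@cencori/ai-sdk/tanstack';

// Explicit configuration; 'csk_...' stands in for a real Cencori key.
const provider = createCencori({ apiKey: 'csk_...' });
const adapter = provider('gpt-4o');

// chatStream() yields "content" chunks (incremental delta plus accumulated
// content) and finishes with a "done" chunk carrying estimated token usage.
for await (const chunk of adapter.chatStream({
  messages: [{ role: 'user', content: 'Hello!' }],
})) {
  if (chunk.type === 'content') process.stdout.write(chunk.delta);
  if (chunk.type === 'error') throw new Error(chunk.error.message);
}

// structuredOutput() posts a json_schema response format and returns the
// parsed data alongside the raw text.
const { data } = await adapter.structuredOutput({
  chatOptions: { messages: [{ role: 'user', content: 'Return a user object.' }] },
  outputSchema: {
    type: 'object',
    properties: { name: { type: 'string' } },
    required: ['name'],
  },
});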
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/tanstack/index.ts"],"sourcesContent":[ /* original TypeScript source of src/tanstack/index.ts, omitted */ ],"mappings":" /* minified VLQ mappings, omitted */ ","names":[]}
@@ -0,0 +1,118 @@
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult } from '@ai-sdk/provider';
+
+ /**
+  * Cencori Chat Language Model
+  *
+  * Implements the Vercel AI SDK's LanguageModelV3 interface (AI SDK v6 compatible)
+  */
+
+ interface CencoriChatModelSettings {
+   apiKey: string;
+   baseUrl: string;
+   headers?: Record<string, string>;
+   userId?: string;
+ }
+ declare class CencoriChatLanguageModel implements LanguageModelV3 {
+   readonly specificationVersion: "v3";
+   readonly provider = "cencori";
+   readonly modelId: string;
+   readonly supportedUrls: Record<string, RegExp[]>;
+   private readonly settings;
+   constructor(modelId: string, settings: CencoriChatModelSettings);
+   private getHeaders;
+   private convertMessages;
+   private mapFinishReason;
+   private buildUsage;
+   doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+   doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
+ }
+
+ /**
+  * Types for Cencori AI Provider
+  */
+ interface CencoriProviderSettings {
+   /**
+    * Cencori API key (csk_ or cpk_ prefix)
+    */
+   apiKey?: string;
+   /**
+    * Base URL for the Cencori API
+    * @default 'https://cencori.com'
+    */
+   baseUrl?: string;
+   /**
+    * Custom headers to include in requests
+    */
+   headers?: Record<string, string>;
+ }
+ interface CencoriChatSettings {
+   /**
+    * Optional user ID for rate limiting and analytics
+    */
+   userId?: string;
+ }
+
+ /**
+  * Cencori AI Provider for Vercel AI SDK
+  *
+  * Use Cencori with streamText(), generateText(), and useChat()
+  */
+
+ interface CencoriProvider {
+   /**
+    * Create a Cencori chat model for use with Vercel AI SDK
+    *
+    * @param modelId - The model ID (e.g., 'gemini-2.5-flash', 'gpt-4o', 'claude-3-opus')
+    * @param settings - Optional model-specific settings
+    * @returns A LanguageModelV3-compatible model
+    *
+    * @example
+    * import { cencori } from '@cencori/ai-sdk';
+    * import { streamText } from 'ai';
+    *
+    * const result = await streamText({
+    *   model: cencori('gemini-2.5-flash'),
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   (modelId: string, settings?: CencoriChatSettings): CencoriChatLanguageModel;
+   /**
+    * Create a chat model (alias for the provider function)
+    */
+   chat: (modelId: string, settings?: CencoriChatSettings) => CencoriChatLanguageModel;
+ }
+ /**
+  * Create a Cencori provider instance
+  *
+  * @param options - Provider configuration options
+  * @returns A Cencori provider
+  *
+  * @example
+  * import { createCencori } from '@cencori/ai-sdk';
+  *
+  * const cencori = createCencori({
+  *   apiKey: process.env.CENCORI_API_KEY
+  * });
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare function createCencori(options?: CencoriProviderSettings): CencoriProvider;
+ /**
+  * Default Cencori provider instance
+  * Uses CENCORI_API_KEY environment variable (lazy initialization)
+  *
+  * @example
+  * import { cencori } from '@cencori/ai-sdk';
+  * import { streamText } from 'ai';
+  *
+  * const result = await streamText({
+  *   model: cencori('gemini-2.5-flash'),
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+ declare const cencori: CencoriProvider;
+
+ export { CencoriChatLanguageModel, type CencoriChatSettings, type CencoriProvider, type CencoriProviderSettings, cencori, createCencori };
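The declarations above describe the package's Vercel AI SDK entry point. A minimal sketch of that path, following the JSDoc examples embedded in the typings (streamText comes from the 'ai' package; iterating result.textStream is standard AI SDK usage, not something this diff itself verifies):

// Usage sketch (TypeScript, not part of the package diff).
import { createCencori } from '@cencori/ai-sdk';
import { streamText } from 'ai';

// The default `cencori` export would instead read CENCORI_API_KEY lazily.
const cencori = createCencori({ apiKey: process.env.CENCORI_API_KEY });

const result = streamText({
  model: cencori('gemini-2.5-flash'),
  messages: [{ role: 'user', content: 'Hello!' }],
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}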