@cencori/ai-sdk 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,95 @@
1
+ import { TextAdapter, DefaultMessageMetadataByModality, TextOptions, StreamChunk } from '@tanstack/ai';
2
+ export { StreamChunk, TextAdapter, TextOptions } from '@tanstack/ai';
3
+
4
+ /**
5
+ * Cencori AI SDK - TanStack AI Integration
6
+ *
7
+ * @example
8
+ * import { cencori } from '@cencori/ai-sdk/tanstack';
9
+ * import { chat } from '@tanstack/ai';
10
+ *
11
+ * const result = await chat({
12
+ * adapter: cencori('gpt-4o'),
13
+ * messages: [{ role: 'user', content: 'Hello!' }]
14
+ * });
15
+ *
16
+ * @packageDocumentation
17
+ */
18
+
19
+ /**
20
+ * Cencori provider options
21
+ */
22
+ interface CencoriProviderOptions {
23
+ /** Cencori API key */
24
+ apiKey?: string;
25
+ /** Base URL for Cencori API (defaults to https://cencori.com) */
26
+ baseUrl?: string;
27
+ /** Custom headers */
28
+ headers?: Record<string, string>;
29
+ }
30
+ /**
31
+ * Cencori model-specific options
32
+ */
33
+ interface CencoriModelOptions {
34
+ /** User ID for attribution */
35
+ userId?: string;
36
+ }
37
+ declare const CENCORI_CHAT_MODELS: readonly ["gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "claude-3-5-sonnet", "claude-3-opus", "claude-3-haiku", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-3-pro", "grok-4", "grok-3", "mistral-large", "codestral", "deepseek-v3.2", "deepseek-reasoner", "llama-3-70b", "mixtral-8x7b"];
38
+ type CencoriChatModel = (typeof CENCORI_CHAT_MODELS)[number];
39
+ /**
40
+ * Cencori adapter for TanStack AI
41
+ */
42
+ declare class CencoriTextAdapter implements TextAdapter<CencoriChatModel, CencoriModelOptions, readonly ['text', 'image'], DefaultMessageMetadataByModality> {
43
+ readonly kind: "text";
44
+ readonly name = "cencori";
45
+ readonly model: CencoriChatModel;
46
+ '~types': {
47
+ providerOptions: CencoriModelOptions;
48
+ inputModalities: readonly ['text', 'image'];
49
+ messageMetadataByModality: DefaultMessageMetadataByModality;
50
+ };
51
+ private config;
52
+ private providerOptions;
53
+ constructor(model: CencoriChatModel, options?: CencoriProviderOptions);
54
+ /**
55
+ * Stream chat completions from the model
56
+ */
57
+ chatStream(options: TextOptions<CencoriModelOptions>): AsyncIterable<StreamChunk>;
58
+ /**
59
+ * Generate structured output
60
+ */
61
+ structuredOutput(options: {
62
+ chatOptions: TextOptions<CencoriModelOptions>;
63
+ outputSchema: any;
64
+ }): Promise<{
65
+ data: unknown;
66
+ rawText: string;
67
+ }>;
68
+ private generateId;
69
+ }
70
+ /**
71
+ * Create a Cencori adapter for TanStack AI
72
+ *
73
+ * @example
74
+ * import { createCencori } from '@cencori/ai-sdk/tanstack';
75
+ *
76
+ * const myProvider = createCencori({ apiKey: 'csk_...' });
77
+ * const adapter = myProvider('gpt-4o');
78
+ */
79
+ declare function createCencori(options?: CencoriProviderOptions): <T extends CencoriChatModel>(model: T) => CencoriTextAdapter;
80
+ /**
81
+ * Default Cencori provider
82
+ * Uses CENCORI_API_KEY environment variable
83
+ *
84
+ * @example
85
+ * import { cencori } from '@cencori/ai-sdk/tanstack';
86
+ * import { chat } from '@tanstack/ai';
87
+ *
88
+ * const result = await chat({
89
+ * adapter: cencori('gpt-4o'),
90
+ * messages: [{ role: 'user', content: 'Hello!' }]
91
+ * });
92
+ */
93
+ declare function cencori<T extends CencoriChatModel>(model: T): CencoriTextAdapter;
94
+
95
+ export { CENCORI_CHAT_MODELS, type CencoriChatModel, type CencoriModelOptions, type CencoriProviderOptions, CencoriTextAdapter, cencori, createCencori };
@@ -0,0 +1,95 @@
1
+ import { TextAdapter, DefaultMessageMetadataByModality, TextOptions, StreamChunk } from '@tanstack/ai';
2
+ export { StreamChunk, TextAdapter, TextOptions } from '@tanstack/ai';
3
+
4
+ /**
5
+ * Cencori AI SDK - TanStack AI Integration
6
+ *
7
+ * @example
8
+ * import { cencori } from '@cencori/ai-sdk/tanstack';
9
+ * import { chat } from '@tanstack/ai';
10
+ *
11
+ * const result = await chat({
12
+ * adapter: cencori('gpt-4o'),
13
+ * messages: [{ role: 'user', content: 'Hello!' }]
14
+ * });
15
+ *
16
+ * @packageDocumentation
17
+ */
18
+
19
+ /**
20
+ * Cencori provider options
21
+ */
22
+ interface CencoriProviderOptions {
23
+ /** Cencori API key */
24
+ apiKey?: string;
25
+ /** Base URL for Cencori API (defaults to https://cencori.com) */
26
+ baseUrl?: string;
27
+ /** Custom headers */
28
+ headers?: Record<string, string>;
29
+ }
30
+ /**
31
+ * Cencori model-specific options
32
+ */
33
+ interface CencoriModelOptions {
34
+ /** User ID for attribution */
35
+ userId?: string;
36
+ }
37
+ declare const CENCORI_CHAT_MODELS: readonly ["gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "claude-3-5-sonnet", "claude-3-opus", "claude-3-haiku", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-3-pro", "grok-4", "grok-3", "mistral-large", "codestral", "deepseek-v3.2", "deepseek-reasoner", "llama-3-70b", "mixtral-8x7b"];
38
+ type CencoriChatModel = (typeof CENCORI_CHAT_MODELS)[number];
39
+ /**
40
+ * Cencori adapter for TanStack AI
41
+ */
42
+ declare class CencoriTextAdapter implements TextAdapter<CencoriChatModel, CencoriModelOptions, readonly ['text', 'image'], DefaultMessageMetadataByModality> {
43
+ readonly kind: "text";
44
+ readonly name = "cencori";
45
+ readonly model: CencoriChatModel;
46
+ '~types': {
47
+ providerOptions: CencoriModelOptions;
48
+ inputModalities: readonly ['text', 'image'];
49
+ messageMetadataByModality: DefaultMessageMetadataByModality;
50
+ };
51
+ private config;
52
+ private providerOptions;
53
+ constructor(model: CencoriChatModel, options?: CencoriProviderOptions);
54
+ /**
55
+ * Stream chat completions from the model
56
+ */
57
+ chatStream(options: TextOptions<CencoriModelOptions>): AsyncIterable<StreamChunk>;
58
+ /**
59
+ * Generate structured output
60
+ */
61
+ structuredOutput(options: {
62
+ chatOptions: TextOptions<CencoriModelOptions>;
63
+ outputSchema: any;
64
+ }): Promise<{
65
+ data: unknown;
66
+ rawText: string;
67
+ }>;
68
+ private generateId;
69
+ }
70
+ /**
71
+ * Create a Cencori adapter for TanStack AI
72
+ *
73
+ * @example
74
+ * import { createCencori } from '@cencori/ai-sdk/tanstack';
75
+ *
76
+ * const myProvider = createCencori({ apiKey: 'csk_...' });
77
+ * const adapter = myProvider('gpt-4o');
78
+ */
79
+ declare function createCencori(options?: CencoriProviderOptions): <T extends CencoriChatModel>(model: T) => CencoriTextAdapter;
80
+ /**
81
+ * Default Cencori provider
82
+ * Uses CENCORI_API_KEY environment variable
83
+ *
84
+ * @example
85
+ * import { cencori } from '@cencori/ai-sdk/tanstack';
86
+ * import { chat } from '@tanstack/ai';
87
+ *
88
+ * const result = await chat({
89
+ * adapter: cencori('gpt-4o'),
90
+ * messages: [{ role: 'user', content: 'Hello!' }]
91
+ * });
92
+ */
93
+ declare function cencori<T extends CencoriChatModel>(model: T): CencoriTextAdapter;
94
+
95
+ export { CENCORI_CHAT_MODELS, type CencoriChatModel, type CencoriModelOptions, type CencoriProviderOptions, CencoriTextAdapter, cencori, createCencori };
@@ -0,0 +1,261 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/tanstack/index.ts
21
+ var tanstack_exports = {};
22
+ __export(tanstack_exports, {
23
+ CENCORI_CHAT_MODELS: () => CENCORI_CHAT_MODELS,
24
+ CencoriTextAdapter: () => CencoriTextAdapter,
25
+ cencori: () => cencori,
26
+ createCencori: () => createCencori
27
+ });
28
+ module.exports = __toCommonJS(tanstack_exports);
29
// Chat models routable through the Cencori gateway, grouped by upstream
// provider. Order matters only for readability; the list is consumed as a set.
var CENCORI_CHAT_MODELS = [
  "gpt-4o", "gpt-4o-mini", "o1", "o1-mini",                // OpenAI
  "claude-3-5-sonnet", "claude-3-opus", "claude-3-haiku",  // Anthropic
  "gemini-2.5-flash", "gemini-2.0-flash", "gemini-3-pro",  // Google
  "grok-4", "grok-3",                                      // xAI
  "mistral-large", "codestral",                            // Mistral
  "deepseek-v3.2", "deepseek-reasoner",                    // DeepSeek
  "llama-3-70b", "mixtral-8x7b"                            // Groq
];
56
+ var CencoriTextAdapter = class {
57
+ constructor(model, options = {}) {
58
+ this.kind = "text";
59
+ this.name = "cencori";
60
+ this["~types"] = {};
61
+ this.model = model;
62
+ this.providerOptions = options;
63
+ const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;
64
+ if (!apiKey) {
65
+ throw new Error(
66
+ "Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable."
67
+ );
68
+ }
69
+ this.config = {
70
+ apiKey,
71
+ baseUrl: options.baseUrl ?? "https://cencori.com",
72
+ headers: options.headers
73
+ };
74
+ }
75
+ /**
76
+ * Stream chat completions from the model
77
+ */
78
+ async *chatStream(options) {
79
+ const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
80
+ method: "POST",
81
+ headers: {
82
+ "Content-Type": "application/json",
83
+ "CENCORI_API_KEY": this.config.apiKey,
84
+ ...this.config.headers
85
+ },
86
+ body: JSON.stringify({
87
+ model: this.model,
88
+ messages: options.messages,
89
+ temperature: options.temperature,
90
+ maxTokens: options.maxTokens,
91
+ stream: true,
92
+ userId: options.modelOptions?.userId
93
+ }),
94
+ signal: options.abortController?.signal
95
+ });
96
+ if (!response.ok) {
97
+ const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
98
+ const errorChunk = {
99
+ type: "error",
100
+ id: this.generateId(),
101
+ model: this.model,
102
+ timestamp: Date.now(),
103
+ error: {
104
+ message: errorData.error || response.statusText,
105
+ code: String(response.status)
106
+ }
107
+ };
108
+ yield errorChunk;
109
+ return;
110
+ }
111
+ const reader = response.body?.getReader();
112
+ if (!reader) {
113
+ throw new Error("Response body is null");
114
+ }
115
+ const decoder = new TextDecoder();
116
+ let buffer = "";
117
+ let content = "";
118
+ let promptTokens = 0;
119
+ let completionTokens = 0;
120
+ try {
121
+ while (true) {
122
+ const { done, value } = await reader.read();
123
+ if (done) {
124
+ const doneChunk = {
125
+ type: "done",
126
+ id: this.generateId(),
127
+ model: this.model,
128
+ timestamp: Date.now(),
129
+ finishReason: "stop",
130
+ usage: {
131
+ promptTokens,
132
+ completionTokens,
133
+ totalTokens: promptTokens + completionTokens
134
+ }
135
+ };
136
+ yield doneChunk;
137
+ return;
138
+ }
139
+ buffer += decoder.decode(value, { stream: true });
140
+ const lines = buffer.split("\n");
141
+ buffer = lines.pop() || "";
142
+ for (const line of lines) {
143
+ if (line.trim() === "") continue;
144
+ if (!line.startsWith("data: ")) continue;
145
+ const data = line.slice(6);
146
+ if (data === "[DONE]") {
147
+ const doneChunk = {
148
+ type: "done",
149
+ id: this.generateId(),
150
+ model: this.model,
151
+ timestamp: Date.now(),
152
+ finishReason: "stop",
153
+ usage: {
154
+ promptTokens,
155
+ completionTokens,
156
+ totalTokens: promptTokens + completionTokens
157
+ }
158
+ };
159
+ yield doneChunk;
160
+ return;
161
+ }
162
+ try {
163
+ const chunk = JSON.parse(data);
164
+ if (chunk.delta) {
165
+ content += chunk.delta;
166
+ completionTokens += Math.ceil(chunk.delta.length / 4);
167
+ const contentChunk = {
168
+ type: "content",
169
+ id: this.generateId(),
170
+ model: this.model,
171
+ timestamp: Date.now(),
172
+ delta: chunk.delta,
173
+ content,
174
+ role: "assistant"
175
+ };
176
+ yield contentChunk;
177
+ }
178
+ if (chunk.finish_reason) {
179
+ const doneChunk = {
180
+ type: "done",
181
+ id: this.generateId(),
182
+ model: this.model,
183
+ timestamp: Date.now(),
184
+ finishReason: chunk.finish_reason === "stop" ? "stop" : null,
185
+ usage: {
186
+ promptTokens,
187
+ completionTokens,
188
+ totalTokens: promptTokens + completionTokens
189
+ }
190
+ };
191
+ yield doneChunk;
192
+ return;
193
+ }
194
+ } catch {
195
+ }
196
+ }
197
+ }
198
+ } finally {
199
+ reader.releaseLock();
200
+ }
201
+ }
202
+ /**
203
+ * Generate structured output
204
+ */
205
+ async structuredOutput(options) {
206
+ const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
207
+ method: "POST",
208
+ headers: {
209
+ "Content-Type": "application/json",
210
+ "CENCORI_API_KEY": this.config.apiKey,
211
+ ...this.config.headers
212
+ },
213
+ body: JSON.stringify({
214
+ model: this.model,
215
+ messages: options.chatOptions.messages,
216
+ temperature: options.chatOptions.temperature,
217
+ maxTokens: options.chatOptions.maxTokens,
218
+ stream: false,
219
+ responseFormat: {
220
+ type: "json_schema",
221
+ json_schema: {
222
+ name: "structured_output",
223
+ schema: options.outputSchema,
224
+ strict: true
225
+ }
226
+ }
227
+ })
228
+ });
229
+ if (!response.ok) {
230
+ const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
231
+ throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
232
+ }
233
+ const result = await response.json();
234
+ const rawText = result.content;
235
+ try {
236
+ const data = JSON.parse(rawText);
237
+ return { data, rawText };
238
+ } catch {
239
+ throw new Error(`Failed to parse structured output: ${rawText}`);
240
+ }
241
+ }
242
+ generateId() {
243
+ return `cencori-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
244
+ }
245
+ };
246
/**
 * Create a Cencori adapter factory bound to shared provider options.
 * Every adapter produced by the returned function reuses the same
 * apiKey/baseUrl/headers configuration.
 */
function createCencori(options = {}) {
  return function cencoriProvider(model) {
    return new CencoriTextAdapter(model, options);
  };
}
251
/**
 * Default zero-config Cencori provider. Passes empty options, so the API
 * key must come from the CENCORI_API_KEY environment variable (enforced by
 * the CencoriTextAdapter constructor).
 */
function cencori(model) {
  return new CencoriTextAdapter(model, {});
}
254
// Annotate the CommonJS export names for ESM import in node:
// NOTE: intentionally dead code — `0 &&` never evaluates the right-hand
// side; the literal exists only so Node's static export-name detection can
// surface these identifiers as named ESM imports.
0 && (module.exports = {
  CENCORI_CHAT_MODELS,
  CencoriTextAdapter,
  cencori,
  createCencori
});
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/tanstack/index.ts"],"sourcesContent":["/**\n * Cencori AI SDK - TanStack AI Integration\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk/tanstack';\n * import { chat } from '@tanstack/ai';\n * \n * const result = await chat({\n * adapter: cencori('gpt-4o'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n * \n * @packageDocumentation\n */\n\nimport type {\n TextAdapter,\n TextOptions,\n StreamChunk,\n ContentStreamChunk,\n DoneStreamChunk,\n ErrorStreamChunk,\n Modality,\n DefaultMessageMetadataByModality,\n} from '@tanstack/ai';\n\n// Re-export types for convenience\nexport type {\n TextAdapter,\n TextOptions,\n StreamChunk,\n};\n\n/**\n * Cencori provider options\n */\nexport interface CencoriProviderOptions {\n /** Cencori API key */\n apiKey?: string;\n /** Base URL for Cencori API (defaults to https://cencori.com) */\n baseUrl?: string;\n /** Custom headers */\n headers?: Record<string, string>;\n}\n\n/**\n * Cencori model-specific options\n */\nexport interface CencoriModelOptions {\n /** User ID for attribution */\n userId?: string;\n}\n\n// All models supported through Cencori Gateway\nexport const CENCORI_CHAT_MODELS = [\n // OpenAI\n 'gpt-4o',\n 'gpt-4o-mini',\n 'o1',\n 'o1-mini',\n // Anthropic\n 'claude-3-5-sonnet',\n 'claude-3-opus',\n 'claude-3-haiku',\n // Google\n 'gemini-2.5-flash',\n 'gemini-2.0-flash',\n 'gemini-3-pro',\n // xAI\n 'grok-4',\n 'grok-3',\n // Mistral\n 'mistral-large',\n 'codestral',\n // DeepSeek\n 'deepseek-v3.2',\n 'deepseek-reasoner',\n // Groq\n 'llama-3-70b',\n 'mixtral-8x7b',\n] as const;\n\nexport type CencoriChatModel = (typeof CENCORI_CHAT_MODELS)[number];\n\n/**\n * Cencori adapter for TanStack AI\n */\nclass CencoriTextAdapter implements TextAdapter<\n CencoriChatModel,\n CencoriModelOptions,\n readonly ['text', 'image'],\n DefaultMessageMetadataByModality\n> {\n readonly kind = 'text' as const;\n readonly name = 'cencori';\n readonly model: 
CencoriChatModel;\n\n '~types': {\n providerOptions: CencoriModelOptions;\n inputModalities: readonly ['text', 'image'];\n messageMetadataByModality: DefaultMessageMetadataByModality;\n } = {} as any;\n\n private config: {\n apiKey: string;\n baseUrl: string;\n headers?: Record<string, string>;\n };\n private providerOptions: CencoriProviderOptions;\n\n constructor(model: CencoriChatModel, options: CencoriProviderOptions = {}) {\n this.model = model;\n this.providerOptions = options;\n\n const apiKey = options.apiKey ?? process.env.CENCORI_API_KEY;\n if (!apiKey) {\n throw new Error(\n 'Cencori API key is required. Pass it via options.apiKey or set CENCORI_API_KEY environment variable.'\n );\n }\n\n this.config = {\n apiKey,\n baseUrl: options.baseUrl ?? 'https://cencori.com',\n headers: options.headers,\n };\n }\n\n /**\n * Stream chat completions from the model\n */\n async *chatStream(options: TextOptions<CencoriModelOptions>): AsyncIterable<StreamChunk> {\n const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.config.apiKey!,\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: this.model,\n messages: options.messages,\n temperature: options.temperature,\n maxTokens: options.maxTokens,\n stream: true,\n userId: options.modelOptions?.userId,\n }),\n signal: options.abortController?.signal,\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n const errorChunk: ErrorStreamChunk = {\n type: 'error',\n id: this.generateId(),\n model: this.model,\n timestamp: Date.now(),\n error: {\n message: errorData.error || response.statusText,\n code: String(response.status),\n },\n };\n yield errorChunk;\n return;\n }\n\n const reader = response.body?.getReader();\n if (!reader) {\n throw new Error('Response body is null');\n }\n\n const decoder = new TextDecoder();\n let 
buffer = '';\n let content = '';\n let promptTokens = 0;\n let completionTokens = 0;\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) {\n // Emit done chunk\n const doneChunk: DoneStreamChunk = {\n type: 'done',\n id: this.generateId(),\n model: this.model,\n timestamp: Date.now(),\n finishReason: 'stop',\n usage: {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n },\n };\n yield doneChunk;\n return;\n }\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() || '';\n\n for (const line of lines) {\n if (line.trim() === '') continue;\n if (!line.startsWith('data: ')) continue;\n\n const data = line.slice(6);\n if (data === '[DONE]') {\n const doneChunk: DoneStreamChunk = {\n type: 'done',\n id: this.generateId(),\n model: this.model,\n timestamp: Date.now(),\n finishReason: 'stop',\n usage: {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n },\n };\n yield doneChunk;\n return;\n }\n\n try {\n const chunk = JSON.parse(data);\n\n if (chunk.delta) {\n content += chunk.delta;\n completionTokens += Math.ceil(chunk.delta.length / 4);\n\n const contentChunk: ContentStreamChunk = {\n type: 'content',\n id: this.generateId(),\n model: this.model,\n timestamp: Date.now(),\n delta: chunk.delta,\n content: content,\n role: 'assistant',\n };\n yield contentChunk;\n }\n\n if (chunk.finish_reason) {\n const doneChunk: DoneStreamChunk = {\n type: 'done',\n id: this.generateId(),\n model: this.model,\n timestamp: Date.now(),\n finishReason: chunk.finish_reason === 'stop' ? 
'stop' : null,\n usage: {\n promptTokens,\n completionTokens,\n totalTokens: promptTokens + completionTokens,\n },\n };\n yield doneChunk;\n return;\n }\n } catch {\n // Skip malformed JSON\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Generate structured output\n */\n async structuredOutput(options: {\n chatOptions: TextOptions<CencoriModelOptions>;\n outputSchema: any;\n }): Promise<{ data: unknown; rawText: string }> {\n const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {\n method: 'POST',\n headers: {\n 'Content-Type': 'application/json',\n 'CENCORI_API_KEY': this.config.apiKey!,\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: this.model,\n messages: options.chatOptions.messages,\n temperature: options.chatOptions.temperature,\n maxTokens: options.chatOptions.maxTokens,\n stream: false,\n responseFormat: {\n type: 'json_schema',\n json_schema: {\n name: 'structured_output',\n schema: options.outputSchema,\n strict: true,\n },\n },\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const result = await response.json() as { content: string };\n const rawText = result.content;\n\n try {\n const data = JSON.parse(rawText);\n return { data, rawText };\n } catch {\n throw new Error(`Failed to parse structured output: ${rawText}`);\n }\n }\n\n private generateId(): string {\n return `cencori-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;\n }\n}\n\n/**\n * Create a Cencori adapter for TanStack AI\n * \n * @example\n * import { createCencori } from '@cencori/ai-sdk/tanstack';\n * \n * const myProvider = createCencori({ apiKey: 'csk_...' 
});\n * const adapter = myProvider('gpt-4o');\n */\nexport function createCencori(options: CencoriProviderOptions = {}) {\n return function cencoriProvider<T extends CencoriChatModel>(model: T) {\n return new CencoriTextAdapter(model, options);\n };\n}\n\n/**\n * Default Cencori provider\n * Uses CENCORI_API_KEY environment variable\n * \n * @example\n * import { cencori } from '@cencori/ai-sdk/tanstack';\n * import { chat } from '@tanstack/ai';\n * \n * const result = await chat({\n * adapter: cencori('gpt-4o'),\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\nexport function cencori<T extends CencoriChatModel>(model: T) {\n return new CencoriTextAdapter(model, {});\n}\n\n// Export adapter class for advanced use cases\nexport { CencoriTextAdapter };\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAsDO,IAAM,sBAAsB;AAAA;AAAA,EAE/B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA;AACJ;AAOA,IAAM,qBAAN,MAKE;AAAA,EAkBE,YAAY,OAAyB,UAAkC,CAAC,GAAG;AAjB3E,SAAS,OAAO;AAChB,SAAS,OAAO;AAGhB,qBAII,CAAC;AAUD,SAAK,QAAQ;AACb,SAAK,kBAAkB;AAEvB,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAC7C,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI;AAAA,QACN;AAAA,MACJ;AAAA,IACJ;AAEA,SAAK,SAAS;AAAA,MACV;AAAA,MACA,SAAS,QAAQ,WAAW;AAAA,MAC5B,SAAS,QAAQ;AAAA,IACrB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,WAAW,SAAuE;AACrF,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,gBAAgB;AAAA,MAC/D,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,gBAAgB;AAAA,QAChB,mBAAmB,KAAK,OAAO;AAAA,QAC/B,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,KAAK;AAAA,QACZ,UAAU,QAAQ;AAAA,QAClB,aAAa,QAAQ;AAAA,QACrB,WAAW,QAAQ;AAAA,QACnB,QAAQ;AAAA,QACR,QAAQ,QAAQ,cAAc;AAAA,MAClC,CAAC;AAAA,MACD,QAAQ,QAAQ,iBAAiB;AAAA,IACrC,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,aAA+B;AAAA,QACjC,MAAM;AAAA,QACN,IAAI,KAAK,WAAW;AAAA,QA
CpB,OAAO,KAAK;AAAA,QACZ,WAAW,KAAK,IAAI;AAAA,QACpB,OAAO;AAAA,UACH,SAAS,UAAU,SAAS,SAAS;AAAA,UACrC,MAAM,OAAO,SAAS,MAAM;AAAA,QAChC;AAAA,MACJ;AACA,YAAM;AACN;AAAA,IACJ;AAEA,UAAM,SAAS,SAAS,MAAM,UAAU;AACxC,QAAI,CAAC,QAAQ;AACT,YAAM,IAAI,MAAM,uBAAuB;AAAA,IAC3C;AAEA,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,SAAS;AACb,QAAI,UAAU;AACd,QAAI,eAAe;AACnB,QAAI,mBAAmB;AAEvB,QAAI;AACA,aAAO,MAAM;AACT,cAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,YAAI,MAAM;AAEN,gBAAM,YAA6B;AAAA,YAC/B,MAAM;AAAA,YACN,IAAI,KAAK,WAAW;AAAA,YACpB,OAAO,KAAK;AAAA,YACZ,WAAW,KAAK,IAAI;AAAA,YACpB,cAAc;AAAA,YACd,OAAO;AAAA,cACH;AAAA,cACA;AAAA,cACA,aAAa,eAAe;AAAA,YAChC;AAAA,UACJ;AACA,gBAAM;AACN;AAAA,QACJ;AAEA,kBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,cAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,iBAAS,MAAM,IAAI,KAAK;AAExB,mBAAW,QAAQ,OAAO;AACtB,cAAI,KAAK,KAAK,MAAM,GAAI;AACxB,cAAI,CAAC,KAAK,WAAW,QAAQ,EAAG;AAEhC,gBAAM,OAAO,KAAK,MAAM,CAAC;AACzB,cAAI,SAAS,UAAU;AACnB,kBAAM,YAA6B;AAAA,cAC/B,MAAM;AAAA,cACN,IAAI,KAAK,WAAW;AAAA,cACpB,OAAO,KAAK;AAAA,cACZ,WAAW,KAAK,IAAI;AAAA,cACpB,cAAc;AAAA,cACd,OAAO;AAAA,gBACH;AAAA,gBACA;AAAA,gBACA,aAAa,eAAe;AAAA,cAChC;AAAA,YACJ;AACA,kBAAM;AACN;AAAA,UACJ;AAEA,cAAI;AACA,kBAAM,QAAQ,KAAK,MAAM,IAAI;AAE7B,gBAAI,MAAM,OAAO;AACb,yBAAW,MAAM;AACjB,kCAAoB,KAAK,KAAK,MAAM,MAAM,SAAS,CAAC;AAEpD,oBAAM,eAAmC;AAAA,gBACrC,MAAM;AAAA,gBACN,IAAI,KAAK,WAAW;AAAA,gBACpB,OAAO,KAAK;AAAA,gBACZ,WAAW,KAAK,IAAI;AAAA,gBACpB,OAAO,MAAM;AAAA,gBACb;AAAA,gBACA,MAAM;AAAA,cACV;AACA,oBAAM;AAAA,YACV;AAEA,gBAAI,MAAM,eAAe;AACrB,oBAAM,YAA6B;AAAA,gBAC/B,MAAM;AAAA,gBACN,IAAI,KAAK,WAAW;AAAA,gBACpB,OAAO,KAAK;AAAA,gBACZ,WAAW,KAAK,IAAI;AAAA,gBACpB,cAAc,MAAM,kBAAkB,SAAS,SAAS;AAAA,gBACxD,OAAO;AAAA,kBACH;AAAA,kBACA;AAAA,kBACA,aAAa,eAAe;AAAA,gBAChC;AAAA,cACJ;AACA,oBAAM;AACN;AAAA,YACJ;AAAA,UACJ,QAAQ;AAAA,UAER;AAAA,QACJ;AAAA,MACJ;AAAA,IACJ,UAAE;AACE,aAAO,YAAY;AAAA,IACvB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,iBAAiB,SAGyB;AAC5C,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,gBAAgB;AAAA,MAC/D,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,gBAAgB;AAAA,QAChB,mBAAmB,KAAK,OAAO;AAAA,QAC/B,GAAG,KAAK,OA
AO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,KAAK;AAAA,QACZ,UAAU,QAAQ,YAAY;AAAA,QAC9B,aAAa,QAAQ,YAAY;AAAA,QACjC,WAAW,QAAQ,YAAY;AAAA,QAC/B,QAAQ;AAAA,QACR,gBAAgB;AAAA,UACZ,MAAM;AAAA,UACN,aAAa;AAAA,YACT,MAAM;AAAA,YACN,QAAQ,QAAQ;AAAA,YAChB,QAAQ;AAAA,UACZ;AAAA,QACJ;AAAA,MACJ,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,SAAS,MAAM,SAAS,KAAK;AACnC,UAAM,UAAU,OAAO;AAEvB,QAAI;AACA,YAAM,OAAO,KAAK,MAAM,OAAO;AAC/B,aAAO,EAAE,MAAM,QAAQ;AAAA,IAC3B,QAAQ;AACJ,YAAM,IAAI,MAAM,sCAAsC,OAAO,EAAE;AAAA,IACnE;AAAA,EACJ;AAAA,EAEQ,aAAqB;AACzB,WAAO,WAAW,KAAK,IAAI,CAAC,IAAI,KAAK,OAAO,EAAE,SAAS,EAAE,EAAE,OAAO,GAAG,CAAC,CAAC;AAAA,EAC3E;AACJ;AAWO,SAAS,cAAc,UAAkC,CAAC,GAAG;AAChE,SAAO,SAAS,gBAA4C,OAAU;AAClE,WAAO,IAAI,mBAAmB,OAAO,OAAO;AAAA,EAChD;AACJ;AAeO,SAAS,QAAoC,OAAU;AAC1D,SAAO,IAAI,mBAAmB,OAAO,CAAC,CAAC;AAC3C;","names":[]}