cencori 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/ai/index.d.mts CHANGED
@@ -1,7 +1,7 @@
-import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.mjs';
+import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from '../types-DIuz6XWN.mjs';
 
 /**
- * AI Gateway - Chat, Completions, and Embeddings
+ * AI Gateway - Chat, Completions, Embeddings, and Streaming
  *
  * @example
  * const response = await cencori.ai.chat({
@@ -10,6 +10,15 @@ import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as Completio
  * });
  */
 
+/**
+ * Stream chunk from chat stream
+ */
+interface StreamChunk {
+    delta: string;
+    finish_reason?: 'stop' | 'length' | 'content_filter' | 'error';
+    /** Error message if the stream encountered an error */
+    error?: string;
+}
 declare class AINamespace {
     private config;
     constructor(config: Required<CencoriConfig>);
@@ -23,6 +32,16 @@ declare class AINamespace {
      * });
      */
     chat(request: ChatRequest): Promise<ChatResponse>;
+    /**
+     * Stream chat completions
+     * Returns an async generator that yields chunks as they arrive
+     *
+     * @example
+     * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+     *   process.stdout.write(chunk.delta);
+     * }
+     */
+    chatStream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
     /**
      * Create a text completion
      *
@@ -45,4 +64,4 @@ declare class AINamespace {
     embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
 }
 
-export { AINamespace };
+export { AINamespace, type StreamChunk };
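
This declaration diff is the headline of 1.0.1: AINamespace gains chatStream, and the StreamChunk type it yields is now exported. A minimal consumption sketch in TypeScript — client construction follows the package's own @example comments, and reading the key from process.env is an assumption:

import { Cencori, type StreamChunk } from 'cencori';

// Assumption: CencoriConfig accepts apiKey (the compiled code reads config.apiKey).
const cencori = new Cencori({ apiKey: process.env.CENCORI_API_KEY! });

async function streamReply(prompt: string): Promise<string> {
  let text = '';
  let last: StreamChunk | undefined;
  for await (const chunk of cencori.ai.chatStream({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: prompt }],
  })) {
    last = chunk;
    if (chunk.error) throw new Error(chunk.error); // error chunks carry a message
    text += chunk.delta;                           // delta is the incremental text
  }
  if (last?.finish_reason === 'length') {
    console.warn('Reply was truncated by the token limit');
  }
  return text;
}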
package/dist/ai/index.d.ts CHANGED
@@ -1,7 +1,7 @@
-import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.js';
+import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from '../types-DIuz6XWN.js';
 
 /**
- * AI Gateway - Chat, Completions, and Embeddings
+ * AI Gateway - Chat, Completions, Embeddings, and Streaming
  *
  * @example
  * const response = await cencori.ai.chat({
@@ -10,6 +10,15 @@ import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as Completio
  * });
  */
 
+/**
+ * Stream chunk from chat stream
+ */
+interface StreamChunk {
+    delta: string;
+    finish_reason?: 'stop' | 'length' | 'content_filter' | 'error';
+    /** Error message if the stream encountered an error */
+    error?: string;
+}
 declare class AINamespace {
     private config;
     constructor(config: Required<CencoriConfig>);
@@ -23,6 +32,16 @@ declare class AINamespace {
      * });
      */
     chat(request: ChatRequest): Promise<ChatResponse>;
+    /**
+     * Stream chat completions
+     * Returns an async generator that yields chunks as they arrive
+     *
+     * @example
+     * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+     *   process.stdout.write(chunk.delta);
+     * }
+     */
+    chatStream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
     /**
      * Create a text completion
      *
@@ -45,4 +64,4 @@ declare class AINamespace {
     embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
 }
 
-export { AINamespace };
+export { AINamespace, type StreamChunk };
package/dist/ai/index.js CHANGED
@@ -37,10 +37,10 @@ var AINamespace = class {
    * });
    */
   async chat(request) {
-    const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
       method: "POST",
       headers: {
-        "Authorization": `Bearer ${this.config.apiKey}`,
+        "CENCORI_API_KEY": this.config.apiKey,
         "Content-Type": "application/json",
         ...this.config.headers
       },
@@ -48,8 +48,8 @@ var AINamespace = class {
         model: request.model,
         messages: request.messages,
         temperature: request.temperature,
-        max_tokens: request.maxTokens,
-        stream: request.stream ?? false
+        maxTokens: request.maxTokens,
+        stream: false
       })
     });
     if (!response.ok) {
@@ -68,6 +68,66 @@ var AINamespace = class {
       }
     };
   }
+  /**
+   * Stream chat completions
+   * Returns an async generator that yields chunks as they arrive
+   *
+   * @example
+   * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+   *   process.stdout.write(chunk.delta);
+   * }
+   */
+  async *chatStream(request) {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+      method: "POST",
+      headers: {
+        "CENCORI_API_KEY": this.config.apiKey,
+        "Content-Type": "application/json",
+        ...this.config.headers
+      },
+      body: JSON.stringify({
+        model: request.model,
+        messages: request.messages,
+        temperature: request.temperature,
+        maxTokens: request.maxTokens,
+        stream: true
+      })
+    });
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+    }
+    if (!response.body) {
+      throw new Error("Response body is null");
+    }
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
+        for (const line of lines) {
+          if (line.trim() === "") continue;
+          if (!line.startsWith("data: ")) continue;
+          const data = line.slice(6);
+          if (data === "[DONE]") {
+            return;
+          }
+          try {
+            const chunk = JSON.parse(data);
+            yield chunk;
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
   /**
    * Create a text completion
    *
@@ -98,7 +158,7 @@ var AINamespace = class {
     const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
       method: "POST",
       headers: {
-        "Authorization": `Bearer ${this.config.apiKey}`,
+        "CENCORI_API_KEY": this.config.apiKey,
         "Content-Type": "application/json",
         ...this.config.headers
       },
package/dist/ai/index.js.map CHANGED
(single-line sourcemap, regenerated; its embedded sourcesContent mirrors the src/ai/index.ts changes shown in the index.js diff above)
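
The new chatStream is a line-buffered SSE reader: it decodes bytes incrementally, carries the trailing partial line over in buffer, parses only lines with a `data: ` prefix, treats `data: [DONE]` as end-of-stream, and silently skips malformed JSON. A small TypeScript sketch of the framing those rules accept — the sample frames are hypothetical; only the parsing rules come from the code above:

// Hypothetical frames; the real payload shape is StreamChunk ({ delta, finish_reason?, error? }).
const sample =
  'data: {"delta":"Hel"}\n' +
  'data: {"delta":"lo!","finish_reason":"stop"}\n' +
  'data: [DONE]\n';

for (const line of sample.split('\n')) {
  if (line.trim() === '') continue;          // blank keep-alive lines are skipped
  if (!line.startsWith('data: ')) continue;  // non-data SSE fields are ignored
  const payload = line.slice(6);             // strip the 'data: ' prefix
  if (payload === '[DONE]') break;           // terminator ends the stream
  try {
    console.log(JSON.parse(payload));        // each payload parses to one StreamChunk
  } catch {
    // malformed JSON is skipped, exactly as in chatStream
  }
}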
package/dist/ai/index.mjs CHANGED
@@ -13,10 +13,10 @@ var AINamespace = class {
    * });
    */
   async chat(request) {
-    const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
       method: "POST",
       headers: {
-        "Authorization": `Bearer ${this.config.apiKey}`,
+        "CENCORI_API_KEY": this.config.apiKey,
         "Content-Type": "application/json",
         ...this.config.headers
       },
@@ -24,8 +24,8 @@ var AINamespace = class {
         model: request.model,
         messages: request.messages,
         temperature: request.temperature,
-        max_tokens: request.maxTokens,
-        stream: request.stream ?? false
+        maxTokens: request.maxTokens,
+        stream: false
       })
     });
     if (!response.ok) {
@@ -44,6 +44,66 @@ var AINamespace = class {
       }
     };
   }
+  /**
+   * Stream chat completions
+   * Returns an async generator that yields chunks as they arrive
+   *
+   * @example
+   * for await (const chunk of cencori.ai.chatStream({ model: 'gpt-4o', messages })) {
+   *   process.stdout.write(chunk.delta);
+   * }
+   */
+  async *chatStream(request) {
+    const response = await fetch(`${this.config.baseUrl}/api/ai/chat`, {
+      method: "POST",
+      headers: {
+        "CENCORI_API_KEY": this.config.apiKey,
+        "Content-Type": "application/json",
+        ...this.config.headers
+      },
+      body: JSON.stringify({
+        model: request.model,
+        messages: request.messages,
+        temperature: request.temperature,
+        maxTokens: request.maxTokens,
+        stream: true
+      })
+    });
+    if (!response.ok) {
+      const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+      throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+    }
+    if (!response.body) {
+      throw new Error("Response body is null");
+    }
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() || "";
+        for (const line of lines) {
+          if (line.trim() === "") continue;
+          if (!line.startsWith("data: ")) continue;
+          const data = line.slice(6);
+          if (data === "[DONE]") {
+            return;
+          }
+          try {
+            const chunk = JSON.parse(data);
+            yield chunk;
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
   /**
    * Create a text completion
    *
@@ -74,7 +134,7 @@ var AINamespace = class {
     const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
       method: "POST",
       headers: {
-        "Authorization": `Bearer ${this.config.apiKey}`,
+        "CENCORI_API_KEY": this.config.apiKey,
         "Content-Type": "application/json",
         ...this.config.headers
       },
package/dist/ai/index.mjs.map CHANGED
(single-line sourcemap, regenerated; its embedded sourcesContent mirrors the same src/ai/index.ts changes as the index.mjs diff above)
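
Both builds make the same wire-level changes, and they are breaking against the old backend: the chat path moves from /api/v1/chat/completions to /api/ai/chat, authentication moves from an `Authorization: Bearer` header to a `CENCORI_API_KEY` header, the body sends camelCase `maxTokens` instead of `max_tokens`, and `stream` is no longer caller-controlled (chat() pins it to false, chatStream() to true). A raw-fetch sketch of the new request shape — baseUrl and the key value are placeholder assumptions; the path, header name, and body fields come from the 1.0.1 source above:

const baseUrl = 'https://example.invalid';        // assumption: whatever CencoriConfig.baseUrl resolves to
const apiKey = process.env.CENCORI_API_KEY!;      // assumption: key supplied via environment

const response = await fetch(`${baseUrl}/api/ai/chat`, {
  method: 'POST',
  headers: {
    'CENCORI_API_KEY': apiKey,                    // 1.0.0 sent: Authorization: Bearer <key>
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello!' }],
    maxTokens: 256,                               // 1.0.0 sent: max_tokens
    stream: false,                                // fixed per method: chat() false, chatStream() true
  }),
});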
package/dist/compute/index.d.mts CHANGED
@@ -1,4 +1,4 @@
-import { f as ComputeRunOptions } from '../types-Be_rWV2h.mjs';
+import { e as ComputeRunOptions } from '../types-DIuz6XWN.mjs';
 
 /**
  * Compute Namespace - Serverless Functions & GPU Access
package/dist/compute/index.d.ts CHANGED
@@ -1,4 +1,4 @@
-import { f as ComputeRunOptions } from '../types-Be_rWV2h.js';
+import { e as ComputeRunOptions } from '../types-DIuz6XWN.js';
 
 /**
  * Compute Namespace - Serverless Functions & GPU Access
package/dist/index.d.mts CHANGED
@@ -1,6 +1,7 @@
-import { C as CencoriConfig } from './types-Be_rWV2h.mjs';
-export { c as ChatMessage, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from './types-Be_rWV2h.mjs';
+import { C as CencoriConfig, R as RequestOptions } from './types-DIuz6XWN.mjs';
+export { f as ChatMessage, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from './types-DIuz6XWN.mjs';
 import { AINamespace } from './ai/index.mjs';
+export { StreamChunk } from './ai/index.mjs';
 import { ComputeNamespace } from './compute/index.mjs';
 import { WorkflowNamespace } from './workflow/index.mjs';
 import { StorageNamespace } from './storage/index.mjs';
@@ -84,6 +85,14 @@ declare class Cencori {
     * });
     */
    constructor(config?: CencoriConfig);
+    /**
+     * Get the base URL for API calls
+     */
+    getBaseUrl(): string;
+    /**
+     * Get the API key
+     */
+    getApiKey(): string;
     /**
      * Get the current configuration (API key is masked)
      */
@@ -91,6 +100,46 @@ declare class Cencori {
         baseUrl: string;
         apiKeyHint: string;
     };
+    /**
+     * Make a generic API request with retry and error handling
+     *
+     * @example
+     * const data = await cencori.request('/api/custom-endpoint', {
+     *   method: 'POST',
+     *   body: JSON.stringify({ foo: 'bar' })
+     * });
+     */
+    request<T>(endpoint: string, options: RequestOptions): Promise<T>;
+}
+
+/**
+ * Cencori Error Classes
+ *
+ * Custom error types for better error handling
+ */
+declare class CencoriError extends Error {
+    statusCode?: number | undefined;
+    code?: string | undefined;
+    constructor(message: string, statusCode?: number | undefined, code?: string | undefined);
+}
+declare class AuthenticationError extends CencoriError {
+    constructor(message?: string);
+}
+declare class RateLimitError extends CencoriError {
+    constructor(message?: string);
+}
+declare class SafetyError extends CencoriError {
+    reasons?: string[] | undefined;
+    constructor(message?: string, reasons?: string[] | undefined);
 }
 
-export { AINamespace, Cencori, CencoriConfig, ComputeNamespace, StorageNamespace, WorkflowNamespace, Cencori as default };
+/**
+ * Utility functions for Cencori SDK
+ */
+/**
+ * Fetch with automatic retry on 5xx errors
+ * Uses exponential backoff: 1s, 2s, 4s
+ */
+declare function fetchWithRetry(url: string, options: RequestInit, maxRetries?: number): Promise<Response>;
+
+export { AINamespace, AuthenticationError, Cencori, CencoriConfig, CencoriError, ComputeNamespace, RateLimitError, RequestOptions, SafetyError, StorageNamespace, WorkflowNamespace, Cencori as default, fetchWithRetry };
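
The root entry now also surfaces typed errors, a generic request<T> helper, and fetchWithRetry. A hedged handling sketch: the class hierarchy and constructors come from the declarations above, but this diff never shows where each subclass is thrown (the chat/embeddings bodies above still throw plain Error), so the branching below is illustrative:

import {
  Cencori,
  CencoriError,
  AuthenticationError,
  RateLimitError,
  SafetyError,
} from 'cencori';

const cencori = new Cencori({ apiKey: process.env.CENCORI_API_KEY! });

try {
  const res = await cencori.ai.chat({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
  console.log(res.content);
} catch (err) {
  if (err instanceof RateLimitError) {
    // back off and retry; fetchWithRetry only covers 5xx per its doc comment, not 429
  } else if (err instanceof SafetyError) {
    console.error('Blocked:', err.reasons);           // reasons?: string[]
  } else if (err instanceof AuthenticationError) {
    console.error('Check CENCORI_API_KEY');
  } else if (err instanceof CencoriError) {
    console.error(err.code, err.statusCode, err.message);
  } else {
    throw err;
  }
}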
package/dist/index.d.ts CHANGED
@@ -1,6 +1,7 @@
-import { C as CencoriConfig } from './types-Be_rWV2h.js';
-export { c as ChatMessage, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from './types-Be_rWV2h.js';
+import { C as CencoriConfig, R as RequestOptions } from './types-DIuz6XWN.js';
+export { f as ChatMessage, a as ChatRequest, b as ChatResponse, c as CompletionRequest, E as EmbeddingRequest, d as EmbeddingResponse } from './types-DIuz6XWN.js';
 import { AINamespace } from './ai/index.js';
+export { StreamChunk } from './ai/index.js';
 import { ComputeNamespace } from './compute/index.js';
 import { WorkflowNamespace } from './workflow/index.js';
 import { StorageNamespace } from './storage/index.js';
@@ -84,6 +85,14 @@ declare class Cencori {
     * });
     */
    constructor(config?: CencoriConfig);
+    /**
+     * Get the base URL for API calls
+     */
+    getBaseUrl(): string;
+    /**
+     * Get the API key
+     */
+    getApiKey(): string;
     /**
      * Get the current configuration (API key is masked)
      */
@@ -91,6 +100,46 @@ declare class Cencori {
         baseUrl: string;
         apiKeyHint: string;
     };
+    /**
+     * Make a generic API request with retry and error handling
+     *
+     * @example
+     * const data = await cencori.request('/api/custom-endpoint', {
+     *   method: 'POST',
+     *   body: JSON.stringify({ foo: 'bar' })
+     * });
+     */
+    request<T>(endpoint: string, options: RequestOptions): Promise<T>;
+}
+
+/**
+ * Cencori Error Classes
+ *
+ * Custom error types for better error handling
+ */
+declare class CencoriError extends Error {
+    statusCode?: number | undefined;
+    code?: string | undefined;
+    constructor(message: string, statusCode?: number | undefined, code?: string | undefined);
+}
+declare class AuthenticationError extends CencoriError {
+    constructor(message?: string);
+}
+declare class RateLimitError extends CencoriError {
+    constructor(message?: string);
+}
+declare class SafetyError extends CencoriError {
+    reasons?: string[] | undefined;
+    constructor(message?: string, reasons?: string[] | undefined);
 }
 
-export { AINamespace, Cencori, CencoriConfig, ComputeNamespace, StorageNamespace, WorkflowNamespace, Cencori as default };
+/**
+ * Utility functions for Cencori SDK
+ */
+/**
+ * Fetch with automatic retry on 5xx errors
+ * Uses exponential backoff: 1s, 2s, 4s
+ */
+declare function fetchWithRetry(url: string, options: RequestInit, maxRetries?: number): Promise<Response>;
+
+export { AINamespace, AuthenticationError, Cencori, CencoriConfig, CencoriError, ComputeNamespace, RateLimitError, RequestOptions, SafetyError, StorageNamespace, WorkflowNamespace, Cencori as default, fetchWithRetry };