cencori 0.3.2 → 1.0.0

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages' contents as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +88 -124
  2. package/dist/ai/index.d.mts +48 -0
  3. package/dist/ai/index.d.ts +48 -0
  4. package/dist/ai/index.js +128 -0
  5. package/dist/ai/index.js.map +1 -0
  6. package/dist/ai/index.mjs +103 -0
  7. package/dist/ai/index.mjs.map +1 -0
  8. package/dist/compute/index.d.mts +38 -0
  9. package/dist/compute/index.d.ts +38 -0
  10. package/dist/compute/index.js +62 -0
  11. package/dist/compute/index.js.map +1 -0
  12. package/dist/compute/index.mjs +37 -0
  13. package/dist/compute/index.mjs.map +1 -0
  14. package/dist/index.d.mts +89 -72
  15. package/dist/index.d.ts +89 -72
  16. package/dist/index.js +632 -165
  17. package/dist/index.js.map +1 -1
  18. package/dist/index.mjs +623 -156
  19. package/dist/index.mjs.map +1 -1
  20. package/dist/storage/index.d.mts +82 -0
  21. package/dist/storage/index.d.ts +82 -0
  22. package/dist/storage/index.js +122 -0
  23. package/dist/storage/index.js.map +1 -0
  24. package/dist/storage/index.mjs +97 -0
  25. package/dist/storage/index.mjs.map +1 -0
  26. package/dist/tanstack/index.d.mts +95 -0
  27. package/dist/tanstack/index.d.ts +95 -0
  28. package/dist/tanstack/index.js +290 -0
  29. package/dist/tanstack/index.js.map +1 -0
  30. package/dist/tanstack/index.mjs +262 -0
  31. package/dist/tanstack/index.mjs.map +1 -0
  32. package/dist/types-Be_rWV2h.d.mts +70 -0
  33. package/dist/types-Be_rWV2h.d.ts +70 -0
  34. package/dist/vercel/index.d.mts +126 -0
  35. package/dist/vercel/index.d.ts +126 -0
  36. package/dist/vercel/index.js +373 -0
  37. package/dist/vercel/index.js.map +1 -0
  38. package/dist/vercel/index.mjs +344 -0
  39. package/dist/vercel/index.mjs.map +1 -0
  40. package/dist/workflow/index.d.mts +44 -0
  41. package/dist/workflow/index.d.ts +44 -0
  42. package/dist/workflow/index.js +72 -0
  43. package/dist/workflow/index.js.map +1 -0
  44. package/dist/workflow/index.mjs +47 -0
  45. package/dist/workflow/index.mjs.map +1 -0
  46. package/package.json +98 -41
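The new `dist/` directories indicate that 1.0.0 splits the package into per-product entry points. A minimal sketch of how those entries would be imported, assuming the reworked `package.json` (whose contents are not shown in this diff) maps each `dist/<name>` folder to a `cencori/<name>` subpath export; only `cencori` and `cencori/vercel` are confirmed by the README below:

```typescript
// Root client -- confirmed by the README's Quick Start.
import { Cencori } from 'cencori';

// Vercel AI SDK adapter -- confirmed by the README's integration examples.
import { cencori } from 'cencori/vercel';

// Hypothetical subpath imports inferred from the dist/ layout; verify against
// the package.json exports map before relying on them.
import { AINamespace } from 'cencori/ai';
import { ComputeNamespace } from 'cencori/compute';
```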
package/README.md CHANGED
@@ -1,15 +1,11 @@
  # Cencori

- Official SDK for Cencori - The Security Layer for AI Development.
+ **The unified infrastructure layer for AI applications.**

- ## Installation
+ One SDK. Every AI primitive. Always secure. Always logged.

  ```bash
  npm install cencori
- # or
- yarn add cencori
- # or
- pnpm add cencori
  ```

  ## Quick Start
@@ -17,176 +13,144 @@ pnpm add cencori
  ```typescript
  import { Cencori } from 'cencori';

- const cencori = new Cencori({
-   apiKey: process.env.CENCORI_API_KEY!
+ const cencori = new Cencori({
+   apiKey: process.env.CENCORI_API_KEY
  });

+ // AI Gateway - Chat with any model
  const response = await cencori.ai.chat({
-   messages: [
-     { role: 'user', content: 'Hello, AI!' }
-   ]
+   model: 'gpt-4o',
+   messages: [{ role: 'user', content: 'Hello!' }]
  });

  console.log(response.content);
  ```

- ## Authentication
-
- Get your API key from the [Cencori Dashboard](https://cencori.com/dashboard):
-
- 1. Create a project
- 2. Navigate to Settings → API tab
- 3. Generate a new key:
-    - **Secret key (`csk_`)** - For server-side use only
-    - **Publishable key (`cpk_`)** - Safe for browser use (requires domain whitelisting)
- 4. Copy and store it securely
-
- ## API Reference
-
- ### Cencori
-
- Initialize the SDK client.
-
- ```typescript
- import { Cencori } from 'cencori';
-
- const cencori = new Cencori({
-   apiKey: 'csk_xxx', // Secret key for server-side
-   baseUrl: 'https://cencori.com' // Optional, defaults to production
- });
- ```
-
- ### AI Module
+ ## Products

- #### `ai.chat(params)`
+ | Product | Status | Description |
+ |---------|--------|-------------|
+ | **AI Gateway** | ✅ Available | Multi-provider routing, security, observability |
+ | **Compute** | 🚧 Coming Soon | Serverless functions, GPU access |
+ | **Workflow** | 🚧 Coming Soon | Visual AI pipelines, orchestration |
+ | **Storage** | 🚧 Coming Soon | Vector database, knowledge base, RAG |
+ | **Integration** | ✅ Available | SDKs, Vercel AI, TanStack |

- Send a chat message to the AI (non-streaming).
+ ## AI Gateway

- **Parameters:**
- - `messages`: Array of message objects with `role` ('system' | 'user' | 'assistant') and `content`
- - `model`: Optional AI model (defaults to 'gemini-2.5-flash')
- - `temperature`: Optional temperature (0-1)
- - `maxTokens`: Optional max tokens for response
- - `userId`: Optional user ID for rate limiting
-
- **Example:**
+ ### Chat Completions

  ```typescript
  const response = await cencori.ai.chat({
+   model: 'gpt-4o', // or 'claude-3-opus', 'gemini-1.5-pro', etc.
    messages: [
-     { role: 'user', content: 'Explain quantum computing' }
+     { role: 'system', content: 'You are a helpful assistant.' },
+     { role: 'user', content: 'What is the capital of France?' }
    ],
-   model: 'gpt-4o',
-   temperature: 0.7
+   temperature: 0.7,
+   maxTokens: 1000
  });

  console.log(response.content);
- console.log(response.usage); // Token usage stats
- console.log(response.cost_usd); // Cost in USD
+ console.log(response.usage); // { promptTokens, completionTokens, totalTokens }
  ```

- #### `ai.chatStream(params)`
-
- Stream a chat response token-by-token.
-
- **Example:**
+ ### Embeddings

  ```typescript
- const stream = cencori.ai.chatStream({
-   messages: [
-     { role: 'user', content: 'Tell me a story' }
-   ],
-   model: 'gpt-4o'
+ const response = await cencori.ai.embeddings({
+   model: 'text-embedding-3-small',
+   input: 'Hello world'
  });

- for await (const chunk of stream) {
-   process.stdout.write(chunk.delta);
- }
+ console.log(response.embeddings[0]); // [0.1, 0.2, ...]
  ```

- ## Error Handling
+ ## Framework Integrations

- The SDK includes custom error classes for common scenarios:
+ ### Vercel AI SDK

  ```typescript
- import {
-   Cencori,
-   AuthenticationError,
-   RateLimitError,
-   SafetyError
- } from 'cencori';
-
- try {
-   const response = await cencori.ai.chat({ messages: [...] });
- } catch (error) {
-   if (error instanceof AuthenticationError) {
-     console.error('Invalid API key');
-   } else if (error instanceof RateLimitError) {
-     console.error('Too many requests, please slow down');
-   } else if (error instanceof SafetyError) {
-     console.error('Content blocked:', error.reasons);
-   }
+ import { cencori } from 'cencori/vercel';
+ import { streamText } from 'ai';
+
+ const result = await streamText({
+   model: cencori('gpt-4o'),
+   messages: [{ role: 'user', content: 'Hello!' }]
+ });
+
+ for await (const chunk of result.textStream) {
+   console.log(chunk);
  }
  ```

- ## TypeScript Support
-
- The SDK is written in TypeScript and includes full type definitions.
+ ### With React/Next.js

  ```typescript
- import type { ChatParams, ChatResponse, Message, StreamChunk } from 'cencori';
+ import { cencori } from 'cencori/vercel';
+ import { useChat } from 'ai/react';
+
+ export default function Chat() {
+   const { messages, input, handleInputChange, handleSubmit } = useChat({
+     api: '/api/chat'
+   });
+
+   return (
+     <div>
+       {messages.map(m => <div key={m.id}>{m.content}</div>)}
+       <form onSubmit={handleSubmit}>
+         <input value={input} onChange={handleInputChange} />
+       </form>
+     </div>
+   );
+ }
  ```

- ## Features
-
- - ✅ Full TypeScript support with type definitions
- - ✅ Built-in authentication
- - ✅ Automatic retry logic with exponential backoff
- - ✅ Custom error classes
- - ✅ Content safety filtering (PII, prompt injection, harmful content)
- - ✅ Rate limiting protection
- - ✅ Streaming support with `chatStream()`
-
- ## Supported Models
+ ## Coming Soon

- | Provider | Models |
- |----------|--------|
- | OpenAI | `gpt-4o`, `gpt-4-turbo`, `gpt-3.5-turbo` |
- | Anthropic | `claude-3-opus`, `claude-3-sonnet`, `claude-3-haiku` |
- | Google | `gemini-2.5-flash`, `gemini-2.0-flash` |
+ ### Compute

- ## Local Development
+ ```typescript
+ // 🚧 Coming Soon
+ await cencori.compute.run('my-function', {
+   input: { data: 'hello' }
+ });
+ ```

- For local development or testing:
+ ### Workflow

  ```typescript
- const cencori = new Cencori({
-   apiKey: 'csk_test_xxx', // Test secret key
-   baseUrl: 'http://localhost:3000'
+ // 🚧 Coming Soon
+ await cencori.workflow.trigger('data-enrichment', {
+   data: { userId: '123' }
  });
  ```

- ## Browser Usage (Publishable Keys)
-
- For browser/client-side usage, use publishable keys:
+ ### Storage

  ```typescript
- // Safe to use in browser - only works from allowed domains
- const cencori = new Cencori({
-   apiKey: 'cpk_xxx' // Publishable key
+ // 🚧 Coming Soon
+ const results = await cencori.storage.vectors.search('query', {
+   limit: 5
  });

- const response = await cencori.ai.chat({
-   messages: [{ role: 'user', content: 'Hello!' }]
- });
+ await cencori.storage.knowledge.query('What is our refund policy?');
  ```

- ## Support
+ ## Why Cencori?
+
+ - **🛡️ Security Built-in**: PII detection, content filtering, jailbreak protection
+ - **📊 Observability**: Every request logged, every token tracked
+ - **💰 Cost Control**: Budget alerts, spend caps, per-request costing
+ - **🔄 Multi-Provider**: Switch between OpenAI, Anthropic, Google, etc.
+ - **⚡ One SDK**: AI, compute, storage, workflows - unified
+
+ ## Links

- - **Documentation**: [cencori.com/docs](https://cencori.com/docs)
- - **Dashboard**: [cencori.com/dashboard](https://cencori.com/dashboard)
- - **GitHub**: [github.com/cencori](https://github.com/cencori)
+ - [Documentation](https://cencori.com/docs)
+ - [Dashboard](https://cencori.com/dashboard)
+ - [GitHub](https://github.com/cencori/cencori)

  ## License

- MIT © FohnAI
+ MIT
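Taken together, the README changes document a breaking change to the chat API between 0.3.2 and 1.0.0: the model is now passed explicitly (the old docs listed a `gemini-2.5-flash` default), the documented usage shape changed, and `cost_usd` and `chatStream()` no longer appear. A hedged migration sketch based only on what the two READMEs show:

```typescript
import { Cencori } from 'cencori';

const cencori = new Cencori({ apiKey: process.env.CENCORI_API_KEY });

// 0.3.2 style (removed docs): model optional, response.cost_usd documented.
// 1.0.0 style (new docs): name the model explicitly and read token counts
// from usage; whether a server-side default model still exists is not shown.
const response = await cencori.ai.chat({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Hello!' }]
});

console.log(response.usage.totalTokens); // cost_usd is no longer documented
```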
package/dist/ai/index.d.mts ADDED
@@ -0,0 +1,48 @@
+ import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.mjs';
+
+ /**
+  * AI Gateway - Chat, Completions, and Embeddings
+  *
+  * @example
+  * const response = await cencori.ai.chat({
+  *   model: 'gpt-4o',
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+
+ declare class AINamespace {
+   private config;
+   constructor(config: Required<CencoriConfig>);
+   /**
+    * Create a chat completion
+    *
+    * @example
+    * const response = await cencori.ai.chat({
+    *   model: 'gpt-4o',
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   chat(request: ChatRequest): Promise<ChatResponse>;
+   /**
+    * Create a text completion
+    *
+    * @example
+    * const response = await cencori.ai.completions({
+    *   model: 'gpt-4o',
+    *   prompt: 'Write a haiku about coding'
+    * });
+    */
+   completions(request: CompletionRequest): Promise<ChatResponse>;
+   /**
+    * Create embeddings
+    *
+    * @example
+    * const response = await cencori.ai.embeddings({
+    *   model: 'text-embedding-3-small',
+    *   input: 'Hello world'
+    * });
+    */
+   embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
+ }
+
+ export { AINamespace };
package/dist/ai/index.d.ts ADDED
@@ -0,0 +1,48 @@
+ import { C as CencoriConfig, a as ChatRequest, b as ChatResponse, d as CompletionRequest, E as EmbeddingRequest, e as EmbeddingResponse } from '../types-Be_rWV2h.js';
+
+ /**
+  * AI Gateway - Chat, Completions, and Embeddings
+  *
+  * @example
+  * const response = await cencori.ai.chat({
+  *   model: 'gpt-4o',
+  *   messages: [{ role: 'user', content: 'Hello!' }]
+  * });
+  */
+
+ declare class AINamespace {
+   private config;
+   constructor(config: Required<CencoriConfig>);
+   /**
+    * Create a chat completion
+    *
+    * @example
+    * const response = await cencori.ai.chat({
+    *   model: 'gpt-4o',
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   chat(request: ChatRequest): Promise<ChatResponse>;
+   /**
+    * Create a text completion
+    *
+    * @example
+    * const response = await cencori.ai.completions({
+    *   model: 'gpt-4o',
+    *   prompt: 'Write a haiku about coding'
+    * });
+    */
+   completions(request: CompletionRequest): Promise<ChatResponse>;
+   /**
+    * Create embeddings
+    *
+    * @example
+    * const response = await cencori.ai.embeddings({
+    *   model: 'text-embedding-3-small',
+    *   input: 'Hello world'
+    * });
+    */
+   embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse>;
+ }
+
+ export { AINamespace };
package/dist/ai/index.js ADDED
@@ -0,0 +1,128 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/ai/index.ts
+ var ai_exports = {};
+ __export(ai_exports, {
+   AINamespace: () => AINamespace
+ });
+ module.exports = __toCommonJS(ai_exports);
+ var AINamespace = class {
+   constructor(config) {
+     this.config = config;
+   }
+   /**
+    * Create a chat completion
+    *
+    * @example
+    * const response = await cencori.ai.chat({
+    *   model: 'gpt-4o',
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   async chat(request) {
+     const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
+       method: "POST",
+       headers: {
+         "Authorization": `Bearer ${this.config.apiKey}`,
+         "Content-Type": "application/json",
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: request.model,
+         messages: request.messages,
+         temperature: request.temperature,
+         max_tokens: request.maxTokens,
+         stream: request.stream ?? false
+       })
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+     }
+     const data = await response.json();
+     return {
+       id: data.id,
+       model: data.model,
+       content: data.choices?.[0]?.message?.content ?? "",
+       usage: {
+         promptTokens: data.usage?.prompt_tokens ?? 0,
+         completionTokens: data.usage?.completion_tokens ?? 0,
+         totalTokens: data.usage?.total_tokens ?? 0
+       }
+     };
+   }
+   /**
+    * Create a text completion
+    *
+    * @example
+    * const response = await cencori.ai.completions({
+    *   model: 'gpt-4o',
+    *   prompt: 'Write a haiku about coding'
+    * });
+    */
+   async completions(request) {
+     return this.chat({
+       model: request.model,
+       messages: [{ role: "user", content: request.prompt }],
+       temperature: request.temperature,
+       maxTokens: request.maxTokens
+     });
+   }
+   /**
+    * Create embeddings
+    *
+    * @example
+    * const response = await cencori.ai.embeddings({
+    *   model: 'text-embedding-3-small',
+    *   input: 'Hello world'
+    * });
+    */
+   async embeddings(request) {
+     const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
+       method: "POST",
+       headers: {
+         "Authorization": `Bearer ${this.config.apiKey}`,
+         "Content-Type": "application/json",
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: request.model,
+         input: request.input
+       })
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+     }
+     const data = await response.json();
+     return {
+       model: data.model,
+       embeddings: data.data?.map((d) => d.embedding) ?? [],
+       usage: {
+         totalTokens: data.usage?.total_tokens ?? 0
+       }
+     };
+   }
+ };
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   AINamespace
+ });
+ //# sourceMappingURL=index.js.map
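Note that the compiled gateway reports every failure as a plain `Error` whose message starts with `Cencori API error:`, rather than the typed `AuthenticationError`/`RateLimitError`/`SafetyError` classes the 0.3.2 README advertised. A minimal sketch of driving this namespace directly, assuming the `cencori/ai` subpath export suggested above and a config carrying `baseUrl`, `apiKey`, and `headers` (the only fields this file reads); in normal use `new Cencori(...)` constructs it for you:

```typescript
import { AINamespace } from 'cencori/ai'; // hypothetical subpath, see above

const ai = new AINamespace({
  baseUrl: 'https://cencori.com', // assumed default; this file only reads it
  apiKey: process.env.CENCORI_API_KEY ?? '',
  headers: {} // extra headers are spread into every request
});

try {
  const res = await ai.chat({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: 'Hi' }]
  });
  console.log(res.content, res.usage.totalTokens);
} catch (err) {
  // Non-2xx responses surface as: "Cencori API error: <detail or statusText>"
  console.error((err as Error).message);
}
```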
package/dist/ai/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/ai/index.ts"],"sourcesContent":["/**\n * AI Gateway - Chat, Completions, and Embeddings\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nimport type {\n CencoriConfig,\n ChatRequest,\n ChatResponse,\n CompletionRequest,\n EmbeddingRequest,\n EmbeddingResponse\n} from '../types';\n\n// API Response types\ninterface OpenAIChatResponse {\n id: string;\n model: string;\n choices?: Array<{\n message?: {\n content?: string;\n };\n }>;\n usage?: {\n prompt_tokens?: number;\n completion_tokens?: number;\n total_tokens?: number;\n };\n}\n\ninterface OpenAIEmbeddingResponse {\n model: string;\n data?: Array<{\n embedding: number[];\n }>;\n usage?: {\n total_tokens?: number;\n };\n}\n\nexport class AINamespace {\n private config: Required<CencoriConfig>;\n\n constructor(config: Required<CencoriConfig>) {\n this.config = config;\n }\n\n /**\n * Create a chat completion\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n async chat(request: ChatRequest): Promise<ChatResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n messages: request.messages,\n temperature: request.temperature,\n max_tokens: request.maxTokens,\n stream: request.stream ?? false,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIChatResponse;\n\n return {\n id: data.id,\n model: data.model,\n content: data.choices?.[0]?.message?.content ?? '',\n usage: {\n promptTokens: data.usage?.prompt_tokens ?? 0,\n completionTokens: data.usage?.completion_tokens ?? 0,\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n\n /**\n * Create a text completion\n * \n * @example\n * const response = await cencori.ai.completions({\n * model: 'gpt-4o',\n * prompt: 'Write a haiku about coding'\n * });\n */\n async completions(request: CompletionRequest): Promise<ChatResponse> {\n // Convert to chat format internally\n return this.chat({\n model: request.model,\n messages: [{ role: 'user', content: request.prompt }],\n temperature: request.temperature,\n maxTokens: request.maxTokens,\n });\n }\n\n /**\n * Create embeddings\n * \n * @example\n * const response = await cencori.ai.embeddings({\n * model: 'text-embedding-3-small',\n * input: 'Hello world'\n * });\n */\n async embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n input: request.input,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIEmbeddingResponse;\n\n return {\n model: data.model,\n embeddings: data.data?.map((d) => d.embedding) ?? [],\n usage: {\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AA6CO,IAAM,cAAN,MAAkB;AAAA,EAGrB,YAAY,QAAiC;AACzC,SAAK,SAAS;AAAA,EAClB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAA6C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,4BAA4B;AAAA,MAC3E,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,UAAU,QAAQ;AAAA,QAClB,aAAa,QAAQ;AAAA,QACrB,YAAY,QAAQ;AAAA,QACpB,QAAQ,QAAQ,UAAU;AAAA,MAC9B,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,UAAU,CAAC,GAAG,SAAS,WAAW;AAAA,MAChD,OAAO;AAAA,QACH,cAAc,KAAK,OAAO,iBAAiB;AAAA,QAC3C,kBAAkB,KAAK,OAAO,qBAAqB;AAAA,QACnD,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,YAAY,SAAmD;AAEjE,WAAO,KAAK,KAAK;AAAA,MACb,OAAO,QAAQ;AAAA,MACf,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,QAAQ,OAAO,CAAC;AAAA,MACpD,aAAa,QAAQ;AAAA,MACrB,WAAW,QAAQ;AAAA,IACvB,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,WAAW,SAAuD;AACpE,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,sBAAsB;AAAA,MACrE,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,OAAO,QAAQ;AAAA,MACnB,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,MACnD,OAAO;AAAA,QACH,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AACJ;","names":[]}
package/dist/ai/index.mjs ADDED
@@ -0,0 +1,103 @@
+ // src/ai/index.ts
+ var AINamespace = class {
+   constructor(config) {
+     this.config = config;
+   }
+   /**
+    * Create a chat completion
+    *
+    * @example
+    * const response = await cencori.ai.chat({
+    *   model: 'gpt-4o',
+    *   messages: [{ role: 'user', content: 'Hello!' }]
+    * });
+    */
+   async chat(request) {
+     const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {
+       method: "POST",
+       headers: {
+         "Authorization": `Bearer ${this.config.apiKey}`,
+         "Content-Type": "application/json",
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: request.model,
+         messages: request.messages,
+         temperature: request.temperature,
+         max_tokens: request.maxTokens,
+         stream: request.stream ?? false
+       })
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+     }
+     const data = await response.json();
+     return {
+       id: data.id,
+       model: data.model,
+       content: data.choices?.[0]?.message?.content ?? "",
+       usage: {
+         promptTokens: data.usage?.prompt_tokens ?? 0,
+         completionTokens: data.usage?.completion_tokens ?? 0,
+         totalTokens: data.usage?.total_tokens ?? 0
+       }
+     };
+   }
+   /**
+    * Create a text completion
+    *
+    * @example
+    * const response = await cencori.ai.completions({
+    *   model: 'gpt-4o',
+    *   prompt: 'Write a haiku about coding'
+    * });
+    */
+   async completions(request) {
+     return this.chat({
+       model: request.model,
+       messages: [{ role: "user", content: request.prompt }],
+       temperature: request.temperature,
+       maxTokens: request.maxTokens
+     });
+   }
+   /**
+    * Create embeddings
+    *
+    * @example
+    * const response = await cencori.ai.embeddings({
+    *   model: 'text-embedding-3-small',
+    *   input: 'Hello world'
+    * });
+    */
+   async embeddings(request) {
+     const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {
+       method: "POST",
+       headers: {
+         "Authorization": `Bearer ${this.config.apiKey}`,
+         "Content-Type": "application/json",
+         ...this.config.headers
+       },
+       body: JSON.stringify({
+         model: request.model,
+         input: request.input
+       })
+     });
+     if (!response.ok) {
+       const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+       throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);
+     }
+     const data = await response.json();
+     return {
+       model: data.model,
+       embeddings: data.data?.map((d) => d.embedding) ?? [],
+       usage: {
+         totalTokens: data.usage?.total_tokens ?? 0
+       }
+     };
+   }
+ };
+ export {
+   AINamespace
+ };
+ //# sourceMappingURL=index.mjs.map
package/dist/ai/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/ai/index.ts"],"sourcesContent":["/**\n * AI Gateway - Chat, Completions, and Embeddings\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n\nimport type {\n CencoriConfig,\n ChatRequest,\n ChatResponse,\n CompletionRequest,\n EmbeddingRequest,\n EmbeddingResponse\n} from '../types';\n\n// API Response types\ninterface OpenAIChatResponse {\n id: string;\n model: string;\n choices?: Array<{\n message?: {\n content?: string;\n };\n }>;\n usage?: {\n prompt_tokens?: number;\n completion_tokens?: number;\n total_tokens?: number;\n };\n}\n\ninterface OpenAIEmbeddingResponse {\n model: string;\n data?: Array<{\n embedding: number[];\n }>;\n usage?: {\n total_tokens?: number;\n };\n}\n\nexport class AINamespace {\n private config: Required<CencoriConfig>;\n\n constructor(config: Required<CencoriConfig>) {\n this.config = config;\n }\n\n /**\n * Create a chat completion\n * \n * @example\n * const response = await cencori.ai.chat({\n * model: 'gpt-4o',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n */\n async chat(request: ChatRequest): Promise<ChatResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/chat/completions`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n messages: request.messages,\n temperature: request.temperature,\n max_tokens: request.maxTokens,\n stream: request.stream ?? false,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIChatResponse;\n\n return {\n id: data.id,\n model: data.model,\n content: data.choices?.[0]?.message?.content ?? '',\n usage: {\n promptTokens: data.usage?.prompt_tokens ?? 0,\n completionTokens: data.usage?.completion_tokens ?? 0,\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n\n /**\n * Create a text completion\n * \n * @example\n * const response = await cencori.ai.completions({\n * model: 'gpt-4o',\n * prompt: 'Write a haiku about coding'\n * });\n */\n async completions(request: CompletionRequest): Promise<ChatResponse> {\n // Convert to chat format internally\n return this.chat({\n model: request.model,\n messages: [{ role: 'user', content: request.prompt }],\n temperature: request.temperature,\n maxTokens: request.maxTokens,\n });\n }\n\n /**\n * Create embeddings\n * \n * @example\n * const response = await cencori.ai.embeddings({\n * model: 'text-embedding-3-small',\n * input: 'Hello world'\n * });\n */\n async embeddings(request: EmbeddingRequest): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.config.baseUrl}/api/v1/embeddings`, {\n method: 'POST',\n headers: {\n 'Authorization': `Bearer ${this.config.apiKey}`,\n 'Content-Type': 'application/json',\n ...this.config.headers,\n },\n body: JSON.stringify({\n model: request.model,\n input: request.input,\n }),\n });\n\n if (!response.ok) {\n const errorData = await response.json().catch(() => ({ error: 'Unknown error' })) as { error?: string };\n throw new Error(`Cencori API error: ${errorData.error || response.statusText}`);\n }\n\n const data = await response.json() as OpenAIEmbeddingResponse;\n\n return {\n model: data.model,\n embeddings: data.data?.map((d) => d.embedding) ?? [],\n usage: {\n totalTokens: data.usage?.total_tokens ?? 0,\n },\n };\n }\n}\n"],"mappings":";AA6CO,IAAM,cAAN,MAAkB;AAAA,EAGrB,YAAY,QAAiC;AACzC,SAAK,SAAS;AAAA,EAClB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,KAAK,SAA6C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,4BAA4B;AAAA,MAC3E,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,UAAU,QAAQ;AAAA,QAClB,aAAa,QAAQ;AAAA,QACrB,YAAY,QAAQ;AAAA,QACpB,QAAQ,QAAQ,UAAU;AAAA,MAC9B,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,IAAI,KAAK;AAAA,MACT,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,UAAU,CAAC,GAAG,SAAS,WAAW;AAAA,MAChD,OAAO;AAAA,QACH,cAAc,KAAK,OAAO,iBAAiB;AAAA,QAC3C,kBAAkB,KAAK,OAAO,qBAAqB;AAAA,QACnD,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,YAAY,SAAmD;AAEjE,WAAO,KAAK,KAAK;AAAA,MACb,OAAO,QAAQ;AAAA,MACf,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,QAAQ,OAAO,CAAC;AAAA,MACpD,aAAa,QAAQ;AAAA,MACrB,WAAW,QAAQ;AAAA,IACvB,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,WAAW,SAAuD;AACpE,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,OAAO,sBAAsB;AAAA,MACrE,QAAQ;AAAA,MACR,SAAS;AAAA,QACL,iBAAiB,UAAU,KAAK,OAAO,MAAM;AAAA,QAC7C,gBAAgB;AAAA,QAChB,GAAG,KAAK,OAAO;AAAA,MACnB;AAAA,MACA,MAAM,KAAK,UAAU;AAAA,QACjB,OAAO,QAAQ;AAAA,QACf,OAAO,QAAQ;AAAA,MACnB,CAAC;AAAA,IACL,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AACd,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,OAAO,EAAE,OAAO,gBAAgB,EAAE;AAChF,YAAM,IAAI,MAAM,sBAAsB,UAAU,SAAS,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAEjC,WAAO;AAAA,MACH,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,KAAK,CAAC;AAAA,MACnD,OAAO;AAAA,QACH,aAAa,KAAK,OAAO,gBAAgB;AAAA,MAC7C;AAAA,IACJ;AAAA,EACJ;AACJ;","names":[]}
package/dist/compute/index.d.mts ADDED
@@ -0,0 +1,38 @@
+ import { f as ComputeRunOptions } from '../types-Be_rWV2h.mjs';
+
+ /**
+  * Compute Namespace - Serverless Functions & GPU Access
+  *
+  * 🚧 Coming Soon
+  *
+  * @example
+  * const result = await cencori.compute.run('my-function', {
+  *   input: { data: 'hello' }
+  * });
+  */
+
+ declare class ComputeNamespace {
+   /**
+    * Run a serverless function
+    *
+    * 🚧 Coming Soon - This feature is not yet available.
+    */
+   run(functionId: string, options?: ComputeRunOptions): Promise<never>;
+   /**
+    * Deploy a function
+    *
+    * 🚧 Coming Soon - This feature is not yet available.
+    */
+   deploy(config: {
+     name: string;
+     code: string;
+   }): Promise<never>;
+   /**
+    * List deployed functions
+    *
+    * 🚧 Coming Soon - This feature is not yet available.
+    */
+   list(): Promise<never>;
+ }
+
+ export { ComputeNamespace };
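The `Promise<never>` return types make the coming-soon status visible to the compiler: a successful value can never be produced, so these stubs can only reject or throw. A small sketch of what that means for callers, with the rejection message left as an assumption since the stub bodies are not part of this declaration file:

```typescript
import { Cencori } from 'cencori';

const cencori = new Cencori({ apiKey: process.env.CENCORI_API_KEY });

try {
  // Typed Promise<never>: this await can never resolve to a value.
  await cencori.compute.run('my-function', { input: { data: 'hello' } });
} catch (err) {
  // Assumed behavior: the stub rejects with a "not yet available" style error.
  console.error('Compute is not available yet:', (err as Error).message);
}
```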