@codetunezstudios/token-kit 0.1.0-beta.1 → 0.1.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -15,7 +15,7 @@ token-kit enables developers to integrate LLM features into their applications u
15
15
 
16
16
  - **Simple API** — Intuitive methods for chat completions
17
17
  - **TypeScript First** — Full type safety and IntelliSense support
18
- - **Multiple Models** — Support for GPT-3.5, GPT-4, GPT-4 Turbo (more coming)
18
+ - **Multiple Models** — Support for Claude, GPT-4o, Amazon Nova (more coming)
19
19
  - **Token Management** — Built-in balance checking and validation
20
20
  - **Error Handling** — Comprehensive typed error classes
21
21
  - **Lightweight** — Single runtime dependency (axios)
@@ -79,7 +79,7 @@ const res = await tk.chat('user_token', [
79
79
  TokenKit.system('You are a helpful assistant.'),
80
80
  TokenKit.user('Explain quantum computing simply.'),
81
81
  ], {
82
- model: 'gpt-4',
82
+ model: 'gpt-4o',
83
83
  maxTokens: 200,
84
84
  temperature: 0.8,
85
85
  });
@@ -89,7 +89,7 @@ const res = await tk.chat('user_token', [
89
89
 
90
90
  | Option | Type | Default | Description |
91
91
  |--------|------|---------|-------------|
92
- | `model` | `string` | `gpt-3.5-turbo` | LLM model to use |
92
+ | `model` | `string` | `gpt-4o-mini` | LLM model to use |
93
93
  | `maxTokens` | `number` | `500` | Max tokens in response |
94
94
  | `temperature` | `number` | `0.7` | Randomness (0–2) |
95
95
 
@@ -144,7 +144,7 @@ List available LLM models.
144
144
 
145
145
  ```typescript
146
146
  const models = await tk.getModels();
147
- // ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo']
147
+ // ['gpt-4o-mini', 'gpt-4o', 'claude-3.5-haiku', 'claude-sonnet-4', 'nova-micro', 'nova-lite']
148
148
  ```
149
149
 
150
150
  ### Helper Methods
@@ -213,9 +213,12 @@ Different models consume tokens at different rates:
213
213
 
214
214
  | Model | Rate | 1,000 TK tokens = |
215
215
  |-------|------|-------------------|
216
- | GPT-3.5 Turbo | 1.0x | 1,000 LLM tokens |
217
- | GPT-4 Turbo | 2.0x | 500 LLM tokens |
218
- | GPT-4 | 3.0x | 333 LLM tokens |
216
+ | GPT-4o Mini | 1.0x | 1,000 LLM tokens |
217
+ | Claude 3.5 Haiku | 1.0x | 1,000 LLM tokens |
218
+ | Amazon Nova Micro | 1.0x | 1,000 LLM tokens |
219
+ | Amazon Nova Lite | 1.0x | 1,000 LLM tokens |
220
+ | GPT-4o | 2.0x | 500 LLM tokens |
221
+ | Claude Sonnet 4 | 3.0x | 333 LLM tokens |
219
222
 
220
223
  See [token-kit.com](https://token-kit.com) for current package pricing and details.
221
224
 
package/dist/index.d.mts CHANGED
@@ -8,7 +8,7 @@ interface Message {
8
8
  content: string;
9
9
  }
10
10
  interface ChatOptions {
11
- /** LLM model to use (default: 'gpt-3.5-turbo') */
11
+ /** LLM model to use (default: 'gpt-4o-mini') */
12
12
  model?: string;
13
13
  /** Maximum tokens in response (default: 500) */
14
14
  maxTokens?: number;
@@ -89,7 +89,7 @@ declare class TokenKitAPIError extends Error implements TokenKitError {
89
89
  interface TokenKitConfig {
90
90
  /** Developer API key */
91
91
  apiKey: string;
92
- /** API Gateway base URL (default: https://api.token-kit.com) */
92
+ /** API Gateway base URL (default: https://api.token-kit.com/api/v1) */
93
93
  baseUrl?: string;
94
94
  /** Request timeout in milliseconds (default: 60000) */
95
95
  timeout?: number;
@@ -159,7 +159,7 @@ declare class TokenKit {
159
159
  * { role: 'system', content: 'You are a helpful assistant.' },
160
160
  * { role: 'user', content: 'Hello!' }
161
161
  * ], {
162
- * model: 'gpt-4',
162
+ * model: 'gpt-4o',
163
163
  * maxTokens: 200,
164
164
  * temperature: 0.8
165
165
  * });
@@ -200,9 +200,9 @@ declare class TokenKit {
200
200
  *
201
201
  * @example
202
202
  * ```typescript
203
- * const models = await tokenKit.getModels();
203
+ * const models = await tokenKit.getModels();
204
204
  * console.log('Available models:', models);
205
- * // ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo']
205
+ * // ['claude-3.5-haiku', 'claude-sonnet-4', 'nova-micro', 'nova-lite', 'gpt-4o', 'gpt-4o-mini']
206
206
  * ```
207
207
  */
208
208
  getModels(): Promise<string[]>;
package/dist/index.d.ts CHANGED
@@ -8,7 +8,7 @@ interface Message {
8
8
  content: string;
9
9
  }
10
10
  interface ChatOptions {
11
- /** LLM model to use (default: 'gpt-3.5-turbo') */
11
+ /** LLM model to use (default: 'gpt-4o-mini') */
12
12
  model?: string;
13
13
  /** Maximum tokens in response (default: 500) */
14
14
  maxTokens?: number;
@@ -89,7 +89,7 @@ declare class TokenKitAPIError extends Error implements TokenKitError {
89
89
  interface TokenKitConfig {
90
90
  /** Developer API key */
91
91
  apiKey: string;
92
- /** API Gateway base URL (default: https://api.token-kit.com) */
92
+ /** API Gateway base URL (default: https://api.token-kit.com/api/v1) */
93
93
  baseUrl?: string;
94
94
  /** Request timeout in milliseconds (default: 60000) */
95
95
  timeout?: number;
@@ -159,7 +159,7 @@ declare class TokenKit {
159
159
  * { role: 'system', content: 'You are a helpful assistant.' },
160
160
  * { role: 'user', content: 'Hello!' }
161
161
  * ], {
162
- * model: 'gpt-4',
162
+ * model: 'gpt-4o',
163
163
  * maxTokens: 200,
164
164
  * temperature: 0.8
165
165
  * });
@@ -200,9 +200,9 @@ declare class TokenKit {
200
200
  *
201
201
  * @example
202
202
  * ```typescript
203
- * const models = await tokenKit.getModels();
203
+ * const models = await tokenKit.getModels();
204
204
  * console.log('Available models:', models);
205
- * // ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo']
205
+ * // ['claude-3.5-haiku', 'claude-sonnet-4', 'nova-micro', 'nova-lite', 'gpt-4o', 'gpt-4o-mini']
206
206
  * ```
207
207
  */
208
208
  getModels(): Promise<string[]>;
package/dist/index.js CHANGED
@@ -245,7 +245,7 @@ var TokenKit = class {
245
245
  * { role: 'system', content: 'You are a helpful assistant.' },
246
246
  * { role: 'user', content: 'Hello!' }
247
247
  * ], {
248
- * model: 'gpt-4',
248
+ * model: 'gpt-4o',
249
249
  * maxTokens: 200,
250
250
  * temperature: 0.8
251
251
  * });
@@ -315,9 +315,9 @@ var TokenKit = class {
315
315
  *
316
316
  * @example
317
317
  * ```typescript
318
- * const models = await tokenKit.getModels();
318
+ * const models = await tokenKit.getModels();
319
319
  * console.log('Available models:', models);
320
- * // ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo']
320
+ * // ['claude-3.5-haiku', 'claude-sonnet-4', 'nova-micro', 'nova-lite', 'gpt-4o', 'gpt-4o-mini']
321
321
  * ```
322
322
  */
323
323
  async getModels() {
package/dist/index.mjs CHANGED
@@ -207,7 +207,7 @@ var TokenKit = class {
207
207
  * { role: 'system', content: 'You are a helpful assistant.' },
208
208
  * { role: 'user', content: 'Hello!' }
209
209
  * ], {
210
- * model: 'gpt-4',
210
+ * model: 'gpt-4o',
211
211
  * maxTokens: 200,
212
212
  * temperature: 0.8
213
213
  * });
@@ -277,9 +277,9 @@ var TokenKit = class {
277
277
  *
278
278
  * @example
279
279
  * ```typescript
280
- * const models = await tokenKit.getModels();
280
+ * const models = await tokenKit.getModels();
281
281
  * console.log('Available models:', models);
282
- * // ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo']
282
+ * // ['claude-3.5-haiku', 'claude-sonnet-4', 'nova-micro', 'nova-lite', 'gpt-4o', 'gpt-4o-mini']
283
283
  * ```
284
284
  */
285
285
  async getModels() {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@codetunezstudios/token-kit",
3
- "version": "0.1.0-beta.1",
3
+ "version": "0.1.0-beta.2",
4
4
  "description": "Official TypeScript SDK for token-kit - AI token infrastructure for developers",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.mjs",
@@ -33,7 +33,8 @@
33
33
  "openai",
34
34
  "gpt",
35
35
  "claude",
36
- "gemini",
36
+ "bedrock",
37
+ "amazon-nova",
37
38
  "tokens",
38
39
  "sdk",
39
40
  "typescript",